x86/kvm: Testing...

  git grep -l EXPORT_SYMBOL arch/x86/kvm/ | while read file; do
	quilt add $file;
	sed -i -e 's/EXPORT_SYMBOL_GPL(\(.[^)]*\))/EXPORT_SYMBOL_GPL_FOR(\1, "kvm,kvm-intel,kvm-amd")/g' $file;
  done

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
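---
For reference, one instance of the rewrite, before and after (a sketch:
EXPORT_SYMBOL_GPL_FOR() is taken here to accept the symbol plus a
comma-separated list of the only modules allowed to resolve it):

	/* Before: any GPL-compatible module can bind to the symbol. */
	EXPORT_SYMBOL_GPL(kvm_cpu_caps);

	/* After: only kvm.ko, kvm-intel.ko and kvm-amd.ko can bind to it. */
	EXPORT_SYMBOL_GPL_FOR(kvm_cpu_caps, "kvm,kvm-intel,kvm-amd");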
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 097bdc0..125efee4 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -34,7 +34,7 @@
  * aligned to sizeof(unsigned long) because it's not accessed via bitops.
  */
 u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
-EXPORT_SYMBOL_GPL(kvm_cpu_caps);
+EXPORT_SYMBOL_GPL_FOR(kvm_cpu_caps, "kvm,kvm-intel,kvm-amd");
 
 u32 xstate_required_size(u64 xstate_bv, bool compacted)
 {
@@ -320,7 +320,7 @@ void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
 {
	__kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries,
				   vcpu->arch.cpuid_nent);
 }
-EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
+EXPORT_SYMBOL_GPL_FOR(kvm_update_cpuid_runtime, "kvm,kvm-intel,kvm-amd");
 
 static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
 {
@@ -848,7 +848,7 @@ void kvm_set_cpu_caps(void)
			kvm_cpu_cap_clear(X86_FEATURE_RDPID);
	}
 }
-EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_cpu_caps, "kvm,kvm-intel,kvm-amd");
 
 struct kvm_cpuid_array {
	struct kvm_cpuid_entry2 *entries;
@@ -1525,7 +1525,7 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, index);
 }
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);
+EXPORT_SYMBOL_GPL_FOR(kvm_find_cpuid_entry_index, "kvm,kvm-intel,kvm-amd");
 
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function)
@@ -1533,7 +1533,7 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 }
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
+EXPORT_SYMBOL_GPL_FOR(kvm_find_cpuid_entry, "kvm,kvm-intel,kvm-amd");
 
 /*
  * Intel CPUID semantics treats any query for an out-of-range leaf as if the
@@ -1653,7 +1653,7 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
			used_max_basic);
	return exact;
 }
-EXPORT_SYMBOL_GPL(kvm_cpuid);
+EXPORT_SYMBOL_GPL_FOR(kvm_cpuid, "kvm,kvm-intel,kvm-amd");
 
 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 {
@@ -1671,4 +1671,4 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_cpuid, "kvm,kvm-intel,kvm-amd");
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 4f0a943..eb3f5d5 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -919,7 +919,7 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
		return false;
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
 }
-EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
+EXPORT_SYMBOL_GPL_FOR(kvm_hv_assist_page_enabled, "kvm,kvm-intel,kvm-amd");
 
 int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
 {
@@ -931,7 +931,7 @@
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
				     &hv_vcpu->vp_assist_page,
				     sizeof(struct hv_vp_assist_page));
 }
-EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
+EXPORT_SYMBOL_GPL_FOR(kvm_hv_get_assist_page, "kvm,kvm-intel,kvm-amd");
 
 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
 {
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 63f66c5..b15a6c4 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -89,7 +89,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
 
	return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
 }
-EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
+EXPORT_SYMBOL_GPL_FOR(kvm_cpu_has_injectable_intr, "kvm,kvm-intel,kvm-amd");
 
 /*
  * check if there is pending interrupt without
@@ -102,7 +102,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 
	return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
 }
-EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
+EXPORT_SYMBOL_GPL_FOR(kvm_cpu_has_interrupt, "kvm,kvm-intel,kvm-amd");
 
 /*
  * Read pending interrupt(from non-APIC source)
@@ -131,7 +131,7 @@ int kvm_cpu_get_extint(struct kvm_vcpu *v)
	} else
		return kvm_pic_read_irq(v->kvm); /* PIC */
 }
-EXPORT_SYMBOL_GPL(kvm_cpu_get_extint);
+EXPORT_SYMBOL_GPL_FOR(kvm_cpu_get_extint, "kvm,kvm-intel,kvm-amd");
 
 /*
  * Read pending interrupt vector and intack.
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 8136695..606ef80 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -120,7 +120,7 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
	irq->level = 1;
	irq->shorthand = APIC_DEST_NOSHORT;
 }
-EXPORT_SYMBOL_GPL(kvm_set_msi_irq);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_msi_irq, "kvm,kvm-intel,kvm-amd");
 
 static inline bool kvm_msi_route_invalid(struct kvm *kvm,
					 struct kvm_kernel_irq_routing_entry *e)
@@ -361,7 +361,7 @@ bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
 
	return r == 1;
 }
-EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);
+EXPORT_SYMBOL_GPL_FOR(kvm_intr_is_single_vcpu, "kvm,kvm-intel,kvm-amd");
 
 #define IOAPIC_ROUTING_ENTRY(irq) \
	{ .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c
index ded0bd6..7e5bb6c 100644
--- a/arch/x86/kvm/kvm_onhyperv.c
+++ b/arch/x86/kvm/kvm_onhyperv.c
@@ -101,13 +101,13 @@ int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
 
	return __hv_flush_remote_tlbs_range(kvm, &range);
 }
-EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);
+EXPORT_SYMBOL_GPL_FOR(hv_flush_remote_tlbs_range, "kvm,kvm-intel,kvm-amd");
 
 int hv_flush_remote_tlbs(struct kvm *kvm)
 {
	return __hv_flush_remote_tlbs_range(kvm, NULL);
 }
-EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);
+EXPORT_SYMBOL_GPL_FOR(hv_flush_remote_tlbs, "kvm,kvm-intel,kvm-amd");
 
 void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
 {
@@ -121,4 +121,4 @@ void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
		spin_unlock(&kvm_arch->hv_root_tdp_lock);
	}
 }
-EXPORT_SYMBOL_GPL(hv_track_root_tdp);
+EXPORT_SYMBOL_GPL_FOR(hv_track_root_tdp, "kvm,kvm-intel,kvm-amd");
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3c83951..5be399b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -136,7 +136,7 @@ static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
 }
 
 __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
-EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
+EXPORT_SYMBOL_GPL_FOR(kvm_has_noapic_vcpu, "kvm,kvm-intel,kvm-amd");
 
 __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
 __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);
@@ -696,7 +696,7 @@ bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
 
	return ((max_updated_irr != -1) && (max_updated_irr == *max_irr));
 }
-EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
+EXPORT_SYMBOL_GPL_FOR(__kvm_apic_update_irr, "kvm,kvm-intel,kvm-amd");
 
 bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
 {
@@ -707,7 +707,7 @@
		apic->irr_pending = true;
	return irr_updated;
 }
-EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
+EXPORT_SYMBOL_GPL_FOR(kvm_apic_update_irr, "kvm,kvm-intel,kvm-amd");
 
 static inline int apic_search_irr(struct kvm_lapic *apic)
 {
@@ -750,7 +750,7 @@ void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
 {
	apic_clear_irr(vec, vcpu->arch.apic);
 }
-EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
+EXPORT_SYMBOL_GPL_FOR(kvm_apic_clear_irr, "kvm,kvm-intel,kvm-amd");
 
 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 {
@@ -825,7 +825,7 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
 }
-EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
+EXPORT_SYMBOL_GPL_FOR(kvm_lapic_find_highest_irr, "kvm,kvm-intel,kvm-amd");
 
 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
@@ -987,7 +987,7 @@ void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
 {
	apic_update_ppr(vcpu->arch.apic);
 }
-EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
+EXPORT_SYMBOL_GPL_FOR(kvm_apic_update_ppr, "kvm,kvm-intel,kvm-amd");
 
 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
 {
@@ -1098,7 +1098,7 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
		return false;
	}
 }
-EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
+EXPORT_SYMBOL_GPL_FOR(kvm_apic_match_dest, "kvm,kvm-intel,kvm-amd");
 
 int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
			const unsigned long *bitmap, u32 bitmap_size)
@@ -1510,7 +1510,7 @@ void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
+EXPORT_SYMBOL_GPL_FOR(kvm_apic_set_eoi_accelerated, "kvm,kvm-intel,kvm-amd");
 
 void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
 {
@@ -1535,7 +1535,7 @@
	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
 }
-EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);
+EXPORT_SYMBOL_GPL_FOR(kvm_apic_send_ipi, "kvm,kvm-intel,kvm-amd");
 
 static u32 apic_get_tmcct(struct kvm_lapic *apic)
 {
@@ -1652,7 +1652,7 @@ u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
 
	return valid_reg_mask;
 }
-EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask);
+EXPORT_SYMBOL_GPL_FOR(kvm_lapic_readable_reg_mask, "kvm,kvm-intel,kvm-amd");
 
 static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
			      void *data)
@@ -1884,7 +1884,7 @@ void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
	    lapic_timer_int_injected(vcpu))
		__kvm_wait_lapic_expire(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
+EXPORT_SYMBOL_GPL_FOR(kvm_wait_lapic_expire, "kvm,kvm-intel,kvm-amd");
 
 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
 {
@@ -2198,7 +2198,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
 out:
	preempt_enable();
 }
-EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
+EXPORT_SYMBOL_GPL_FOR(kvm_lapic_expired_hv_timer, "kvm,kvm-intel,kvm-amd");
 
 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
 {
@@ -2451,7 +2451,7 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
 {
	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
 }
-EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+EXPORT_SYMBOL_GPL_FOR(kvm_lapic_set_eoi, "kvm,kvm-intel,kvm-amd");
 
 #define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13))
@@ -2511,7 +2511,7 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
	else
		kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
 }
-EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
+EXPORT_SYMBOL_GPL_FOR(kvm_apic_write_nodecode, "kvm,kvm-intel,kvm-amd");
 
 void kvm_free_lapic(struct kvm_vcpu *vcpu)
 {
@@ -2699,7 +2699,7 @@ int kvm_alloc_apic_access_page(struct kvm *kvm)
	mutex_unlock(&kvm->slots_lock);
	return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);
+EXPORT_SYMBOL_GPL_FOR(kvm_alloc_apic_access_page, "kvm,kvm-intel,kvm-amd");
 
 void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu)
 {
@@ -2962,7 +2962,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
	__apic_update_ppr(apic, &ppr);
	return apic_has_interrupt_for_ppr(apic, ppr);
 }
-EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
+EXPORT_SYMBOL_GPL_FOR(kvm_apic_has_interrupt, "kvm,kvm-intel,kvm-amd");
 
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 {
@@ -3021,7 +3021,7 @@ void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector)
	}
 }
-EXPORT_SYMBOL_GPL(kvm_apic_ack_interrupt);
+EXPORT_SYMBOL_GPL_FOR(kvm_apic_ack_interrupt, "kvm,kvm-intel,kvm-amd");
 
 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
				struct kvm_lapic_state *s, bool set)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 22e7ad2..f3cd1ec 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3613,7 +3613,7 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
		write_unlock(&kvm->mmu_lock);
	}
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_free_roots, "kvm,kvm-intel,kvm-amd");
 
 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 {
@@ -3640,7 +3640,7 @@
	kvm_mmu_free_roots(kvm, mmu, roots_to_free);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_free_guest_mode_roots, "kvm,kvm-intel,kvm-amd");
 
 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
			    u8 level)
@@ -4620,7 +4620,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 
	return r;
 }
-EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
+EXPORT_SYMBOL_GPL_FOR(kvm_handle_page_fault, "kvm,kvm-intel,kvm-amd");
 
 #ifdef CONFIG_X86_64
 static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
@@ -4908,7 +4908,7 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
			__clear_sp_write_flooding_count(sp);
	}
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_new_pgd, "kvm,kvm-intel,kvm-amd");
 
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
			   unsigned int access)
@@ -5547,7 +5547,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
	kvm_mmu_new_pgd(vcpu, nested_cr3);
 }
-EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
+EXPORT_SYMBOL_GPL_FOR(kvm_init_shadow_npt_mmu, "kvm,kvm-intel,kvm-amd");
 
 static union kvm_cpu_role
 kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
@@ -5601,7 +5601,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 
	kvm_mmu_new_pgd(vcpu, new_eptp);
 }
-EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
+EXPORT_SYMBOL_GPL_FOR(kvm_init_shadow_ept_mmu, "kvm,kvm-intel,kvm-amd");
 
 static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
			     union kvm_cpu_role cpu_role)
@@ -5666,7 +5666,7 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu)
	else
		init_kvm_softmmu(vcpu, cpu_role);
 }
-EXPORT_SYMBOL_GPL(kvm_init_mmu);
+EXPORT_SYMBOL_GPL_FOR(kvm_init_mmu, "kvm,kvm-intel,kvm-amd");
 
 void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
@@ -5702,7 +5702,7 @@ void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
	kvm_mmu_unload(vcpu);
	kvm_init_mmu(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_reset_context, "kvm,kvm-intel,kvm-amd");
 
 int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
@@ -6114,7 +6114,7 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
 
	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
				       insn_len);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_page_fault, "kvm,kvm-intel,kvm-amd");
 
 void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
 {
@@ -6130,7 +6130,7 @@
		pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
	pr_cont("\n");
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_print_sptes, "kvm,kvm-intel,kvm-amd");
 
 static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				      u64 addr, hpa_t root_hpa)
@@ -6196,7 +6196,7 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			__kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
	}
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_invalidate_addr, "kvm,kvm-intel,kvm-amd");
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
@@ -6213,7 +6213,7 @@
	kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
	++vcpu->stat.invlpg;
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_invlpg, "kvm,kvm-intel,kvm-amd");
 
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
@@ -6266,7 +6266,7 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
	else
		max_huge_page_level = PG_LEVEL_2M;
 }
-EXPORT_SYMBOL_GPL(kvm_configure_mmu);
+EXPORT_SYMBOL_GPL_FOR(kvm_configure_mmu, "kvm,kvm-intel,kvm-amd");
 
 static void free_mmu_pages(struct kvm_mmu *mmu)
 {
@@ -6916,7 +6916,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 
	return need_tlb_flush;
 }
-EXPORT_SYMBOL_GPL(kvm_zap_gfn_range);
+EXPORT_SYMBOL_GPL_FOR(kvm_zap_gfn_range, "kvm,kvm-intel,kvm-amd");
 
 static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
					   const struct kvm_memory_slot *slot)
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 561c331..cb182a1 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -238,7 +238,7 @@ int kvm_page_track_register_notifier(struct kvm *kvm,
	write_unlock(&kvm->mmu_lock);
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
+EXPORT_SYMBOL_GPL_FOR(kvm_page_track_register_notifier, "kvm,kvm-intel,kvm-amd");
 
 /*
  * stop receiving the event interception. It is the opposed operation of
@@ -258,7 +258,7 @@ void kvm_page_track_unregister_notifier(struct kvm *kvm,
 
	kvm_put_kvm(kvm);
 }
-EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
+EXPORT_SYMBOL_GPL_FOR(kvm_page_track_unregister_notifier, "kvm,kvm-intel,kvm-amd");
 
 /*
  * Notify the node that write access is intercepted and write emulation is
@@ -337,7 +337,7 @@ int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn)
 
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn);
+EXPORT_SYMBOL_GPL_FOR(kvm_write_track_add_gfn, "kvm,kvm-intel,kvm-amd");
 
 /*
  * remove the guest page from the tracking pool which stops the interception
@@ -367,5 +367,5 @@ int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn)
 
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn);
+EXPORT_SYMBOL_GPL_FOR(kvm_write_track_remove_gfn, "kvm,kvm-intel,kvm-amd");
 #endif
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 22551e2..879957e 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -22,7 +22,7 @@
 bool __read_mostly enable_mmio_caching = true;
 static bool __ro_after_init allow_mmio_caching;
 module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
-EXPORT_SYMBOL_GPL(enable_mmio_caching);
+EXPORT_SYMBOL_GPL_FOR(enable_mmio_caching, "kvm,kvm-intel,kvm-amd");
 
 bool __read_mostly kvm_ad_enabled;
@@ -431,7 +431,7 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
	shadow_mmio_mask = mmio_mask;
	shadow_mmio_access_mask = access_mask;
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_set_mmio_spte_mask, "kvm,kvm-intel,kvm-amd");
 
 void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
 {
@@ -442,7 +442,7 @@
	shadow_me_value = me_value;
	shadow_me_mask = me_mask;
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_set_me_spte_mask, "kvm,kvm-intel,kvm-amd");
 
 void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
 {
@@ -474,7 +474,7 @@
	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
				   VMX_EPT_RWX_MASK | VMX_EPT_SUPPRESS_VE_BIT, 0);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_set_ept_masks, "kvm,kvm-intel,kvm-amd");
 
 void kvm_mmu_reset_all_pte_masks(void)
 {
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 47a4628..56f46f2 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -27,10 +27,10 @@
 #define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
 
 struct x86_pmu_capability __read_mostly kvm_pmu_cap;
-EXPORT_SYMBOL_GPL(kvm_pmu_cap);
+EXPORT_SYMBOL_GPL_FOR(kvm_pmu_cap, "kvm,kvm-intel,kvm-amd");
 
 struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
-EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);
+EXPORT_SYMBOL_GPL_FOR(kvm_pmu_eventsel, "kvm,kvm-intel,kvm-amd");
 
 /* Precise Distribution of Instructions Retired (PDIR) */
 static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
@@ -318,7 +318,7 @@ void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
	pmc->counter &= pmc_bitmask(pmc);
	pmc_update_sample_period(pmc);
 }
-EXPORT_SYMBOL_GPL(pmc_write_counter);
+EXPORT_SYMBOL_GPL_FOR(pmc_write_counter, "kvm,kvm-intel,kvm-amd");
 
 static int filter_cmp(const void *pa, const void *pb, u64 mask)
 {
@@ -898,7 +898,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
		kvm_pmu_incr_counter(pmc);
	}
 }
-EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
+EXPORT_SYMBOL_GPL_FOR(kvm_pmu_trigger_event, "kvm,kvm-intel,kvm-amd");
 
 static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2e71348..baa4ab8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -98,10 +98,10 @@
  * vendor module being reloaded with different module parameters.
  */
 struct kvm_caps kvm_caps __read_mostly;
-EXPORT_SYMBOL_GPL(kvm_caps);
+EXPORT_SYMBOL_GPL_FOR(kvm_caps, "kvm,kvm-intel,kvm-amd");
 
 struct kvm_host_values kvm_host __read_mostly;
-EXPORT_SYMBOL_GPL(kvm_host);
+EXPORT_SYMBOL_GPL_FOR(kvm_host, "kvm,kvm-intel,kvm-amd");
 
 #define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
@@ -155,7 +155,7 @@ module_param(ignore_msrs, bool, 0644);
 
 bool __read_mostly report_ignored_msrs = true;
 module_param(report_ignored_msrs, bool, 0644);
-EXPORT_SYMBOL_GPL(report_ignored_msrs);
+EXPORT_SYMBOL_GPL_FOR(report_ignored_msrs, "kvm,kvm-intel,kvm-amd");
 
 unsigned int min_timer_period_us = 200;
 module_param(min_timer_period_us, uint, 0644);
@@ -172,7 +172,7 @@ module_param(vector_hashing, bool, 0444);
 
 bool __read_mostly enable_vmware_backdoor = false;
 module_param(enable_vmware_backdoor, bool, 0444);
-EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
+EXPORT_SYMBOL_GPL_FOR(enable_vmware_backdoor, "kvm,kvm-intel,kvm-amd");
 
 /*
  * Flags to manipulate forced emulation behavior (any non-zero value will
@@ -187,7 +187,7 @@ module_param(pi_inject_timer, bint, 0644);
 
 /* Enable/disable PMU virtualization */
 bool __read_mostly enable_pmu = true;
-EXPORT_SYMBOL_GPL(enable_pmu);
+EXPORT_SYMBOL_GPL_FOR(enable_pmu, "kvm,kvm-intel,kvm-amd");
 module_param(enable_pmu, bool, 0444);
 
 bool __read_mostly eager_page_split = true;
@@ -214,7 +214,7 @@ struct kvm_user_return_msrs {
 };
 
 u32 __read_mostly kvm_nr_uret_msrs;
-EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
+EXPORT_SYMBOL_GPL_FOR(kvm_nr_uret_msrs, "kvm,kvm-intel,kvm-amd");
 static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
 static struct kvm_user_return_msrs __percpu *user_return_msrs;
@@ -224,10 +224,10 @@ static struct kvm_user_return_msrs __percpu *user_return_msrs;
				| XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)
 
 bool __read_mostly allow_smaller_maxphyaddr = 0;
-EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
+EXPORT_SYMBOL_GPL_FOR(allow_smaller_maxphyaddr, "kvm,kvm-intel,kvm-amd");
 
 bool __read_mostly enable_apicv = true;
-EXPORT_SYMBOL_GPL(enable_apicv);
+EXPORT_SYMBOL_GPL_FOR(enable_apicv, "kvm,kvm-intel,kvm-amd");
 
 const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
@@ -611,7 +611,7 @@ int kvm_add_user_return_msr(u32 msr)
	kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
	return kvm_nr_uret_msrs++;
 }
-EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);
+EXPORT_SYMBOL_GPL_FOR(kvm_add_user_return_msr, "kvm,kvm-intel,kvm-amd");
 
 int kvm_find_user_return_msr(u32 msr)
 {
@@ -623,7 +623,7 @@
	}
	return -1;
 }
-EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
+EXPORT_SYMBOL_GPL_FOR(kvm_find_user_return_msr, "kvm,kvm-intel,kvm-amd");
 
 static void kvm_user_return_msr_cpu_online(void)
 {
@@ -658,7 +658,7 @@
	}
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_user_return_msr, "kvm,kvm-intel,kvm-amd");
 
 static void drop_user_return_notifiers(void)
 {
@@ -680,7 +680,7 @@ noinstr void kvm_spurious_fault(void)
	/* Fault while not rebooting. We want the trace. */
	BUG_ON(!kvm_rebooting);
 }
-EXPORT_SYMBOL_GPL(kvm_spurious_fault);
+EXPORT_SYMBOL_GPL_FOR(kvm_spurious_fault, "kvm,kvm-intel,kvm-amd");
 
 #define EXCPT_BENIGN		0
 #define EXCPT_CONTRIBUTORY	1
@@ -785,7 +785,7 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
	ex->has_payload = false;
	ex->payload = 0;
 }
-EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
+EXPORT_SYMBOL_GPL_FOR(kvm_deliver_exception_payload, "kvm,kvm-intel,kvm-amd");
 
 static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
				       bool has_error_code, u32 error_code,
@@ -891,20 +891,20 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
 }
-EXPORT_SYMBOL_GPL(kvm_queue_exception);
+EXPORT_SYMBOL_GPL_FOR(kvm_queue_exception, "kvm,kvm-intel,kvm-amd");
 
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
	kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
 }
-EXPORT_SYMBOL_GPL(kvm_requeue_exception);
+EXPORT_SYMBOL_GPL_FOR(kvm_requeue_exception, "kvm,kvm-intel,kvm-amd");
 
 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
			   unsigned long payload)
 {
	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
 }
-EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
+EXPORT_SYMBOL_GPL_FOR(kvm_queue_exception_p, "kvm,kvm-intel,kvm-amd");
 
 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
				    u32 error_code, unsigned long payload)
@@ -922,7 +922,7 @@ int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 
	return 1;
 }
-EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
+EXPORT_SYMBOL_GPL_FOR(kvm_complete_insn_gp, "kvm,kvm-intel,kvm-amd");
 
 static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
 {
@@ -972,7 +972,7 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 
	fault_mmu->inject_page_fault(vcpu, fault);
 }
-EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);
+EXPORT_SYMBOL_GPL_FOR(kvm_inject_emulated_page_fault, "kvm,kvm-intel,kvm-amd");
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
@@ -984,13 +984,13 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
 }
-EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
+EXPORT_SYMBOL_GPL_FOR(kvm_queue_exception_e, "kvm,kvm-intel,kvm-amd");
 
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
	kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
 }
-EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
+EXPORT_SYMBOL_GPL_FOR(kvm_requeue_exception_e, "kvm,kvm-intel,kvm-amd");
 
 /*
  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
@@ -1012,7 +1012,7 @@ bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
	kvm_queue_exception(vcpu, UD_VECTOR);
	return false;
 }
-EXPORT_SYMBOL_GPL(kvm_require_dr);
+EXPORT_SYMBOL_GPL_FOR(kvm_require_dr, "kvm,kvm-intel,kvm-amd");
 
 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
 {
@@ -1067,7 +1067,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
	return 1;
 }
-EXPORT_SYMBOL_GPL(load_pdptrs);
+EXPORT_SYMBOL_GPL_FOR(load_pdptrs, "kvm,kvm-intel,kvm-amd");
 
 static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
@@ -1120,7 +1120,7 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned lon
	if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
		kvm_mmu_reset_context(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_post_set_cr0);
+EXPORT_SYMBOL_GPL_FOR(kvm_post_set_cr0, "kvm,kvm-intel,kvm-amd");
 
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
@@ -1161,13 +1161,13 @@
 
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_cr0);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_cr0, "kvm,kvm-intel,kvm-amd");
 
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
 }
-EXPORT_SYMBOL_GPL(kvm_lmsw);
+EXPORT_SYMBOL_GPL_FOR(kvm_lmsw, "kvm,kvm-intel,kvm-amd");
 
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
 {
@@ -1190,7 +1190,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
		    kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
			write_pkru(vcpu->arch.pkru);
 }
-EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
+EXPORT_SYMBOL_GPL_FOR(kvm_load_guest_xsave_state, "kvm,kvm-intel,kvm-amd");
 
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 {
@@ -1216,7 +1216,7 @@
	}
 }
-EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
+EXPORT_SYMBOL_GPL_FOR(kvm_load_host_xsave_state, "kvm,kvm-intel,kvm-amd");
 
 #ifdef CONFIG_X86_64
 static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
@@ -1281,7 +1281,7 @@ int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
 
	return kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_xsetbv, "kvm,kvm-intel,kvm-amd");
 
 bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
@@ -1293,7 +1293,7 @@ bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 
	return true;
 }
-EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);
+EXPORT_SYMBOL_GPL_FOR(__kvm_is_valid_cr4, "kvm,kvm-intel,kvm-amd");
 
 static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
@@ -1341,7 +1341,7 @@ void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned lon
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_post_set_cr4);
+EXPORT_SYMBOL_GPL_FOR(kvm_post_set_cr4, "kvm,kvm-intel,kvm-amd");
 
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
@@ -1372,7 +1372,7 @@
 
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_cr4);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_cr4, "kvm,kvm-intel,kvm-amd");
 
 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
 {
@@ -1464,7 +1464,7 @@
 
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_cr3);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_cr3, "kvm,kvm-intel,kvm-amd");
 
 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
@@ -1476,7 +1476,7 @@
		vcpu->arch.cr8 = cr8;
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_cr8);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_cr8, "kvm,kvm-intel,kvm-amd");
 
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 {
@@ -1485,7 +1485,7 @@
	else
		return vcpu->arch.cr8;
 }
-EXPORT_SYMBOL_GPL(kvm_get_cr8);
+EXPORT_SYMBOL_GPL_FOR(kvm_get_cr8, "kvm,kvm-intel,kvm-amd");
 
 static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
 {
@@ -1510,7 +1510,7 @@ void kvm_update_dr7(struct kvm_vcpu *vcpu)
	if (dr7 & DR7_BP_EN_MASK)
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
 }
-EXPORT_SYMBOL_GPL(kvm_update_dr7);
+EXPORT_SYMBOL_GPL_FOR(kvm_update_dr7, "kvm,kvm-intel,kvm-amd");
 
 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
 {
@@ -1551,7 +1551,7 @@
 
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_dr);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_dr, "kvm,kvm-intel,kvm-amd");
 
 unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
@@ -1568,7 +1568,7 @@ unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr)
		return vcpu->arch.dr7;
	}
 }
-EXPORT_SYMBOL_GPL(kvm_get_dr);
+EXPORT_SYMBOL_GPL_FOR(kvm_get_dr, "kvm,kvm-intel,kvm-amd");
 
 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
 {
@@ -1584,7 +1584,7 @@
	kvm_rdx_write(vcpu, data >> 32);
	return kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_rdpmc, "kvm,kvm-intel,kvm-amd");
 
 /*
  * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
@@ -1721,7 +1721,7 @@ bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 
	return __kvm_valid_efer(vcpu, efer);
 }
-EXPORT_SYMBOL_GPL(kvm_valid_efer);
+EXPORT_SYMBOL_GPL_FOR(kvm_valid_efer, "kvm,kvm-intel,kvm-amd");
 
 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
@@ -1764,7 +1764,7 @@ void kvm_enable_efer_bits(u64 mask)
 {
	efer_reserved_bits &= ~mask;
 }
-EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+EXPORT_SYMBOL_GPL_FOR(kvm_enable_efer_bits, "kvm,kvm-intel,kvm-amd");
 
 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
 {
@@ -1807,7 +1807,7 @@
 
	return allowed;
 }
-EXPORT_SYMBOL_GPL(kvm_msr_allowed);
+EXPORT_SYMBOL_GPL_FOR(kvm_msr_allowed, "kvm,kvm-intel,kvm-amd");
 
 /*
  * Write @data into the MSR specified by @index.  Select MSR specific fault
@@ -1936,7 +1936,7 @@ int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
		return KVM_MSR_RET_FILTERED;
	return kvm_get_msr_ignored_check(vcpu, index, data, false);
 }
-EXPORT_SYMBOL_GPL(kvm_get_msr_with_filter);
+EXPORT_SYMBOL_GPL_FOR(kvm_get_msr_with_filter, "kvm,kvm-intel,kvm-amd");
 
 int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
 {
@@ -1944,19 +1944,19 @@
		return KVM_MSR_RET_FILTERED;
	return kvm_set_msr_ignored_check(vcpu, index, data, false);
 }
-EXPORT_SYMBOL_GPL(kvm_set_msr_with_filter);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_msr_with_filter, "kvm,kvm-intel,kvm-amd");
 
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 {
	return kvm_get_msr_ignored_check(vcpu, index, data, false);
 }
-EXPORT_SYMBOL_GPL(kvm_get_msr);
+EXPORT_SYMBOL_GPL_FOR(kvm_get_msr, "kvm,kvm-intel,kvm-amd");
 
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 {
	return kvm_set_msr_ignored_check(vcpu, index, data, false);
 }
-EXPORT_SYMBOL_GPL(kvm_set_msr);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_msr, "kvm,kvm-intel,kvm-amd");
 
 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
 {
@@ -2045,7 +2045,7 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
 
	return kvm_x86_call(complete_emulated_msr)(vcpu, r);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_rdmsr, "kvm,kvm-intel,kvm-amd");
 
 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 {
@@ -2070,7 +2070,7 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 
	return kvm_x86_call(complete_emulated_msr)(vcpu, r);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_wrmsr, "kvm,kvm-intel,kvm-amd");
 
 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
 {
@@ -2082,14 +2082,14 @@ int kvm_emulate_invd(struct kvm_vcpu *vcpu)
	/* Treat an INVD instruction as a NOP and just skip it. */
	return kvm_emulate_as_nop(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_invd);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_invd, "kvm,kvm-intel,kvm-amd");
 
 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
 {
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
 }
-EXPORT_SYMBOL_GPL(kvm_handle_invalid_op);
+EXPORT_SYMBOL_GPL_FOR(kvm_handle_invalid_op, "kvm,kvm-intel,kvm-amd");
 
 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
@@ -2105,13 +2105,13 @@ int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
 {
	return kvm_emulate_monitor_mwait(vcpu, "MWAIT");
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_mwait);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_mwait, "kvm,kvm-intel,kvm-amd");
 
 int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
 {
	return kvm_emulate_monitor_mwait(vcpu, "MONITOR");
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_monitor);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_monitor, "kvm,kvm-intel,kvm-amd");
 
 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
 {
@@ -2188,7 +2188,7 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
 
	return ret;
 }
-EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
+EXPORT_SYMBOL_GPL_FOR(handle_fastpath_set_msr_irqoff, "kvm,kvm-intel,kvm-amd");
 
 /*
  * Adapt set_msr() to msr_io()'s calling convention
@@ -2554,7 +2554,7 @@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
	return vcpu->arch.l1_tsc_offset +
	       kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
 }
-EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
+EXPORT_SYMBOL_GPL_FOR(kvm_read_l1_tsc, "kvm,kvm-intel,kvm-amd");
 
 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
 {
@@ -2569,7 +2569,7 @@ u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
		nested_offset += l2_offset;
	return nested_offset;
 }
-EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);
+EXPORT_SYMBOL_GPL_FOR(kvm_calc_nested_tsc_offset, "kvm,kvm-intel,kvm-amd");
 
 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
 {
@@ -2579,7 +2579,7 @@ u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
 
	return l1_multiplier;
 }
-EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
+EXPORT_SYMBOL_GPL_FOR(kvm_calc_nested_tsc_multiplier, "kvm,kvm-intel,kvm-amd");
 
 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
 {
@@ -3642,7 +3642,7 @@ void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
		kvm_vcpu_flush_tlb_guest(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
+EXPORT_SYMBOL_GPL_FOR(kvm_service_local_tlb_flush_requests, "kvm,kvm-intel,kvm-amd");
 
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
@@ -4128,7 +4128,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	}
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_set_msr_common);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_msr_common, "kvm,kvm-intel,kvm-amd");
 
 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 {
@@ -4477,7 +4477,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	}
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_get_msr_common);
+EXPORT_SYMBOL_GPL_FOR(kvm_get_msr_common, "kvm,kvm-intel,kvm-amd");
 
 /*
  * Read or write a bunch of msrs. All parameters are kernel addresses.
@@ -7562,7 +7562,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
	u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_gva_to_gpa_read, "kvm,kvm-intel,kvm-amd");
 
 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
@@ -7573,7 +7573,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
	access |= PFERR_WRITE_MASK;
	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
+EXPORT_SYMBOL_GPL_FOR(kvm_mmu_gva_to_gpa_write, "kvm,kvm-intel,kvm-amd");
 
 /* uses this to access any guest's mapped memory without checking CPL */
 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
@@ -7659,7 +7659,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
					  exception);
 }
-EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
+EXPORT_SYMBOL_GPL_FOR(kvm_read_guest_virt, "kvm,kvm-intel,kvm-amd");
 
 static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
			     gva_t addr, void *val, unsigned int bytes,
@@ -7731,7 +7731,7 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
					   PFERR_WRITE_MASK, exception);
 }
-EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
+EXPORT_SYMBOL_GPL_FOR(kvm_write_guest_virt_system, "kvm,kvm-intel,kvm-amd");
 
 static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
				  void *insn, int insn_len)
@@ -7765,7 +7765,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
 
	return kvm_emulate_instruction(vcpu, emul_type);
 }
-EXPORT_SYMBOL_GPL(handle_ud);
+EXPORT_SYMBOL_GPL_FOR(handle_ud, "kvm,kvm-intel,kvm-amd");
 
 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			    gpa_t gpa, bool write)
@@ -8245,7 +8245,7 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
	kvm_emulate_wbinvd_noskip(vcpu);
	return kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_wbinvd, "kvm,kvm-intel,kvm-amd");
@@ -8740,7 +8740,7 @@ void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
		kvm_set_rflags(vcpu, ctxt->eflags);
	}
 }
-EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
+EXPORT_SYMBOL_GPL_FOR(kvm_inject_realmode_interrupt, "kvm,kvm-intel,kvm-amd");
 
 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
					   u8 ndata, u8 *insn_bytes, u8 insn_size)
@@ -8805,13 +8805,13 @@ void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
 {
	prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0);
 }
-EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit);
+EXPORT_SYMBOL_GPL_FOR(__kvm_prepare_emulation_failure_exit, "kvm,kvm-intel,kvm-amd");
 
 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
 {
	__kvm_prepare_emulation_failure_exit(vcpu, NULL, 0);
 }
-EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit);
+EXPORT_SYMBOL_GPL_FOR(kvm_prepare_emulation_failure_exit, "kvm,kvm-intel,kvm-amd");
 
 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
 {
@@ -8935,7 +8935,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
		r = kvm_vcpu_do_singlestep(vcpu);
	return r;
 }
-EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
+EXPORT_SYMBOL_GPL_FOR(kvm_skip_emulated_instruction, "kvm,kvm-intel,kvm-amd");
 
 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
 {
@@ -9066,7 +9066,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
 
	return r;
 }
-EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);
+EXPORT_SYMBOL_GPL_FOR(x86_decode_emulated_instruction, "kvm,kvm-intel,kvm-amd");
 
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len)
@@ -9274,14 +9274,14 @@ int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
 {
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_instruction, "kvm,kvm-intel,kvm-amd");
 
 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
					void *insn, int insn_len)
 {
	return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_instruction_from_buffer, "kvm,kvm-intel,kvm-amd");
 
 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
 {
@@ -9376,7 +9376,7 @@ int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
		ret = kvm_fast_pio_out(vcpu, size, port);
	return ret && kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_fast_pio);
+EXPORT_SYMBOL_GPL_FOR(kvm_fast_pio, "kvm,kvm-intel,kvm-amd");
 
 static int kvmclock_cpu_down_prep(unsigned int cpu)
 {
@@ -9803,7 +9803,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
	kmem_cache_destroy(x86_emulator_cache);
	return r;
 }
-EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
+EXPORT_SYMBOL_GPL_FOR(kvm_x86_vendor_init, "kvm,kvm-intel,kvm-amd");
 
 void kvm_x86_vendor_exit(void)
 {
@@ -9837,7 +9837,7 @@ void kvm_x86_vendor_exit(void)
	kvm_x86_ops.enable_virtualization_cpu = NULL;
	mutex_unlock(&vendor_module_lock);
 }
-EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
+EXPORT_SYMBOL_GPL_FOR(kvm_x86_vendor_exit, "kvm,kvm-intel,kvm-amd");
 
 #ifdef CONFIG_X86_64
 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
@@ -9901,7 +9901,7 @@ bool kvm_apicv_activated(struct kvm *kvm)
 {
	return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
 }
-EXPORT_SYMBOL_GPL(kvm_apicv_activated);
+EXPORT_SYMBOL_GPL_FOR(kvm_apicv_activated, "kvm,kvm-intel,kvm-amd");
 
 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
 {
@@ -9911,7 +9911,7 @@ bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
 
	return (vm_reasons | vcpu_reasons) == 0;
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated);
+EXPORT_SYMBOL_GPL_FOR(kvm_vcpu_apicv_activated, "kvm,kvm-intel,kvm-amd");
 
 static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
				       enum kvm_apicv_inhibit reason, bool set)
@@ -10073,7 +10073,7 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
	++vcpu->stat.hypercalls;
	return ret;
 }
-EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall);
+EXPORT_SYMBOL_GPL_FOR(__kvm_emulate_hypercall, "kvm,kvm-intel,kvm-amd");
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
@@ -10106,7 +10106,7 @@
 
	return kvm_skip_emulated_instruction(vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_hypercall, "kvm,kvm-intel,kvm-amd");
 
 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 {
@@ -10549,7 +10549,7 @@ void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
	preempt_enable();
	up_read(&vcpu->kvm->arch.apicv_update_lock);
 }
-EXPORT_SYMBOL_GPL(__kvm_vcpu_update_apicv);
+EXPORT_SYMBOL_GPL_FOR(__kvm_vcpu_update_apicv, "kvm,kvm-intel,kvm-amd");
 
 static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
 {
@@ -10625,7 +10625,7 @@ void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
	__kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
	up_write(&kvm->arch.apicv_update_lock);
 }
-EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_or_clear_apicv_inhibit, "kvm,kvm-intel,kvm-amd");
 
 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 {
@@ -11300,7 +11300,7 @@ int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
 {
	return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_halt_noskip, "kvm,kvm-intel,kvm-amd");
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
@@ -11311,7 +11311,7 @@
	 */
	return kvm_emulate_halt_noskip(vcpu) && ret;
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_halt, "kvm,kvm-intel,kvm-amd");
 
 fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
 {
@@ -11329,7 +11329,7 @@
 
	return EXIT_FASTPATH_EXIT_HANDLED;
 }
-EXPORT_SYMBOL_GPL(handle_fastpath_hlt);
+EXPORT_SYMBOL_GPL_FOR(handle_fastpath_hlt, "kvm,kvm-intel,kvm-amd");
 
 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
 {
@@ -11338,7 +11338,7 @@
	return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
				  KVM_EXIT_AP_RESET_HOLD) && ret;
 }
-EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+EXPORT_SYMBOL_GPL_FOR(kvm_emulate_ap_reset_hold, "kvm,kvm-intel,kvm-amd");
 
 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
 {
@@ -11843,7 +11843,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
	kvm_set_rflags(vcpu, ctxt->eflags);
	return 1;
 }
-EXPORT_SYMBOL_GPL(kvm_task_switch);
+EXPORT_SYMBOL_GPL_FOR(kvm_task_switch, "kvm,kvm-intel,kvm-amd");
 
 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
@@ -12519,7 +12519,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
	if (init_event)
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_reset);
+EXPORT_SYMBOL_GPL_FOR(kvm_vcpu_reset, "kvm,kvm-intel,kvm-amd");
 
 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 {
@@ -12531,7 +12531,7 @@
	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_rip_write(vcpu, 0);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
+EXPORT_SYMBOL_GPL_FOR(kvm_vcpu_deliver_sipi_vector, "kvm,kvm-intel,kvm-amd");
 
 void kvm_arch_enable_virtualization(void)
 {
@@ -12836,7 +12836,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
 
	return (void __user *)hva;
 }
-EXPORT_SYMBOL_GPL(__x86_set_memory_region);
+EXPORT_SYMBOL_GPL_FOR(__x86_set_memory_region, "kvm,kvm-intel,kvm-amd");
 
 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
 {
@@ -13226,13 +13226,13 @@ unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
	return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
		     kvm_rip_read(vcpu));
 }
-EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
+EXPORT_SYMBOL_GPL_FOR(kvm_get_linear_rip, "kvm,kvm-intel,kvm-amd");
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
 {
	return kvm_get_linear_rip(vcpu) == linear_rip;
 }
-EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
+EXPORT_SYMBOL_GPL_FOR(kvm_is_linear_rip, "kvm,kvm-intel,kvm-amd");
 
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
 {
@@ -13243,7 +13243,7 @@
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
 }
-EXPORT_SYMBOL_GPL(kvm_get_rflags);
+EXPORT_SYMBOL_GPL_FOR(kvm_get_rflags, "kvm,kvm-intel,kvm-amd");
 
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
@@ -13258,7 +13258,7 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
	__kvm_set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_set_rflags);
+EXPORT_SYMBOL_GPL_FOR(kvm_set_rflags, "kvm,kvm-intel,kvm-amd");
 
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
 {
@@ -13475,19 +13475,19 @@ void kvm_arch_start_assignment(struct kvm *kvm)
	if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
		kvm_x86_call(pi_start_assignment)(kvm);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
+EXPORT_SYMBOL_GPL_FOR(kvm_arch_start_assignment, "kvm,kvm-intel,kvm-amd");
 
 void kvm_arch_end_assignment(struct kvm *kvm)
 {
	atomic_dec(&kvm->arch.assigned_device_count);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
+EXPORT_SYMBOL_GPL_FOR(kvm_arch_end_assignment, "kvm,kvm-intel,kvm-amd");
 
 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
 {
	return raw_atomic_read(&kvm->arch.assigned_device_count);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
+EXPORT_SYMBOL_GPL_FOR(kvm_arch_has_assigned_device, "kvm,kvm-intel,kvm-amd");
 
 static void kvm_noncoherent_dma_assignment_start_or_stop(struct kvm *kvm)
 {
@@ -13507,20 +13507,20 @@ void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
	if (atomic_inc_return(&kvm->arch.noncoherent_dma_count) == 1)
		kvm_noncoherent_dma_assignment_start_or_stop(kvm);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
+EXPORT_SYMBOL_GPL_FOR(kvm_arch_register_noncoherent_dma, "kvm,kvm-intel,kvm-amd");
 
 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
 {
	if (!atomic_dec_return(&kvm->arch.noncoherent_dma_count))
		kvm_noncoherent_dma_assignment_start_or_stop(kvm);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
+EXPORT_SYMBOL_GPL_FOR(kvm_arch_unregister_noncoherent_dma, "kvm,kvm-intel,kvm-amd");
 
 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
 {
	return atomic_read(&kvm->arch.noncoherent_dma_count);
 }
-EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
+EXPORT_SYMBOL_GPL_FOR(kvm_arch_has_noncoherent_dma, "kvm,kvm-intel,kvm-amd");
 
 bool kvm_arch_has_irq_bypass(void)
 {
@@ -13593,7 +13593,7 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 {
	return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
 }
-EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
+EXPORT_SYMBOL_GPL_FOR(kvm_arch_no_poll, "kvm,kvm-intel,kvm-amd");
 
 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
@@ -13633,7 +13633,7 @@ int kvm_spec_ctrl_test_value(u64 value)
 
	return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
+EXPORT_SYMBOL_GPL_FOR(kvm_spec_ctrl_test_value, "kvm,kvm-intel,kvm-amd");
 
 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
 {
@@ -13658,7 +13658,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
	}
	vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
 }
-EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
+EXPORT_SYMBOL_GPL_FOR(kvm_fixup_and_inject_pf_error, "kvm,kvm-intel,kvm-amd");
 
 /*
  * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
@@ -13687,7 +13687,7 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
 
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
+EXPORT_SYMBOL_GPL_FOR(kvm_handle_memory_failure, "kvm,kvm-intel,kvm-amd");
 
 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
 {
@@ -13751,7 +13751,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
		return 1;
	}
 }
-EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
+EXPORT_SYMBOL_GPL_FOR(kvm_handle_invpcid, "kvm,kvm-intel,kvm-amd");
 
 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
 {
@@ -13836,7 +13836,7 @@ int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
 
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write);
+EXPORT_SYMBOL_GPL_FOR(kvm_sev_es_mmio_write, "kvm,kvm-intel,kvm-amd");
 
 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
			 void *data)
@@ -13874,7 +13874,7 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
 
	return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
+EXPORT_SYMBOL_GPL_FOR(kvm_sev_es_mmio_read, "kvm,kvm-intel,kvm-amd");
 
 static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
 {
@@ -13962,7 +13962,7 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
	return in ? kvm_sev_es_ins(vcpu, size, port)
		  : kvm_sev_es_outs(vcpu, size, port);
 }
-EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
+EXPORT_SYMBOL_GPL_FOR(kvm_sev_es_string_io, "kvm,kvm-intel,kvm-amd");
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);