From: Ben Gardon <bgardon@google.com>
Date: Mon, 8 Apr 2019 11:07:30 -0700
Subject: kvm: mmu: Fix overflow on kvm mmu page limit calculation

commit bc8a3d8925a8fa09fa550e0da115d95851ce33c6 upstream.

KVM bases its memory usage limits on the total number of guest pages
across all memslots. However, those limits, and the calculations to
produce them, use 32-bit unsigned integers. This can result in overflow
if a VM has more guest pages than can be represented by a u32. As a
result of this overflow, KVM can use a low limit on the number of MMU
pages it will allocate. This makes KVM unable to map all of guest memory
at once, prompting spurious faults.
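
[Editor's note, not part of the upstream commit or this backport: a minimal
standalone userspace sketch of the failure mode. The 17 TiB guest size is a
made-up example, and it assumes x86_64 where unsigned long is 64 bits; the
20/1000 ratio mirrors KVM_PERMILLE_MMU_PAGES.]

/*
 * Illustration only: how the pre-patch u32 arithmetic wraps for a guest
 * larger than 16 TiB (more than 2^32 4 KiB pages).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 17 TiB guest, counted in 4 KiB pages (> UINT32_MAX). */
	uint64_t guest_pages = (17ULL << 40) / 4096;

	/* Old behaviour: the page count is truncated to 32 bits, and the
	 * multiply by 20 wraps again, so the limit collapses. */
	uint32_t limit32 = (uint32_t)guest_pages * 20 / 1000;

	/* Patched behaviour: 64-bit unsigned long keeps the full value. */
	unsigned long limit64 = (unsigned long)guest_pages * 20 / 1000;

	printf("u32 limit:           %u MMU pages\n", limit32);   /* ~1 million  */
	printf("unsigned long limit: %lu MMU pages\n", limit64);  /* ~91 million */
	return 0;
}

With u32 arithmetic the limit collapses to roughly 1 million MMU pages
instead of the roughly 91 million that the 20-per-mille rule intends.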

Tested: Ran all kvm-unit-tests on an Intel Haswell machine. This patch
introduced no new failures.

Signed-off-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[bwh: Backported to 3.16: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
 arch/x86/include/asm/kvm_host.h | 12 ++++++------
 arch/x86/kvm/mmu.c              | 13 ++++++-------
 arch/x86/kvm/mmu.h              |  2 +-
 arch/x86/kvm/x86.c              |  4 ++--
 4 files changed, 15 insertions(+), 16 deletions(-)

--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -88,7 +88,7 @@ static inline gfn_t gfn_to_index(gfn_t g
 #define IOPL_SHIFT 12

 #define KVM_PERMILLE_MMU_PAGES 20
-#define KVM_MIN_ALLOC_MMU_PAGES 64
+#define KVM_MIN_ALLOC_MMU_PAGES 64UL
 #define KVM_MMU_HASH_SHIFT 10
 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
@@ -552,9 +552,9 @@ struct kvm_apic_map {
 };

 struct kvm_arch {
-	unsigned int n_used_mmu_pages;
-	unsigned int n_requested_mmu_pages;
-	unsigned int n_max_mmu_pages;
+	unsigned long n_used_mmu_pages;
+	unsigned long n_requested_mmu_pages;
+	unsigned long n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
 	unsigned long mmu_valid_gen;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
@@ -810,8 +810,8 @@ void kvm_mmu_write_protect_pt_masked(str
				     gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm);
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);

 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1492,7 +1492,7 @@ static int is_empty_shadow_page(u64 *spt
  * aggregate version in order to make the slab shrinker
  * faster
  */
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
 {
 	kvm->arch.n_used_mmu_pages += nr;
 	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2207,7 +2207,7 @@ static bool prepare_zap_oldest_mmu_page(
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
 {
 	LIST_HEAD(invalid_list);

@@ -4505,10 +4505,10 @@ nomem:
 /*
  * Caculate mmu pages needed for kvm.
  */
-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 {
-	unsigned int nr_mmu_pages;
-	unsigned int nr_pages = 0;
+	unsigned long nr_mmu_pages;
+	unsigned long nr_pages = 0;
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;

@@ -4518,8 +4518,7 @@ unsigned int kvm_mmu_calculate_mmu_pages
 		nr_pages += memslot->npages;

 	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
-	nr_mmu_pages = max(nr_mmu_pages,
-			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);

 	return nr_mmu_pages;
 }
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -81,7 +81,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_
 		bool execonly);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);

-static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
 {
 	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
 		return kvm->arch.n_max_mmu_pages -
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3529,7 +3529,7 @@ static int kvm_vm_ioctl_set_identity_map
 }

 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
-					  u32 kvm_nr_mmu_pages)
+					  unsigned long kvm_nr_mmu_pages)
 {
 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
 		return -EINVAL;
@@ -3543,7 +3543,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages
 	return 0;
 }

-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
 {
 	return kvm->arch.n_max_mmu_pages;
 }