From 99894a799f09cf9e28296bb16e75bd5830fd2c4e Mon Sep 17 00:00:00 2001
From: Avi Kivity
Date: Sun, 29 Mar 2009 16:31:25 +0300
Subject: [PATCH] KVM: MMU: Fix off-by-one calculating large page count

The large page initialization code concludes there are two large pages
spanned by a slot covering 1 (small) page starting at gfn 1.  This is
incorrect, and also results in incorrect write_count initialization in
some cases (base = 1, npages = 513 for example).

Cc: stable@kernel.org
Signed-off-by: Avi Kivity
---
 virt/kvm/kvm_main.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 605697e9c4dd..28d693a1ee8f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -920,6 +920,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	int r;
 	gfn_t base_gfn;
 	unsigned long npages;
+	int largepages;
 	unsigned long i;
 	struct kvm_memory_slot *memslot;
 	struct kvm_memory_slot old, new;
@@ -995,11 +996,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 			new.userspace_addr = 0;
 	}
 	if (npages && !new.lpage_info) {
-		int largepages = npages / KVM_PAGES_PER_HPAGE;
-		if (npages % KVM_PAGES_PER_HPAGE)
-			largepages++;
-		if (base_gfn % KVM_PAGES_PER_HPAGE)
-			largepages++;
+		largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
+		largepages -= base_gfn / KVM_PAGES_PER_HPAGE;

 		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
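
For illustration, here is a minimal standalone userspace sketch (not kernel
code) that compares the old and new calculations for the two cases named in
the commit message. KVM_PAGES_PER_HPAGE is assumed to be 512 here (x86, 2M
huge page over 4K base pages); the helper names are only for this example.

/* build: cc -o largepages largepages.c */
#include <stdio.h>

#define KVM_PAGES_PER_HPAGE 512UL	/* assumed value for this sketch */

/* Old calculation: adds one for any unaligned base, even when the slot
 * does not actually cross a large-page boundary. */
static unsigned long old_largepages(unsigned long base_gfn, unsigned long npages)
{
	unsigned long largepages = npages / KVM_PAGES_PER_HPAGE;

	if (npages % KVM_PAGES_PER_HPAGE)
		largepages++;
	if (base_gfn % KVM_PAGES_PER_HPAGE)
		largepages++;
	return largepages;
}

/* New calculation: large-page index of the last gfn, minus large-page
 * index of the first gfn, plus one. */
static unsigned long new_largepages(unsigned long base_gfn, unsigned long npages)
{
	unsigned long largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;

	largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
	return largepages;
}

int main(void)
{
	/* One small page at gfn 1: spans one large page, not two. */
	printf("base=1 npages=1:   old=%lu new=%lu\n",
	       old_largepages(1, 1), new_largepages(1, 1));
	/* 513 pages starting at gfn 1: spans exactly two large pages. */
	printf("base=1 npages=513: old=%lu new=%lu\n",
	       old_largepages(1, 513), new_largepages(1, 513));
	return 0;
}

Running it prints old=2 new=1 for the first case and old=3 new=2 for the
second, showing where the old code over-counted.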