Currently, although userspace can request any power-of-2 size between
256kB and 64GB for the guest's hashed page table (HPT), the HPT
allocator code only uses the preallocated memory pool for HPTs of the
standard 16MB size, and uses the kernel page allocator for anything
else.  Now that arbitrary power-of-2 sizes can be allocated from the
preallocated memory pool, make the allocator use the pool for any
requested size.

Signed-off-by: Paul Mackerras <paulus@xxxxxxxxx>
---
 arch/powerpc/include/asm/kvm_ppc.h   |  2 +-
 arch/powerpc/kvm/book3s_64_mmu_hv.c  | 33 ++++++++++-----------------------
 arch/powerpc/kvm/book3s_hv_builtin.c |  4 ++--
 3 files changed, 13 insertions(+), 26 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 5dccdc5..925a869 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -141,7 +141,7 @@ extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
 				struct kvm_allocate_rma *rma);
 extern struct kvmppc_linear_info *kvm_alloc_rma(void);
 extern void kvm_release_rma(struct kvmppc_linear_info *ri);
-extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
+extern struct kvmppc_linear_info *kvm_alloc_hpt(long);
 extern void kvm_release_hpt(struct kvmppc_linear_info *li);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index d95d113..148e444 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -42,7 +42,7 @@
 
 long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 {
-	unsigned long hpt;
+	unsigned long hpt = 0;
 	struct revmap_entry *rev;
 	struct kvmppc_linear_info *li;
 	long order = kvm_hpt_order;
@@ -53,34 +53,21 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 			order = PPC_MIN_HPT_ORDER;
 	}
 
-	/*
-	 * If the user wants a different size from default,
-	 * try first to allocate it from the kernel page allocator.
-	 */
-	hpt = 0;
-	if (order != kvm_hpt_order) {
-		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
-				       __GFP_NOWARN, order - PAGE_SHIFT);
-		if (!hpt)
-			--order;
-	}
-
-	/* Next try to allocate from the preallocated pool */
-	if (!hpt) {
-		li = kvm_alloc_hpt();
+	/* Try successively smaller sizes */
+	while (order > PPC_MIN_HPT_ORDER) {
+		/* First try allocating from the preallocated contiguous pool */
+		li = kvm_alloc_hpt(order);
 		if (li) {
 			hpt = (ulong)li->base_virt;
 			kvm->arch.hpt_li = li;
-			order = kvm_hpt_order;
+			break;
 		}
-	}
-
-	/* Lastly try successively smaller sizes from the page allocator */
-	while (!hpt && order > PPC_MIN_HPT_ORDER) {
+		/* If that doesn't work, try the kernel page allocator */
 		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
 				       __GFP_NOWARN, order - PAGE_SHIFT);
-		if (!hpt)
-			--order;
+		if (hpt)
+			break;
+		--order;
 	}
 
 	if (!hpt)
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index f0c51c5..0c4633c 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -126,9 +126,9 @@ static int __init early_parse_hpt_count(char *p)
 }
 early_param("kvm_hpt_count", early_parse_hpt_count);
 
-struct kvmppc_linear_info *kvm_alloc_hpt(void)
+struct kvmppc_linear_info *kvm_alloc_hpt(long order)
 {
-	return kvm_alloc_linear(KVM_LINEAR_HPT, kvm_hpt_order);
+	return kvm_alloc_linear(KVM_LINEAR_HPT, order);
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
-- 
1.7.10.rc3.219.g53414
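
For reference, here is a hedged userspace sketch (not part of this patch) of
the size request the commit message describes, assuming the
KVM_PPC_ALLOCATE_HTAB vm ioctl that takes a pointer to a u32 order (log base 2
of the HPT size in bytes); vm_fd and request_hpt are hypothetical names used
only for illustration:

#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vm_fd is assumed to be an already-open KVM VM file descriptor */
static int request_hpt(int vm_fd, uint32_t order)
{
	/* e.g. order = 25 asks for a 32MB hashed page table */
	if (ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, &order) < 0) {
		perror("KVM_PPC_ALLOCATE_HTAB");
		return -1;
	}
	/*
	 * The kernel may update order to reflect what was actually
	 * allocated; with this patch it tries the preallocated pool first
	 * and may fall back to successively smaller sizes, so the result
	 * can be smaller than the request.
	 */
	printf("allocated HPT of order %u (%llu bytes)\n",
	       (unsigned int)order, 1ULL << order);
	return 0;
}

With something like the above, a VMM wanting a 1GB HPT for a large guest would
call request_hpt(vm_fd, 30) before running any vcpu and check the order that
comes back to see what size it actually got.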