Introduce mmu_topup_memory_cache_atomic(), which supports topping up a memory cache in atomic context Signed-off-by: Xiao Guangrong <xiaoguangrong@xxxxxxxxxxxxxx> --- arch/x86/kvm/mmu.c | 29 +++++++++++++++++++++++++---- 1 files changed, 25 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f151540..6c06666 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -291,15 +291,16 @@ static void __set_spte(u64 *sptep, u64 spte) #endif } -static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, - struct kmem_cache *base_cache, int min) +static int __mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, + struct kmem_cache *base_cache, int min, + int max, gfp_t flags) { void *obj; if (cache->nobjs >= min) return 0; - while (cache->nobjs < ARRAY_SIZE(cache->objects)) { - obj = kmem_cache_zalloc(base_cache, GFP_KERNEL); + while (cache->nobjs < max) { + obj = kmem_cache_zalloc(base_cache, flags); if (!obj) return -ENOMEM; cache->objects[cache->nobjs++] = obj; @@ -307,6 +308,26 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, return 0; } +static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, + struct kmem_cache *base_cache, int min) +{ + return __mmu_topup_memory_cache(cache, base_cache, min, + ARRAY_SIZE(cache->objects), GFP_KERNEL); +} + +static int mmu_topup_memory_cache_atomic(struct kvm_mmu_memory_cache *cache, + struct kmem_cache *base_cache, int min) +{ + return __mmu_topup_memory_cache(cache, base_cache, min, min, + GFP_ATOMIC); +} + +static int pte_prefetch_topup_memory_cache(struct kvm_vcpu *vcpu, int num) +{ + return mmu_topup_memory_cache_atomic(&vcpu->arch.mmu_rmap_desc_cache, + rmap_desc_cache, num); +} + static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc, struct kmem_cache *cache) { -- 1.6.1.2 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at 
http://vger.kernel.org/majordomo-info.html