Re: [PATCH 3/3 v3] KVM: x86: Add a new VM statistic to show number of VCPUs created in a given VM

Hi Krish,

Thank you for the patch! There is still something to improve:

[auto build test ERROR on kvm/queue]
[also build test ERROR on v5.13-rc5 next-20210608]
[cannot apply to vhost/linux-next]
[If your patch is applied to the wrong git tree, kindly drop us a note.
When submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Krish-Sadhukhan/KVM-nVMX-nSVM-Add-more-statistics-to-KVM-debugfs/20210609-101158
base:   https://git.kernel.org/pub/scm/virt/kvm/kvm.git queue
config: s390-randconfig-r034-20210608 (attached as .config)
compiler: s390-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/8b558261089468777eaf3ec89ca30eb954242e4e
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Krish-Sadhukhan/KVM-nVMX-nSVM-Add-more-statistics-to-KVM-debugfs/20210609-101158
        git checkout 8b558261089468777eaf3ec89ca30eb954242e4e
        # save the attached .config to the Linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=s390 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>

All errors (new ones prefixed by >>):

   arch/s390/kvm/../../../virt/kvm/kvm_main.c: In function 'kvm_vm_ioctl_create_vcpu':
>> arch/s390/kvm/../../../virt/kvm/kvm_main.c:3321:11: error: 'struct kvm_vm_stat' has no member named 'vcpus'
    3321 |  kvm->stat.vcpus++;
         |           ^
   arch/s390/kvm/../../../virt/kvm/kvm_main.c:3398:11: error: 'struct kvm_vm_stat' has no member named 'vcpus'
    3398 |  kvm->stat.vcpus--;
         |           ^
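
Note: the failure is one of architecture coverage, not of the x86 logic. The
new kvm->stat.vcpus counter is updated in generic code (virt/kvm/kvm_main.c),
which every architecture builds, but the patch appears to add the 'vcpus'
member only to the x86 definition of struct kvm_vm_stat, so the s390 build
breaks. A minimal sketch of the missing s390 hunk is below; the existing
fields are elided, and the VM_STAT entry assumes the v5.13-era debugfs stats
scheme, so both should be checked against the actual tree:

	/* arch/s390/include/asm/kvm_host.h -- sketch, existing fields elided */
	struct kvm_vm_stat {
		/* ... existing s390 VM stats ... */
		u64 vcpus;	/* number of VCPUs created in this VM */
	};

	/* arch/s390/kvm/kvm-s390.c -- assumed entry in debugfs_entries[] */
	struct kvm_stats_debugfs_item debugfs_entries[] = {
		/* ... existing VCPU_STAT()/VM_STAT() entries ... */
		VM_STAT("vcpus", vcpus),
	};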


vim +3321 arch/s390/kvm/../../../virt/kvm/kvm_main.c

  3301	
  3302	/*
  3303	 * Creates some virtual cpus.  Good luck creating more than one.
  3304	 */
  3305	static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
  3306	{
  3307		int r;
  3308		struct kvm_vcpu *vcpu;
  3309		struct page *page;
  3310	
  3311		if (id >= KVM_MAX_VCPU_ID)
  3312			return -EINVAL;
  3313	
  3314		mutex_lock(&kvm->lock);
  3315		if (kvm->created_vcpus == KVM_MAX_VCPUS) {
  3316			mutex_unlock(&kvm->lock);
  3317			return -EINVAL;
  3318		}
  3319	
  3320		kvm->created_vcpus++;
> 3321		kvm->stat.vcpus++;
  3322		mutex_unlock(&kvm->lock);
  3323	
  3324		r = kvm_arch_vcpu_precreate(kvm, id);
  3325		if (r)
  3326			goto vcpu_decrement;
  3327	
  3328		vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
  3329		if (!vcpu) {
  3330			r = -ENOMEM;
  3331			goto vcpu_decrement;
  3332		}
  3333	
  3334		BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
  3335		page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
  3336		if (!page) {
  3337			r = -ENOMEM;
  3338			goto vcpu_free;
  3339		}
  3340		vcpu->run = page_address(page);
  3341	
  3342		kvm_vcpu_init(vcpu, kvm, id);
  3343	
  3344		r = kvm_arch_vcpu_create(vcpu);
  3345		if (r)
  3346			goto vcpu_free_run_page;
  3347	
  3348		if (kvm->dirty_ring_size) {
  3349			r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
  3350						 id, kvm->dirty_ring_size);
  3351			if (r)
  3352				goto arch_vcpu_destroy;
  3353		}
  3354	
  3355		mutex_lock(&kvm->lock);
  3356		if (kvm_get_vcpu_by_id(kvm, id)) {
  3357			r = -EEXIST;
  3358			goto unlock_vcpu_destroy;
  3359		}
  3360	
  3361		vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
  3362		BUG_ON(kvm->vcpus[vcpu->vcpu_idx]);
  3363	
  3364		/* Now it's all set up, let userspace reach it */
  3365		kvm_get_kvm(kvm);
  3366		r = create_vcpu_fd(vcpu);
  3367		if (r < 0) {
  3368			kvm_put_kvm_no_destroy(kvm);
  3369			goto unlock_vcpu_destroy;
  3370		}
  3371	
  3372		kvm->vcpus[vcpu->vcpu_idx] = vcpu;
  3373	
  3374		/*
  3375		 * Pairs with smp_rmb() in kvm_get_vcpu.  Write kvm->vcpus
  3376		 * before kvm->online_vcpu's incremented value.
  3377		 */
  3378		smp_wmb();
  3379		atomic_inc(&kvm->online_vcpus);
  3380	
  3381		mutex_unlock(&kvm->lock);
  3382		kvm_arch_vcpu_postcreate(vcpu);
  3383		kvm_create_vcpu_debugfs(vcpu);
  3384		return r;
  3385	
  3386	unlock_vcpu_destroy:
  3387		mutex_unlock(&kvm->lock);
  3388		kvm_dirty_ring_free(&vcpu->dirty_ring);
  3389	arch_vcpu_destroy:
  3390		kvm_arch_vcpu_destroy(vcpu);
  3391	vcpu_free_run_page:
  3392		free_page((unsigned long)vcpu->run);
  3393	vcpu_free:
  3394		kmem_cache_free(kvm_vcpu_cache, vcpu);
  3395	vcpu_decrement:
  3396		mutex_lock(&kvm->lock);
  3397		kvm->created_vcpus--;
  3398		kvm->stat.vcpus--;
  3399		mutex_unlock(&kvm->lock);
  3400		return r;
  3401	}
  3402	
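
Since both the increment at line 3321 and the error-path decrement at line
3398 sit in this generic function, the same member (and a matching stat
entry) would be needed for every other architecture that compiles
kvm_main.c, not just s390. One quick way to enumerate the per-arch
definitions that need the new field is:

	$ git grep -n "struct kvm_vm_stat {" arch/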

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx

Attachment: .config.gz
Description: application/gzip

