Re: [PATCH v10 79/81] KVM: introspection: add KVMI_VCPU_TRANSLATE_GVA

Hi "Adalbert,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on dc924b062488a0376aae41d3e0a27dc99f852a5e]

url:    https://github.com/0day-ci/linux/commits/Adalbert-Laz-r/VM-introspection/20201125-174530
base:    dc924b062488a0376aae41d3e0a27dc99f852a5e
config: x86_64-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/7e18b2b2a0317b316591c7fcde367da2f6694550
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Adalbert-Laz-r/VM-introspection/20201125-174530
        git checkout 7e18b2b2a0317b316591c7fcde367da2f6694550
        # save the attached .config to linux build tree
        make W=1 ARCH=x86_64 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>

All warnings (new ones prefixed by >>):

   arch/x86/kvm/kvmi_msg.c: In function 'handle_vcpu_inject_exception':
   arch/x86/kvm/kvmi_msg.c:158:38: warning: variable 'arch' set but not used [-Wunused-but-set-variable]
     158 |  struct kvm_vcpu_arch_introspection *arch;
         |                                      ^~~~
   arch/x86/kvm/kvmi_msg.c: In function 'handle_vcpu_get_xsave':
   arch/x86/kvm/kvmi_msg.c:203:11: warning: variable 'ec' set but not used [-Wunused-but-set-variable]
     203 |  int err, ec = 0;
         |           ^~
   arch/x86/kvm/kvmi_msg.c: At top level:
>> arch/x86/kvm/kvmi_msg.c:316:5: warning: no previous prototype for 'handle_vcpu_translate_gva' [-Wmissing-prototypes]
     316 | int handle_vcpu_translate_gva(const struct kvmi_vcpu_msg_job *job,
         |     ^~~~~~~~~~~~~~~~~~~~~~~~~

vim +/handle_vcpu_translate_gva +316 arch/x86/kvm/kvmi_msg.c

   152	
   153	static int handle_vcpu_inject_exception(const struct kvmi_vcpu_msg_job *job,
   154						const struct kvmi_msg_hdr *msg,
   155						const void *_req)
   156	{
   157		const struct kvmi_vcpu_inject_exception *req = _req;
 > 158		struct kvm_vcpu_arch_introspection *arch;
   159		struct kvm_vcpu *vcpu = job->vcpu;
   160		int ec;
   161	
   162		arch = &VCPUI(vcpu)->arch;
   163	
   164		if (!kvmi_is_event_allowed(KVMI(vcpu->kvm), KVMI_VCPU_EVENT_TRAP))
   165			ec = -KVM_EPERM;
   166		else if (req->padding1 || req->padding2)
   167			ec = -KVM_EINVAL;
   168		else if (VCPUI(vcpu)->arch.exception.pending ||
   169				VCPUI(vcpu)->arch.exception.send_event ||
   170				VCPUI(vcpu)->singlestep.loop)
   171			ec = -KVM_EBUSY;
   172		else
   173			ec = kvmi_arch_cmd_vcpu_inject_exception(vcpu, req);
   174	
   175		return kvmi_msg_vcpu_reply(job, msg, ec, NULL, 0);
   176	}
   177	
   178	static int handle_vcpu_get_xcr(const struct kvmi_vcpu_msg_job *job,
   179				       const struct kvmi_msg_hdr *msg,
   180				       const void *_req)
   181	{
   182		const struct kvmi_vcpu_get_xcr *req = _req;
   183		struct kvmi_vcpu_get_xcr_reply rpl;
   184		int ec = 0;
   185	
   186		memset(&rpl, 0, sizeof(rpl));
   187	
   188		if (non_zero_padding(req->padding, ARRAY_SIZE(req->padding)))
   189			ec = -KVM_EINVAL;
   190		else if (req->xcr != 0)
   191			ec = -KVM_EINVAL;
   192		else
   193			rpl.value = job->vcpu->arch.xcr0;
   194	
   195		return kvmi_msg_vcpu_reply(job, msg, ec, &rpl, sizeof(rpl));
   196	}
   197	
   198	static int handle_vcpu_get_xsave(const struct kvmi_vcpu_msg_job *job,
   199					 const struct kvmi_msg_hdr *msg,
   200					 const void *req)
   201	{
   202		struct kvmi_vcpu_get_xsave_reply *rpl;
   203		int err, ec = 0;
   204	
   205		rpl = kvmi_msg_alloc();
   206		if (!rpl)
   207			ec = -KVM_ENOMEM;
   208		else
   209			kvm_vcpu_ioctl_x86_get_xsave(job->vcpu, &rpl->xsave);
   210	
   211		err = kvmi_msg_vcpu_reply(job, msg, 0, rpl, sizeof(*rpl));
   212	
   213		kvmi_msg_free(rpl);
   214		return err;
   215	}
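Here 'ec' is set to -KVM_ENOMEM when kvmi_msg_alloc() fails, but the reply is
sent with a hard-coded 0, hence the warning at line 203. Presumably the intent
is to propagate the error code; a sketch of that (assuming
kvmi_msg_vcpu_reply() accepts a NULL payload when the size is 0) could be:

	err = kvmi_msg_vcpu_reply(job, msg, ec, rpl, rpl ? sizeof(*rpl) : 0);
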
   216	
   217	static int handle_vcpu_set_xsave(const struct kvmi_vcpu_msg_job *job,
   218					 const struct kvmi_msg_hdr *msg,
   219					 const void *req)
   220	{
   221		size_t req_size, msg_size = msg->size;
   222		int ec = 0;
   223	
   224		if (check_sub_overflow(msg_size, sizeof(struct kvmi_vcpu_hdr),
   225				       &req_size))
   226			return -EINVAL;
   227	
   228		if (req_size < sizeof(struct kvm_xsave))
   229			ec = -KVM_EINVAL;
   230		else if (kvm_vcpu_ioctl_x86_set_xsave(job->vcpu,
   231						      (struct kvm_xsave *) req))
   232			ec = -KVM_EINVAL;
   233	
   234		return kvmi_msg_vcpu_reply(job, msg, ec, NULL, 0);
   235	}
   236	
   237	static int handle_vcpu_get_mtrr_type(const struct kvmi_vcpu_msg_job *job,
   238					     const struct kvmi_msg_hdr *msg,
   239					     const void *_req)
   240	{
   241		const struct kvmi_vcpu_get_mtrr_type *req = _req;
   242		struct kvmi_vcpu_get_mtrr_type_reply rpl;
   243		gfn_t gfn;
   244	
   245		gfn = gpa_to_gfn(req->gpa);
   246	
   247		memset(&rpl, 0, sizeof(rpl));
   248		rpl.type = kvm_mtrr_get_guest_memory_type(job->vcpu, gfn);
   249	
   250		return kvmi_msg_vcpu_reply(job, msg, 0, &rpl, sizeof(rpl));
   251	}
   252	
   253	static bool is_valid_msr(unsigned int msr)
   254	{
   255		return msr <= 0x1fff || (msr >= 0xc0000000 && msr <= 0xc0001fff);
   256	}
   257	
   258	static int handle_vcpu_control_msr(const struct kvmi_vcpu_msg_job *job,
   259					   const struct kvmi_msg_hdr *msg,
   260					   const void *_req)
   261	{
   262		const struct kvmi_vcpu_control_msr *req = _req;
   263		int ec = 0;
   264	
   265		if (req->padding1 || req->padding2 || req->enable > 1)
   266			ec = -KVM_EINVAL;
   267		else if (!is_valid_msr(req->msr))
   268			ec = -KVM_EINVAL;
   269		else if (req->enable &&
   270			 !kvm_msr_allowed(job->vcpu, req->msr,
   271					  KVM_MSR_FILTER_WRITE))
   272			ec = -KVM_EPERM;
   273		else
   274			kvmi_control_msrw_intercept(job->vcpu, req->msr,
   275						    req->enable == 1);
   276	
   277		return kvmi_msg_vcpu_reply(job, msg, ec, NULL, 0);
   278	}
   279	
   280	static int handle_vcpu_control_singlestep(const struct kvmi_vcpu_msg_job *job,
   281						  const struct kvmi_msg_hdr *msg,
   282						  const void *_req)
   283	{
   284		const struct kvmi_vcpu_control_singlestep *req = _req;
   285		struct kvm_vcpu *vcpu = job->vcpu;
   286		int ec = 0;
   287	
   288		if (!kvmi_is_event_allowed(KVMI(vcpu->kvm),
   289					   KVMI_VCPU_EVENT_SINGLESTEP)) {
   290			ec = -KVM_EPERM;
   291			goto reply;
   292		}
   293	
   294		if (non_zero_padding(req->padding, ARRAY_SIZE(req->padding)) ||
   295		    req->enable > 1) {
   296			ec = -KVM_EINVAL;
   297			goto reply;
   298		}
   299	
   300		if (!kvm_x86_ops.control_singlestep) {
   301			ec = -KVM_EOPNOTSUPP;
   302			goto reply;
   303		}
   304	
   305		if (req->enable)
   306			kvmi_arch_start_singlestep(vcpu);
   307		else
   308			kvmi_arch_stop_singlestep(vcpu);
   309	
   310		VCPUI(vcpu)->singlestep.loop = !!req->enable;
   311	
   312	reply:
   313		return kvmi_msg_vcpu_reply(job, msg, ec, NULL, 0);
   314	}
   315	
 > 316	int handle_vcpu_translate_gva(const struct kvmi_vcpu_msg_job *job,
   317				      const struct kvmi_msg_hdr *msg,
   318				      const void *_req)
   319	{
   320		const struct kvmi_vcpu_translate_gva *req = _req;
   321		struct kvmi_vcpu_translate_gva_reply rpl;
   322	
   323		memset(&rpl, 0, sizeof(rpl));
   324	
   325		rpl.gpa = kvm_mmu_gva_to_gpa_system(job->vcpu, req->gva, 0, NULL);
   326	
   327		return kvmi_msg_vcpu_reply(job, msg, 0, &rpl, sizeof(rpl));
   328	}
   329	
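handle_vcpu_translate_gva() has no declaration in any header and, judging by
the other message handlers in this file, is most likely only referenced from
within kvmi_msg.c. If that is the case, the usual fix for the
-Wmissing-prototypes warning is to make it static, matching the surrounding
handlers:

	static int handle_vcpu_translate_gva(const struct kvmi_vcpu_msg_job *job,
					     const struct kvmi_msg_hdr *msg,
					     const void *_req)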

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx

Attachment: .config.gz
Description: application/gzip

