On Wed, Jan 09, 2019 at 10:42:02AM +0100, KarimAllah Ahmed wrote:
> Update the PML table without mapping and unmapping the page. This also
> avoids using kvm_vcpu_gpa_to_page(..) which assumes that there is a "struct
> page" for guest memory.
>
> As a side-effect of using kvm_write_guest_page the page is also properly
> marked as dirty.
>
> Signed-off-by: KarimAllah Ahmed <karahmed@xxxxxxxxx>
> Reviewed-by: David Hildenbrand <david@xxxxxxxxxx>

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>

> ---
> v1 -> v2:
> - Use kvm_write_guest_page instead of kvm_write_guest (pbonzini)
> - Do not use pointer arithmetic for pml_address (pbonzini)
> ---
>  arch/x86/kvm/vmx/vmx.c | 14 +++++---------
>  1 file changed, 5 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 4d39f73..71d88df 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7199,9 +7199,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
>  {
>  	struct vmcs12 *vmcs12;
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
> -	gpa_t gpa;
> -	struct page *page = NULL;
> -	u64 *pml_address;
> +	gpa_t gpa, dst;
>
>  	if (is_guest_mode(vcpu)) {
>  		WARN_ON_ONCE(vmx->nested.pml_full);
> @@ -7221,15 +7219,13 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
>  	}
>
>  	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
> +	dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
>
> -	page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
> -	if (is_error_page(page))
> +	if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
> +				 offset_in_page(dst), sizeof(gpa)))
>  		return 0;
>
> -	pml_address = kmap(page);
> -	pml_address[vmcs12->guest_pml_index--] = gpa;
> -	kunmap(page);
> -	kvm_release_page_clean(page);
> +	vmcs12->guest_pml_index--;
>  	}
>
>  	return 0;
> --
> 2.7.4
>