On Thu, May 30, 2024 at 11:07 PM Rick Edgecombe <rick.p.edgecombe@xxxxxxxxx> wrote:
> +	/* Unmmap the old attribute page. */

Unmap

> +	if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE)
> +		range->process = KVM_PROCESS_SHARED;
> +	else
> +		range->process = KVM_PROCESS_PRIVATE;
> +
> 	return kvm_unmap_gfn_range(kvm, range);
> }
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index c3c922bf077f..f92c8b605b03 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -260,11 +260,19 @@ union kvm_mmu_notifier_arg {
> 	unsigned long attributes;
> };
>
> +enum kvm_process {
> +	BUGGY_KVM_INVALIDATION		= 0,
> +	KVM_PROCESS_SHARED		= BIT(0),
> +	KVM_PROCESS_PRIVATE		= BIT(1),
> +	KVM_PROCESS_PRIVATE_AND_SHARED	= KVM_PROCESS_SHARED | KVM_PROCESS_PRIVATE,
> +};

Only KVM_PROCESS_SHARED and KVM_PROCESS_PRIVATE are needed.

> +	/*
> +	 * If/when KVM supports more attributes beyond private vs. shared, this
> +	 * _could_ set exclude_{private,shared} appropriately if the entire target

this could mask away KVM_PROCESS_{SHARED,PRIVATE} if the entire target...

Paolo

> +	 * range already has the desired private vs. shared state (it's unclear
> +	 * if that is a net win). For now, KVM reaches this point if and only
> +	 * if the private flag is being toggled, i.e. all mappings are in play.
> +	 */
> +
> 	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
> 		slots = __kvm_memslots(kvm, i);
>
> @@ -2506,6 +2519,7 @@ static int kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
> 	struct kvm_mmu_notifier_range pre_set_range = {
> 		.start = start,
> 		.end = end,
> +		.arg.attributes = attributes,
> 		.handler = kvm_pre_set_memory_attributes,
> 		.on_lock = kvm_mmu_invalidate_begin,
> 		.flush_on_ret = true,
> --
> 2.34.1
>