Use the new protected_count field as a counter instead of the old
is_protected flag. This will be used in upcoming patches.

Increment the counter when a secure configuration is created, and
decrement it when it is destroyed. Previously the flag was set when the
set secure parameters UVC was performed.

Signed-off-by: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
Acked-by: Janosch Frank <frankja@xxxxxxxxxxxxx>
---
 arch/s390/include/asm/mmu.h         |  2 +-
 arch/s390/include/asm/mmu_context.h |  2 +-
 arch/s390/include/asm/pgtable.h     |  2 +-
 arch/s390/kvm/pv.c                  | 12 +++++++-----
 4 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index e12ff0f29d1a..2c4cbc254604 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -17,7 +17,7 @@ typedef struct {
 	unsigned long asce_limit;
 	unsigned long vdso_base;
 	/* The mmu context belongs to a secure guest. */
-	atomic_t is_protected;
+	atomic_t protected_count;
 	/*
 	 * The following bitfields need a down_write on the mm
 	 * semaphore when they are written to. As they are only
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c7937f369e62..2a38af5a00c2 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -26,7 +26,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.flush_count, 0);
-	atomic_set(&mm->context.is_protected, 0);
+	atomic_set(&mm->context.protected_count, 0);
 	mm->context.gmap_asce = 0;
 	mm->context.flush_mm = 0;
 #ifdef CONFIG_PGSTE
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 008a6c856fa4..23ca0d8e058a 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -523,7 +523,7 @@ static inline int mm_has_pgste(struct mm_struct *mm)
 static inline int mm_is_protected(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
-	if (unlikely(atomic_read(&mm->context.is_protected)))
+	if (unlikely(atomic_read(&mm->context.protected_count)))
 		return 1;
 #endif
 	return 0;
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 19f67207caa9..04aa4a20260b 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -173,7 +173,8 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
 			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
 	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
-	atomic_set(&kvm->mm->context.is_protected, 0);
+	if (!cc)
+		atomic_dec(&kvm->mm->context.protected_count);
 	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
 	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
 	/* Intended memory leak on "impossible" error */
@@ -215,11 +216,14 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
 
 	/* Outputs */
 	kvm->arch.pv.handle = uvcb.guest_handle;
+	atomic_inc(&kvm->mm->context.protected_count);
 	if (cc) {
-		if (uvcb.header.rc & UVC_RC_NEED_DESTROY)
+		if (uvcb.header.rc & UVC_RC_NEED_DESTROY) {
 			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
-		else
+		} else {
+			atomic_dec(&kvm->mm->context.protected_count);
 			kvm_s390_pv_dealloc_vm(kvm);
+		}
 		return -EIO;
 	}
 	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
@@ -242,8 +246,6 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
 	*rrc = uvcb.header.rrc;
 	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
 		     *rc, *rrc);
-	if (!cc)
-		atomic_set(&kvm->mm->context.is_protected, 1);
 	return cc ? -EINVAL : 0;
 }
 
-- 
2.31.1
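
(Illustrative aside for reviewers, not part of the patch.) The counting
protocol above can be modeled outside the kernel. Below is a minimal
user-space C sketch of the same lifecycle, using C11 atomics in place of
the kernel's atomic_t. create_secure_config() and destroy_secure_config()
are hypothetical stand-ins for the Ultravisor create/destroy calls (here
they always succeed), and the UVC_RC_NEED_DESTROY retry path is folded
into a direct decrement for brevity:

/*
 * User-space model of the protected_count lifecycle. NOT kernel code:
 * C11 atomics stand in for atomic_t, and the *_secure_config() helpers
 * are hypothetical stand-ins for the Ultravisor calls.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int protected_count;

static int create_secure_config(void)  { return 0; } /* 0 == success */
static int destroy_secure_config(void) { return 0; } /* 0 == success */

/* Mirrors kvm_s390_pv_init_vm(): count the configuration as soon as
 * the create call returns, then drop the reference again on failure.
 * (In the patch, the NEED_DESTROY case instead reaches the decrement
 * through kvm_s390_pv_deinit_vm().) */
static int init_vm(void)
{
	int cc = create_secure_config();

	atomic_fetch_add(&protected_count, 1);
	if (cc) {
		atomic_fetch_sub(&protected_count, 1);
		return -1;
	}
	return 0;
}

/* Mirrors kvm_s390_pv_deinit_vm(): only decrement if the destroy
 * succeeded, so a lingering configuration keeps the mm protected. */
static int deinit_vm(void)
{
	int cc = destroy_secure_config();

	if (!cc)
		atomic_fetch_sub(&protected_count, 1);
	return cc ? -1 : 0;
}

/* Mirrors mm_is_protected(): any live configuration => protected. */
static int mm_is_protected(void)
{
	return atomic_load(&protected_count) != 0;
}

int main(void)
{
	init_vm();
	printf("protected after init:   %d\n", mm_is_protected()); /* 1 */
	deinit_vm();
	printf("protected after deinit: %d\n", mm_is_protected()); /* 0 */
	return 0;
}

The sketch also shows why a counter replaces the boolean flag: the mm
stays protected for exactly as long as at least one secure configuration
exists, rather than flipping on a single set-parameters event, which is
the property the upcoming patches rely on.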