The group notifier that handles the VFIO_GROUP_NOTIFY_SET_KVM event must
use the required locks in the proper locking order to dynamically update
the guest's APCB. The proper locking order is:

1. matrix_dev->guests_lock: required to use the KVM pointer to update a
   KVM guest's APCB.
2. matrix_mdev->kvm->lock: required to update a KVM guest's APCB.
3. matrix_dev->mdevs_lock: required to store or access the data stored
   in a struct ap_matrix_mdev instance.

Two macros are introduced to acquire and release the locks in the proper
order. These macros are now used by the group notifier functions.

Signed-off-by: Tony Krowiak <akrowiak@xxxxxxxxxxxxx>
---
 drivers/s390/crypto/vfio_ap_ops.c | 56 +++++++++++++++++++++++++------
 1 file changed, 46 insertions(+), 10 deletions(-)

diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 077b8c9c831b..757bbf449b04 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -30,6 +30,47 @@ static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
 static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
 static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
 
+/**
+ * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
+ *			     KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ *			       guest's APCB.
+ * 2. kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be taken.
+ */
+#define get_update_locks_for_kvm(kvm) ({	\
+	mutex_lock(&matrix_dev->guests_lock);	\
+	if (kvm)				\
+		mutex_lock(&kvm->lock);		\
+	mutex_lock(&matrix_dev->mdevs_lock);	\
+})
+
+/**
+ * release_update_locks_for_kvm: Release the locks used to dynamically update a
+ *				 KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper unlocking order is:
+ * 1. matrix_dev->mdevs_lock
+ * 2. kvm->lock
+ * 3. matrix_dev->guests_lock
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be released.
+ */
+#define release_update_locks_for_kvm(kvm) ({	\
+	mutex_unlock(&matrix_dev->mdevs_lock);	\
+	if (kvm)				\
+		mutex_unlock(&kvm->lock);	\
+	mutex_unlock(&matrix_dev->guests_lock);	\
+})
+
 /**
  * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
  *			    hash table of queues assigned to a matrix mdev
@@ -1263,13 +1304,11 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
 		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
 		up_write(&kvm->arch.crypto.pqap_hook_rwsem);
 
-		mutex_lock(&kvm->lock);
-		mutex_lock(&matrix_dev->mdevs_lock);
+		get_update_locks_for_kvm(kvm);
 
 		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
 			if (m != matrix_mdev && m->kvm == kvm) {
-				mutex_unlock(&kvm->lock);
-				mutex_unlock(&matrix_dev->mdevs_lock);
+				release_update_locks_for_kvm(kvm);
 				return -EPERM;
 			}
 		}
@@ -1280,8 +1319,7 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
 					  matrix_mdev->shadow_apcb.aqm,
 					  matrix_mdev->shadow_apcb.adm);
 
-		mutex_unlock(&kvm->lock);
-		mutex_unlock(&matrix_dev->mdevs_lock);
+		release_update_locks_for_kvm(kvm);
 	}
 
 	return 0;
@@ -1332,16 +1370,14 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
 		kvm->arch.crypto.pqap_hook = NULL;
 		up_write(&kvm->arch.crypto.pqap_hook_rwsem);
 
-		mutex_lock(&kvm->lock);
-		mutex_lock(&matrix_dev->mdevs_lock);
+		get_update_locks_for_kvm(kvm);
 
 		kvm_arch_crypto_clear_masks(kvm);
 		vfio_ap_mdev_reset_queues(matrix_mdev);
 		kvm_put_kvm(kvm);
 		matrix_mdev->kvm = NULL;
 
-		mutex_unlock(&kvm->lock);
-		mutex_unlock(&matrix_dev->mdevs_lock);
+		release_update_locks_for_kvm(kvm);
 	}
 }
-- 
2.31.1
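
Not part of the patch, just an illustration: a minimal user-space sketch of
the acquire/release ordering the two macros encode, using pthread mutexes as
stand-ins for the kernel's struct mutex. The lock names below only mirror the
driver's fields; none of this is vfio_ap code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t guests_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t kvm_lock    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mdevs_lock  = PTHREAD_MUTEX_INITIALIZER;

/* Acquire in the documented order; skip the KVM lock when there is no guest. */
static void get_update_locks(int have_kvm)
{
	pthread_mutex_lock(&guests_lock);
	if (have_kvm)
		pthread_mutex_lock(&kvm_lock);
	pthread_mutex_lock(&mdevs_lock);
}

/* Release in the reverse order. */
static void release_update_locks(int have_kvm)
{
	pthread_mutex_unlock(&mdevs_lock);
	if (have_kvm)
		pthread_mutex_unlock(&kvm_lock);
	pthread_mutex_unlock(&guests_lock);
}

int main(void)
{
	get_update_locks(1);
	printf("guest APCB update would happen here\n");
	release_update_locks(1);
	return 0;
}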