Using a linked-list for LPIs is less than ideal as it of course requires
iterative searches to find a particular entry. An xarray is a better
data structure for this use case, as it provides faster searches and can
still handle a potentially sparse range of INTID allocations.

Start by storing LPIs in an xarray, punting usage of the xarray to a
subsequent change.

Signed-off-by: Oliver Upton <oliver.upton@xxxxxxxxx>
---
 arch/arm64/kvm/vgic/vgic-init.c |  3 +++
 arch/arm64/kvm/vgic/vgic-its.c  | 16 ++++++++++++++++
 arch/arm64/kvm/vgic/vgic.c      |  1 +
 include/kvm/arm_vgic.h          |  2 ++
 4 files changed, 22 insertions(+)

diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index e949e1d0fd9f..411719053107 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -56,6 +56,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
 	INIT_LIST_HEAD(&dist->lpi_list_head);
 	INIT_LIST_HEAD(&dist->lpi_translation_cache);
 	raw_spin_lock_init(&dist->lpi_list_lock);
+	xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
 }
 
 /* CREATION */
@@ -366,6 +367,8 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 
 	if (vgic_supports_direct_msis(kvm))
 		vgic_v4_teardown(kvm);
+
+	xa_destroy(&dist->lpi_xa);
 }
 
 static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 59179268ac2d..0265cd1f2d6e 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -53,6 +53,12 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 	if (!irq)
 		return ERR_PTR(-ENOMEM);
 
+	ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
+	if (ret) {
+		kfree(irq);
+		return ERR_PTR(ret);
+	}
+
 	INIT_LIST_HEAD(&irq->lpi_list);
 	INIT_LIST_HEAD(&irq->ap_list);
 	raw_spin_lock_init(&irq->irq_lock);
@@ -87,10 +93,20 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 		goto out_unlock;
 	}
 
+	ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
+	if (ret) {
+		xa_release(&dist->lpi_xa, intid);
+		kfree(irq);
+		goto out_unlock;
+	}
+
 	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
 	dist->lpi_list_count++;
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
+	if (ret)
+		return ERR_PTR(ret);
+
 	/*
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index db2a95762b1b..c126014f8395 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -131,6 +131,7 @@ void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
 		return;
 
 	list_del(&irq->lpi_list);
+	xa_erase(&dist->lpi_xa, irq->intid);
 	dist->lpi_list_count--;
 
 	kfree(irq);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 8cc38e836f54..795b35656b54 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/static_key.h>
 #include <linux/types.h>
+#include <linux/xarray.h>
 #include <kvm/iodev.h>
 #include <linux/list.h>
 #include <linux/jump_label.h>
@@ -275,6 +276,7 @@ struct vgic_dist {
 
 	/* Protects the lpi_list and the count value below. */
 	raw_spinlock_t lpi_list_lock;
+	struct xarray lpi_xa;
 	struct list_head lpi_list_head;
 	int lpi_list_count;
 
-- 
2.43.0.687.g38aa6559b0-goog
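
For illustration, here is a rough sketch (not part of this patch) of
what the punted-on lookup side could look like once readers move off
the list; the helper name is hypothetical, but xa_load() is the
standard xarray lookup primitive:

	/*
	 * Hypothetical sketch only: resolve an LPI INTID through the
	 * xarray instead of walking lpi_list_head. xa_load() walks the
	 * radix tree, so lookup cost no longer scales linearly with
	 * the number of allocated LPIs. A real conversion would also
	 * need to take a reference on the returned vgic_irq (under
	 * RCU or a lock) before handing it to the caller.
	 */
	static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
	{
		struct vgic_dist *dist = &kvm->arch.vgic;

		return xa_load(&dist->lpi_xa, intid);
	}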