Re: [PATCH v2 05/11] ARM: KVM: VGIC virtual CPU interface management

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On 18/08/12 20:25, Christoffer Dall wrote:
> On Thu, Jul 5, 2012 at 11:28 AM, Marc Zyngier <marc.zyngier@xxxxxxx> wrote:
>> Add VGIC virtual CPU interface code, picking pending interrupts
>> from the distributor and stashing them in the VGIC control interface
>> list registers.
>>
>> Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
>> ---
>>  arch/arm/include/asm/kvm_vgic.h |   42 ++++++++
>>  arch/arm/kvm/vgic.c             |  227 ++++++++++++++++++++++++++++++++++++++-
>>  2 files changed, 268 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
>> index 02f50b7..481f4a9 100644
>> --- a/arch/arm/include/asm/kvm_vgic.h
>> +++ b/arch/arm/include/asm/kvm_vgic.h
>> @@ -173,16 +173,58 @@ struct vgic_dist {
>>  };
>>
>>  struct vgic_cpu {
>> +#ifdef CONFIG_KVM_ARM_VGIC
>> +       /* per IRQ to LR mapping */
>> +       u8              vgic_irq_lr_map[VGIC_NR_IRQS];
>> +
>> +       /* Pending interrupts on this VCPU */
>> +       DECLARE_BITMAP( pending, VGIC_NR_IRQS);
>> +
>> +       /* Bitmap of used/free list registers */
>> +       DECLARE_BITMAP( lr_used, 64);
> 
> my mind spins again, is the lr_used a duplication of the state stored
> in vgic_elrsr?

Related, but not the same. lr_used drives the LR allocation, and elrsr
gives you the state of the CPU interface on exit. Of course, elrsr and
lr_used have similar meanings before the vcpu has run.

>> +
>> +       /* Number of list registers on this CPU */
>> +       int             nr_lr;
>> +
>> +       /* CPU vif control registers for world switch */
>> +       u32             vgic_hcr;
>> +       u32             vgic_vmcr;
>> +       u32             vgic_misr;      /* Saved only */
>> +       u32             vgic_eisr[2];   /* Saved only */
>> +       u32             vgic_elrsr[2];  /* Saved only */
>> +       u32             vgic_apr;
>> +       u32             vgic_lr[64];    /* A15 has only 4... */
>> +#endif
>>  };
>>
>> +#define VGIC_HCR_EN            (1 << 0)
>> +#define VGIC_HCR_UIE           (1 << 1)
>> +
>> +#define VGIC_LR_VIRTUALID      (0x3ff << 0)
>> +#define VGIC_LR_PHYSID_CPUID   (7 << 10)
>> +#define VGIC_LR_STATE          (3 << 28)
>> +#define VGIC_LR_PENDING_BIT    (1 << 28)
>> +#define VGIC_LR_ACTIVE_BIT     (1 << 29)
>> +#define VGIC_LR_EOI            (1 << 19)
>> +
>> +#define VGIC_MISR_EOI          (1 << 0)
>> +#define VGIC_MISR_U            (1 << 1)
>> +
>> +#define LR_EMPTY       0xff
>> +
>>  struct kvm;
>>  struct kvm_vcpu;
>>  struct kvm_run;
>>  struct kvm_exit_mmio;
>>
>>  #ifdef CONFIG_KVM_ARM_VGIC
>> +void kvm_vgic_sync_to_cpu(struct kvm_vcpu *vcpu);
>> +void kvm_vgic_sync_from_cpu(struct kvm_vcpu *vcpu);
>> +int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
>>  int vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
>>                      struct kvm_exit_mmio *mmio);
>> +
>> +#define irqchip_in_kernel(k)   (!!((k)->arch.vgic.vctrl_base))
>>  #else
>>  static inline int kvm_vgic_hyp_init(void)
>>  {
>> diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
>> index ad48c89..8722efc 100644
>> --- a/arch/arm/kvm/vgic.c
>> +++ b/arch/arm/kvm/vgic.c
>> @@ -545,7 +545,25 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
>>
>>  static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
>>  {
>> -       return 0;
>> +       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>> +       unsigned long *pending, *enabled, *pend;
>> +       int vcpu_id;
>> +
>> +       vcpu_id = vcpu->vcpu_id;
>> +       pend = vcpu->arch.vgic_cpu.pending;
>> +
>> +       pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
>> +       enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
>> +       bitmap_and(pend, pending, enabled, 32);
>> +
>> +       pending = vgic_bitmap_get_shared_map(&dist->irq_state);
>> +       enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
>> +       bitmap_and(pend + 1, pending, enabled, VGIC_NR_SHARED_IRQS);
>> +       bitmap_and(pend + 1, pend + 1,
>> +                  vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
>> +                  VGIC_NR_SHARED_IRQS);
>> +
>> +       return (find_first_bit(pend, VGIC_NR_IRQS) < VGIC_NR_IRQS);
>>  }
>>
>>  /*
>> @@ -570,3 +588,210 @@ static void vgic_update_state(struct kvm *kvm)
>>                 }
>>         }
>>  }
>> +
>> +/*
>> + * Queue an interrupt to a CPU virtual interface. Return 0 on success,
>> + * or 1 if it wasn't possible to queue it.
>> + */
> 
> consider a bool?

OK.

>> +static int vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
>> +{
>> +       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> +       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>> +       int lr, is_level;
>> +
>> +       /* Sanitize the input... */
>> +       BUG_ON(sgi_source_id & ~7);
>> +       BUG_ON(sgi_source_id && irq > 15);
>> +       BUG_ON(irq >= VGIC_NR_IRQS);
>> +
>> +       kvm_debug("Queue IRQ%d\n", irq);
>> +
>> +       lr = vgic_cpu->vgic_irq_lr_map[irq];
>> +       is_level = !vgic_irq_is_edge(dist, irq);
>> +
>> +       /* Do we have an active interrupt for the same CPUID? */
>> +       if (lr != LR_EMPTY &&
>> +           (vgic_cpu->vgic_lr[lr] & VGIC_LR_PHYSID_CPUID) == (sgi_source_id << 10)) {
> 
> this line is too long, could we have a macro
> 
> #define LR_PHYSID(_x) ((_x & VGIC_LR_PHYSID) >> 10)
> 
> and make the line
> 
>               (LR_PHYSID(vgic_cpu->vgic_lr[lr]) == sgi_source_id) {
> 

OK.

> 
>> +               kvm_debug("LR%d piggyback for IRQ%d %x\n", lr, irq, vgic_cpu->vgic_lr[lr]);
>> +               BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
>> +               vgic_cpu->vgic_lr[lr] |= VGIC_LR_PENDING_BIT;
>> +               if (is_level)
>> +                       vgic_cpu->vgic_lr[lr] |= VGIC_LR_EOI;
>> +               return 0;
>> +       }
>> +
>> +       /* Try to use another LR for this interrupt */
>> +       lr = find_first_bit((unsigned long *)vgic_cpu->vgic_elrsr,
>> +                              vgic_cpu->nr_lr);
> 
> so here, could we just as well find the first zero bit in vgic_cpu->lr_used?

Indeed.

>> +       if (lr >= vgic_cpu->nr_lr)
>> +               return 1;
>> +
>> +       kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
>> +       vgic_cpu->vgic_lr[lr] = (VGIC_LR_PENDING_BIT |  (sgi_source_id << 10) | irq);
> 
> nit, consider a macro to create an lr, and hide the 10 magic number, like:
> 
> #define MK_LR_PEND(_src_id, _irq, _level) (VGIC_LR_PENDING_BIT |
> (_src_id << 10) | _irq)

OK.

> maybe it's ridiculous, your choice.
> 
>> +       if (is_level)
>> +               vgic_cpu->vgic_lr[lr] |= VGIC_LR_EOI;
>> +
>> +       vgic_cpu->vgic_irq_lr_map[irq] = lr;
>> +       clear_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
>> +       set_bit(lr, vgic_cpu->lr_used);
>> +
>> +       return 0;
>> +}
>> +
>> +/*
>> + * Fill the list registers with pending interrupts before running the
>> + * guest.
>> + */
>> +static void __kvm_vgic_sync_to_cpu(struct kvm_vcpu *vcpu)
>> +{
>> +       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> +       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>> +       unsigned long *pending;
>> +       int i, c, vcpu_id;
>> +       int overflow = 0;
>> +
>> +       vcpu_id = vcpu->vcpu_id;
>> +
>> +       /*
>> +        * We may not have any pending interrupt, or the interrupts
>> +        * may have been serviced from another vcpu. In all cases,
>> +        * move along.
>> +        */
>> +       if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
>> +               pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
> 
> nit: kvm_debug or should go away
> 
>> +               goto epilog;
>> +       }
>> +
>> +       /* SGIs */
>> +       pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
>> +       for_each_set_bit(i, vgic_cpu->pending, 16) {
>> +               unsigned long sources;
>> +
>> +               pr_debug("SGI%d on CPU%d\n", i, vcpu_id);
> 
> nit: kvm_debug or should go away
> 
>> +               sources = dist->irq_sgi_sources[vcpu_id][i];
>> +               for_each_set_bit(c, &sources, 8) {
>> +                       if (vgic_queue_irq(vcpu, c, i)) {
> 
> nit: see to me the vgic_queue_irq return value should be flipped, so
> it's if (!vgic_queue_irq
> 
>> +                               overflow = 1;
> 
> not necessarily an overflow, in the sense that a HCR_UIE may not be
> relevant, but certainly we cannot clear the irq_pending_on_cpu bit.
> 
>> +                               continue;
> 
> isn't there an interesting case where we keep hammering even though
> there are no more free lr registers? makes sense to split the case
> with out of lr's versus already occupied lr for that irq?

It could be an interesting optimization if you end up with multiple
simultaneously pending interrupts.

What you could do is once you've allocated all the LRs, only consider
the interrupts that are part of the allocated set. Only these can progress.

I'll think about it, but I don't think this is a high priority.

>> +                       }
>> +
>> +                       sources &= ~(1 << c);
> 
> clear_bit() ?

OK.

>> +               }
>> +
>> +               if (!sources)
>> +                       clear_bit(i, pending);
>> +
>> +               dist->irq_sgi_sources[vcpu_id][i] = sources;
>> +       }
>> +
>> +       /* PPIs */
>> +       for_each_set_bit_from(i, vgic_cpu->pending, 32) {
>> +               if (vgic_queue_irq(vcpu, 0, i)) {
>> +                       overflow = 1;
>> +                       continue;
>> +               }
>> +
>> +               clear_bit(i, pending);
>> +       }
>> +
>> +
>> +       /* SPIs */
>> +       pending = vgic_bitmap_get_shared_map(&dist->irq_state);
>> +       for_each_set_bit_from(i, vgic_cpu->pending, VGIC_NR_IRQS) {
>> +               if (vgic_bitmap_get_irq_val(&dist->irq_active, 0, i))
>> +                       continue; /* level interrupt, already queued */
>> +
>> +               if (vgic_queue_irq(vcpu, 0, i)) {
>> +                       overflow = 1;
>> +                       continue;
>> +               }
>> +
>> +               /* Immediate clear on edge, set active on level */
>> +               if (vgic_irq_is_edge(dist, i))
>> +                       clear_bit(i - 32, pending);
>> +               else
>> +                       vgic_bitmap_set_irq_val(&dist->irq_active, 0, i, 1);
>> +       }
>> +
>> +epilog:
>> +       if (overflow)
>> +               vgic_cpu->vgic_hcr |= VGIC_HCR_UIE;
>> +       else {
>> +               vgic_cpu->vgic_hcr &= ~VGIC_HCR_UIE;
>> +               /*
>> +                * We're about to run this VCPU, and we've consumed
>> +                * everything the distributor had in store for
>> +                * us. Claim we don't have anything pending. We'll
>> +                * adjust that if needed while exiting.
>> +                */
>> +               clear_bit(1 << vcpu_id, &dist->irq_pending_on_cpu);
>> +       }
>> +}
>> +
>> +/*
>> + * Sync back the VGIC state after a guest run.
>> + */
>> +static void __kvm_vgic_sync_from_cpu(struct kvm_vcpu *vcpu)
>> +{
>> +       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>> +       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>> +       int lr, pending;
>> +
>> +       /* Clear mappings for empty LRs */
>> +       for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
>> +                        vgic_cpu->nr_lr) {
>> +               int irq;
>> +
>> +               if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
>> +                       continue;
>> +
>> +               irq = vgic_cpu->vgic_lr[lr] & VGIC_LR_VIRTUALID;
>> +
>> +               BUG_ON(irq >= VGIC_NR_IRQS);
>> +               vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
>> +       }
>> +
>> +       /* Check if we still have something up our sleeve... */
>> +       pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
>> +                                     vgic_cpu->nr_lr);
>> +       if (pending < vgic_cpu->nr_lr)
>> +               set_bit(1 << vcpu->vcpu_id, &dist->irq_pending_on_cpu);
>> +}
>> +
>> +void kvm_vgic_sync_to_cpu(struct kvm_vcpu *vcpu)
>> +{
>> +       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>> +
>> +       if (!irqchip_in_kernel(vcpu->kvm))
>> +               return;
>> +
>> +       spin_lock(&dist->lock);
>> +       __kvm_vgic_sync_to_cpu(vcpu);
>> +       spin_unlock(&dist->lock);
>> +
>> +       *__this_cpu_ptr(vgic_vcpus) = vcpu;
>> +}
>> +
>> +void kvm_vgic_sync_from_cpu(struct kvm_vcpu *vcpu)
>> +{
>> +       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>> +
>> +       if (!irqchip_in_kernel(vcpu->kvm))
>> +               return;
>> +
>> +       spin_lock(&dist->lock);
>> +       __kvm_vgic_sync_from_cpu(vcpu);
>> +       spin_unlock(&dist->lock);
>> +
>> +       *__this_cpu_ptr(vgic_vcpus) = NULL;
>> +}
>> +
>> +int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
>> +{
>> +       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
>> +
>> +       if (!irqchip_in_kernel(vcpu->kvm))
>> +               return 0;
>> +
>> +       return test_bit(1 << vcpu->vcpu_id, &dist->irq_pending_on_cpu);
>> +}
>> --
>> 1.7.10.3
>>
>>
> 


-- 
Jazz is not dead. It just smells funny...


_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/cucslists/listinfo/kvmarm


[Index of Archives]     [Linux KVM]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux