Re: [PATCH v4 09/10] x86/hyper-v: support extended CPU ranges for TLB flush hypercalls

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Wed, May 24, 2017 at 3:04 PM, Vitaly Kuznetsov <vkuznets@xxxxxxxxxx> wrote:
> Hyper-V hosts may support more than 64 vCPUs, we need to use
> HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX/LIST_EX hypercalls in this
> case.

> +{

> +       /*
> +        * We can't be sure that translated vcpu numbers will always be
> +        * in ascending order, so iterate over all possible banks and
> +        * check all vcpus in it instead.

vcpu -> vCPU
vcpus -> vCPUs

> +        */

> +       for (cur_bank = 0; cur_bank < ms_hyperv.max_vp_index/64; cur_bank++) {
> +               has_cpus = false;
> +               for_each_cpu(cpu, cpus) {

int vcpu_bank = vcpu / 64;
int vcpu_offset = vcpu % 64;

> +                       vcpu = hv_cpu_number_to_vp_number(cpu);

> +                       if (vcpu/64 != cur_bank)

if (vcpu_bank != cur_bank)

> +                               continue;
> +                       if (!has_cpus) {

> +                               flush->hv_vp_set.valid_bank_mask |=
> +                                       1 << vcpu / 64;

__set_bit(vcpu_bank, &mask);

> +                               flush->hv_vp_set.bank_contents[nr_bank] =
> +                                       1 << vcpu % 64;

Ditto. (vcpu_offset)

> +                               has_cpus = true;
> +                       } else {

> +                               flush->hv_vp_set.bank_contents[nr_bank] |=
> +                                       1 << vcpu % 64;

Ditto.

> +                       }
> +               }
> +               if (has_cpus)
> +                       nr_bank++;
> +       }
> +
> +       return nr_bank;
> +}

> +static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
> +                                      struct mm_struct *mm,
> +                                      unsigned long start,
> +                                      unsigned long end)
> +{
> +       struct hv_flush_pcpu_ex *flush;
> +       unsigned long cur, flags;

> +       u64 status = -1ULL;

U64_MAX

> +       int nr_bank = 0, max_gvas, gva_n;

> +       /*
> +        * We can flush not more than max_gvas with one hypercall. Flush the
> +        * whole address space if we were asked to do more.
> +        */

#define XXX    (PAGE_SIZE * PAGE_SIZE)

> +       max_gvas = (PAGE_SIZE - sizeof(*flush) - nr_bank*8) / 8;

> +
> +       if (end == TLB_FLUSH_ALL ||
> +           (end && ((end - start)/(PAGE_SIZE*PAGE_SIZE)) > max_gvas)) {
> +               if (end == TLB_FLUSH_ALL)
> +                       flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
> +
> +               status = hv_do_rep_hypercall(
> +                       HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
> +                       0, nr_bank + 2, flush, NULL);

if (end == TLB_FLUSH_ALL) {
	flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
	status = hv_do_rep_hypercall(
		HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
		0, nr_bank + 2, flush, NULL);
} else if (end && ((end - start) / XXX > max_gvas)) {
	status = hv_do_rep_hypercall(
		HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
		0, nr_bank + 2, flush, NULL);
} else {
...

Yes, a bit more code, but IMO much more understandable.

> +       } else {
> +               cur = start;
> +               gva_n = nr_bank;
> +               do {

> +                       flush->gva_list[gva_n] = cur & PAGE_MASK;

> +                       /*
> +                        * Lower 12 bits encode the number of additional
> +                        * pages to flush (in addition to the 'cur' page).
> +                        */
> +                       if (end >= cur + PAGE_SIZE * PAGE_SIZE)

if (end >= cur + XXX)

> +                               flush->gva_list[gva_n] |= ~PAGE_MASK;

> +                       else if (end > cur)
> +                               flush->gva_list[gva_n] |=
> +                                       (end - cur - 1) >> PAGE_SHIFT;
> +

> +                       cur += PAGE_SIZE * PAGE_SIZE;

+= XXX;

> +                       ++gva_n;
> +
> +               } while (cur < end);

> +}

-- 
With Best Regards,
Andy Shevchenko
_______________________________________________
devel mailing list
devel@xxxxxxxxxxxxxxxxxxxxxx
http://driverdev.linuxdriverproject.org/mailman/listinfo/driverdev-devel



[Index of Archives]     [Linux Driver Backports]     [DMA Engine]     [Linux GPIO]     [Linux SPI]     [Video for Linux]     [Linux USB Devel]     [Linux Coverity]     [Linux Audio Users]     [Linux Kernel]     [Linux SCSI]     [Yosemite Backpacking]
  Powered by Linux