On 21/02/20 09:34, Oliver Upton wrote: > Absolutely. I thought it sensible to send out the fix in case of other > toolchains out in the wild. But if nobody else other than us has > complained it's quite obvious where the problem lies. Here is another plausible (and untested) way to fix it, in case it's the alias analysis that is throwing off the compiler (plus possibly __always_inline). diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 027259af883e..63c7dcd7c57f 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2218,6 +2218,8 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; gfn_t nr_pages_needed = end_gfn - start_gfn + 1; gfn_t nr_pages_avail; + unsigned long hva; + struct kvm_memory_slot *memslot; /* Update ghc->generation before performing any error checks. */ ghc->generation = slots->generation; @@ -2231,19 +2233,22 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots, * If the requested region crosses two memslots, we still * verify that the entire region is valid here. */ - for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) { - ghc->memslot = __gfn_to_memslot(slots, start_gfn); - ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, - &nr_pages_avail); - if (kvm_is_error_hva(ghc->hva)) + do { + memslot = __gfn_to_memslot(slots, start_gfn); + hva = gfn_to_hva_many(memslot, start_gfn, &nr_pages_avail); + if (kvm_is_error_hva(hva)) return -EFAULT; - } + start_gfn += nr_pages_avail; + } while (start_gfn <= end_gfn); /* Use the slow path for cross page reads and writes. */ - if (nr_pages_needed == 1) - ghc->hva += offset; - else + if (nr_pages_needed == 1) { + ghc->hva = hva + offset; + ghc->memslot = memslot; + } else { + ghc->hva = 0; ghc->memslot = NULL; + } ghc->gpa = gpa; ghc->len = len; Paolo