Convert hppa_cpu_tlb_fill to hppa_cpu_tlb_fill_align so that we can
recognize alignment exceptions in the correct priority order.

Resolves: https://bugzilla.kernel.org/show_bug.cgi?id=219339
Signed-off-by: Richard Henderson <richard.henderson@xxxxxxxxxx>
---
 target/hppa/cpu.h        |  6 +++---
 target/hppa/cpu.c        |  2 +-
 target/hppa/mem_helper.c | 21 ++++++++++++---------
 3 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index 526855f982..e45ba50a59 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -363,9 +363,9 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
 void hppa_ptlbe(CPUHPPAState *env);
 hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
 void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled);
-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                       MMUAccessType access_type, int mmu_idx,
-                       bool probe, uintptr_t retaddr);
+bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+                             MMUAccessType access_type, int mmu_idx,
+                             MemOp memop, int size, bool probe, uintptr_t ra);
 void hppa_cpu_do_interrupt(CPUState *cpu);
 bool hppa_cpu_exec_interrupt(CPUState *cs, int int_req);
 int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c
index 7cf2e2f266..c38439c180 100644
--- a/target/hppa/cpu.c
+++ b/target/hppa/cpu.c
@@ -226,7 +226,7 @@ static const TCGCPUOps hppa_tcg_ops = {
     .restore_state_to_opc = hppa_restore_state_to_opc,
 
 #ifndef CONFIG_USER_ONLY
-    .tlb_fill = hppa_cpu_tlb_fill,
+    .tlb_fill_align = hppa_cpu_tlb_fill_align,
     .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
     .cpu_exec_halt = hppa_cpu_has_work,
     .do_interrupt = hppa_cpu_do_interrupt,
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
index d38054da8a..b8c3e55170 100644
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -424,12 +424,11 @@ void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
     }
 }
 
-bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
-                       MMUAccessType type, int mmu_idx,
-                       bool probe, uintptr_t retaddr)
+bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
+                             MMUAccessType type, int mmu_idx,
+                             MemOp memop, int size, bool probe, uintptr_t ra)
 {
-    HPPACPU *cpu = HPPA_CPU(cs);
-    CPUHPPAState *env = &cpu->env;
+    CPUHPPAState *env = cpu_env(cs);
     int prot, excp, a_prot;
     hwaddr phys;
 
@@ -445,7 +444,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
         break;
     }
 
-    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, 0,
+    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, memop,
                                      &phys, &prot);
     if (unlikely(excp >= 0)) {
         if (probe) {
@@ -454,7 +453,7 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
         trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
 
         /* Failure. Raise the indicated exception. */
-        raise_exception_with_ior(env, excp, retaddr, addr,
+        raise_exception_with_ior(env, excp, ra, addr,
                                  MMU_IDX_MMU_DISABLED(mmu_idx));
     }
 
@@ -468,8 +467,12 @@ bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
      * the large page protection mask. We do not require this,
      * because we record the large page here in the hppa tlb.
      */
-    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
-                 prot, mmu_idx, TARGET_PAGE_SIZE);
+    memset(out, 0, sizeof(*out));
+    out->phys_addr = phys;
+    out->prot = prot;
+    out->attrs = MEMTXATTRS_UNSPECIFIED;
+    out->lg_page_size = TARGET_PAGE_BITS;
+
     return true;
 }
 
-- 
2.43.0
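
A note on the mechanism, since it is not visible in the hunks above: the
point of threading memop through to hppa_get_physical_address() is that the
translation path can detect a misaligned access itself and report it ahead
of any TLB miss or protection fault, which is what gives the correct
priority order mentioned in the commit message. The fragment below is only
a sketch of such a check; it assumes memop_alignment_bits() from
"exec/memop.h" is available in this tree and reuses hppa's existing
EXCP_UNALIGN trap, and the helper name and its exact placement are
illustrative rather than quoted from the series.

  /*
   * Illustrative sketch only, not part of the patch above.  The helper
   * name is invented for this note; in the series the check would belong
   * inside hppa_get_physical_address(), which now receives the MemOp.
   */
  #include "qemu/osdep.h"
  #include "cpu.h"
  #include "exec/memop.h"

  static int hppa_check_alignment_sketch(vaddr addr, MemOp mop,
                                         hwaddr *pphys, int *pprot)
  {
      unsigned a_bits = memop_alignment_bits(mop);

      /*
       * Refuse a misaligned access before the TLB lookup, so that the
       * alignment trap outranks TLB-miss and protection exceptions.
       */
      if (addr & ((1u << a_bits) - 1)) {
          *pphys = addr;      /* report the faulting virtual address */
          *pprot = 0;
          return EXCP_UNALIGN;
      }
      return -1;              /* no exception, continue translation */
  }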