Replace redundant code in __hash_page_4K() with the helper functions
get_hidx_gslot() and set_hidx_slot().

Signed-off-by: Ram Pai <linuxram@xxxxxxxxxx>
---
 arch/powerpc/mm/hash64_64k.c | 24 ++++++------------------
 1 file changed, 6 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 5cbdaa9..cb48a60 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -103,18 +103,12 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (__rpte_sub_valid(rpte, subpg_index)) {
 		int ret;
 
-		hash = hpt_hash(vpn, shift, ssize);
-		hidx = __rpte_to_hidx(rpte, subpg_index);
-		if (hidx & _PTEIDX_SECONDARY)
-			hash = ~hash;
-		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-		slot += hidx & _PTEIDX_GROUP_IX;
-
-		ret = mmu_hash_ops.hpte_updatepp(slot, rflags, vpn,
+		gslot = get_hidx_gslot(vpn, shift, ssize, rpte, subpg_index);
+		ret = mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn,
 						 MMU_PAGE_4K, MMU_PAGE_4K,
 						 ssize, flags);
 		/*
-		 *if we failed because typically the HPTE wasn't really here
+		 * if we failed because typically the HPTE wasn't really here
 		 * we try an insertion.
 		 */
 		if (ret == -1)
@@ -214,15 +208,9 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * Since we have H_PAGE_BUSY set on ptep, we can be sure
 	 * nobody is undating hidx.
 	 */
-	hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
-	rpte.hidx &= ~(0xfUL << (subpg_index << 2));
-	*hidxp = rpte.hidx | (slot << (subpg_index << 2));
-	new_pte = mark_subptegroup_valid(new_pte, subpg_index);
-	new_pte |= H_PAGE_HASHPTE;
-	/*
-	 * check __real_pte for details on matching smp_rmb()
-	 */
-	smp_wmb();
+	new_pte |= H_PAGE_HASHPTE;
+	new_pte |= set_hidx_slot(ptep, rpte, subpg_index, slot);
+
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
 	return 0;
 }
-- 
1.8.3.1
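
For reference, the two helpers consolidate the open-coded logic this patch removes. The C sketch below is reconstructed from those removed lines and is purely illustrative: the real get_hidx_gslot() and set_hidx_slot() are defined elsewhere in this series, so their exact signatures and return values (in particular whether set_hidx_slot() also accounts for the sub-page valid bit previously set via mark_subptegroup_valid()) may differ. It assumes the usual book3s/64 hash MMU definitions (real_pte_t, hpt_hash(), _PTEIDX_SECONDARY, _PTEIDX_GROUP_IX, HPTES_PER_GROUP, PTRS_PER_PTE, H_PAGE_HASHPTE, smp_wmb()).

/* Sketch only: reconstructed from the code removed above, not the series' actual helpers. */
static inline unsigned long get_hidx_gslot(unsigned long vpn, unsigned long shift,
					   int ssize, real_pte_t rpte,
					   unsigned int subpg_index)
{
	unsigned long hash, hidx, gslot;

	/* Same hash-group computation that used to be open-coded in __hash_page_4K(). */
	hash = hpt_hash(vpn, shift, ssize);
	hidx = __rpte_to_hidx(rpte, subpg_index);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	gslot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	gslot += hidx & _PTEIDX_GROUP_IX;
	return gslot;
}

static inline unsigned long set_hidx_slot(pte_t *ptep, real_pte_t rpte,
					  unsigned int subpg_index,
					  unsigned long slot)
{
	unsigned long *hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);

	/* Record the HPTE slot for this 4K sub-page in the second half of the PTE. */
	rpte.hidx &= ~(0xfUL << (subpg_index << 2));
	*hidxp = rpte.hidx | (slot << (subpg_index << 2));
	/*
	 * Publish the hidx bits before the caller updates the PTE itself;
	 * readers pair with the matching smp_rmb() in __real_pte().
	 */
	smp_wmb();
	/*
	 * Assumed return value: the bits the caller ORs into new_pte.  The
	 * real helper may instead return the sub-page valid bit that
	 * mark_subptegroup_valid() used to set.
	 */
	return 0UL;
}

Factoring the hash-group computation and the hidx publication (including the smp_wmb() that pairs with the smp_rmb() in __real_pte()) into helpers lets __hash_page_4K() and the other hash-fault paths share a single copy of this logic instead of open-coding it at each call site.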