There are multiple instances of pfn_to_section_nr() and __pfn_to_section()
when CONFIG_SPARSEMEM is enabled. This can be optimized if the memory
section is fetched earlier. This replaces the open-coded PFN and ADDR
conversions with the PFN_PHYS() and PHYS_PFN() helpers. While there, also
add a comment explaining the upper-bits check. This does not cause any
functional change.

Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Ard Biesheuvel <ardb@xxxxxxxxxx>
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
---
 arch/arm64/mm/init.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 1141075e4d53..5d8fd5360a68 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -219,16 +219,25 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
 int pfn_valid(unsigned long pfn)
 {
-	phys_addr_t addr = pfn << PAGE_SHIFT;
+	phys_addr_t addr = PFN_PHYS(pfn);
 
-	if ((addr >> PAGE_SHIFT) != pfn)
+	/*
+	 * Ensure the upper PAGE_SHIFT bits are clear in the
+	 * pfn. Else it might lead to false positives when
+	 * some of the upper bits are set, but the lower bits
+	 * match a valid pfn.
+	 */
+	if (PHYS_PFN(addr) != pfn)
 		return 0;
 
 #ifdef CONFIG_SPARSEMEM
+{
+	struct mem_section *ms = __pfn_to_section(pfn);
+
 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
 		return 0;
 
-	if (!valid_section(__pfn_to_section(pfn)))
+	if (!valid_section(ms))
 		return 0;
 
 	/*
@@ -240,8 +249,9 @@ int pfn_valid(unsigned long pfn)
 	 * memory sections covering all of hotplug memory including
 	 * both normal and ZONE_DEVICE based.
 	 */
-	if (!early_section(__pfn_to_section(pfn)))
-		return pfn_section_valid(__pfn_to_section(pfn), pfn);
+	if (!early_section(ms))
+		return pfn_section_valid(ms, pfn);
+}
 #endif
 	return memblock_is_map_memory(addr);
 }
-- 
2.20.1
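
To illustrate the false positive that the new comment describes, below is a
minimal standalone userspace sketch (not part of the patch). PAGE_SHIFT,
PFN_PHYS() and PHYS_PFN() mirror their kernel definitions from
include/linux/pfn.h, assuming 4K pages; the helper upper_bits_clear() is
invented here purely for the demonstration.

/*
 * Userspace sketch of the upper-bits check in pfn_valid(). The macro
 * definitions mirror include/linux/pfn.h; PAGE_SHIFT = 12 assumes 4K
 * pages, and upper_bits_clear() is a hypothetical name for this demo.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
typedef uint64_t phys_addr_t;

#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)
#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

static int upper_bits_clear(unsigned long pfn)
{
	phys_addr_t addr = PFN_PHYS(pfn);

	/*
	 * The left shift discards the top PAGE_SHIFT bits of the pfn
	 * (bits 52-63 with 4K pages), so the round trip only matches
	 * when those bits were clear to begin with.
	 */
	return PHYS_PFN(addr) == pfn;
}

int main(void)
{
	unsigned long good = 0x80000UL;			/* plausible pfn */
	unsigned long bad  = good | (1UL << 60);	/* upper bit set */

	printf("good: %d\n", upper_bits_clear(good));	/* prints 1 */
	printf("bad:  %d\n", upper_bits_clear(bad));	/* prints 0 */
	return 0;
}

Without the check, the bogus pfn above would shift down to the same address
as the genuine one and could wrongly pass memblock_is_map_memory(). Caching
__pfn_to_section(pfn) in a local ms is the same idea applied to the section
lookups: compute the value once, then reuse it for valid_section(),
early_section() and pfn_section_valid().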