This patch makes memmap_valid_within() return bool, since the function
only ever returns one or zero. It also simplifies memmap_valid_within()
by collapsing its two checks into a single boolean expression.

No functional change.

Signed-off-by: Yaowei Bai <baiyaowei@xxxxxxxxxxxxxxxxxxxx>
---
 include/linux/mmzone.h |  6 +++---
 mm/mmzone.c            | 10 ++--------
 2 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9963846..b9b59bb8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1202,13 +1202,13 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
  * the zone and PFN linkages are still valid. This is expensive, but walkers
  * of the full memmap are extremely rare.
  */
-int memmap_valid_within(unsigned long pfn,
+bool memmap_valid_within(unsigned long pfn,
 					struct page *page, struct zone *zone);
 #else
-static inline int memmap_valid_within(unsigned long pfn,
+static inline bool memmap_valid_within(unsigned long pfn,
 					struct page *page, struct zone *zone)
 {
-	return 1;
+	return true;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
 
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 7d87ebb..de0824e 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -72,16 +72,10 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
 }
 
 #ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
-int memmap_valid_within(unsigned long pfn,
+bool memmap_valid_within(unsigned long pfn,
 					struct page *page, struct zone *zone)
 {
-	if (page_to_pfn(page) != pfn)
-		return 0;
-
-	if (page_zone(page) != zone)
-		return 0;
-
-	return 1;
+	return page_to_pfn(page) == pfn && page_zone(page) == zone;
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
 
-- 
1.9.1
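
For reference, a minimal sketch (not part of this patch) of how a
full-memmap walker typically uses this helper. The loop shape is
modelled on the pagetypeinfo walker in mm/vmstat.c, but
walk_zone_memmap() itself is a hypothetical name used only for
illustration:

static void walk_zone_memmap(struct zone *zone)
{
	unsigned long pfn;

	for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/*
		 * Watch for unexpected holes punched in the memmap:
		 * skip pfns whose struct page does not really belong
		 * to this zone on CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
		 * architectures.
		 */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		/* ... operate on page ... */
	}
}

With the bool return type the call site reads as a plain predicate,
rather than a comparison against magic 0/1 return values.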