Greg Kroah-Hartman requested that memory.c use the .is_visible() method
instead of #ifdefs to control which of its attributes are exposed:

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

and

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

To that end:

- the .is_visible() method is implemented, and IS_ENABLED(), rather than
  #ifdef, is used to determine the visibility of each attribute.
- the DEVICE_ATTR_xx() attributes are moved outside of the #ifdefs, so
  that their structs are always present in memory_memblk_attrs[] and
  memory_root_attrs[].
- the function bodies of the callbacks are now wrapped with IS_ENABLED(),
  since the callbacks must exist now that the attributes are always
  compiled in (though not necessarily visible).

No functionality change intended.

Signed-off-by: Eric DeVolder <eric.devolder@xxxxxxxxxx>
---
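
For reviewers: a minimal, self-contained sketch of the pattern being
adopted, using a hypothetical CONFIG_FOO option and a made-up "foo"
attribute (none of these names come from memory.c). The attribute and
its callback are always built; the callback body is gated with
IS_ENABLED(), and the group's .is_visible() decides whether the file
shows up in sysfs at all:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		/* Body gated with IS_ENABLED() so the function always exists. */
		if (IS_ENABLED(CONFIG_FOO))
			return sysfs_emit(buf, "%d\n", 42);	/* placeholder value */
		return 0;
	}
	static DEVICE_ATTR_RO(foo);

	static struct attribute *foo_attrs[] = {
		&dev_attr_foo.attr,	/* no #ifdef around the entry */
		NULL
	};

	/* Called by the sysfs core for each attribute when the group is created. */
	static umode_t foo_attr_is_visible(struct kobject *kobj,
					   struct attribute *attr, int unused)
	{
		if (attr == &dev_attr_foo.attr && !IS_ENABLED(CONFIG_FOO))
			return 0;		/* hide the file */
		return attr->mode;		/* keep the declared permissions */
	}

	static const struct attribute_group foo_attr_group = {
		.attrs		= foo_attrs,
		.is_visible	= foo_attr_is_visible,
	};

When CONFIG_FOO is disabled, the compiler can still discard the dead
branch in foo_show(), while .is_visible() keeps the file from appearing
under /sys in the first place.
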
 drivers/base/memory.c | 229 ++++++++++++++++++++++++++----------------
 1 file changed, 140 insertions(+), 89 deletions(-)

diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index b456ac213610..7294112fe646 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -391,62 +391,66 @@ static ssize_t phys_device_show(struct device *dev,
 			arch_get_memory_phys_device(start_pfn));
 }

-#ifdef CONFIG_MEMORY_HOTREMOVE
 static int print_allowed_zone(char *buf, int len, int nid,
 			      struct memory_group *group,
 			      unsigned long start_pfn, unsigned long nr_pages,
 			      int online_type, struct zone *default_zone)
 {
-	struct zone *zone;
+	if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) {
+		struct zone *zone;

-	zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
-	if (zone == default_zone)
-		return 0;
+		zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
+		if (zone == default_zone)
+			return 0;

-	return sysfs_emit_at(buf, len, " %s", zone->name);
+		return sysfs_emit_at(buf, len, " %s", zone->name);
+	}
+	return 0;
 }

 static ssize_t valid_zones_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
-	struct memory_block *mem = to_memory_block(dev);
-	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
-	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-	struct memory_group *group = mem->group;
-	struct zone *default_zone;
-	int nid = mem->nid;
-	int len = 0;
+	if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) {
+		struct memory_block *mem = to_memory_block(dev);
+		unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+		unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+		struct memory_group *group = mem->group;
+		struct zone *default_zone;
+		int nid = mem->nid;
+		int len = 0;

-	/*
-	 * Check the existing zone. Make sure that we do that only on the
-	 * online nodes otherwise the page_zone is not reliable
-	 */
-	if (mem->state == MEM_ONLINE) {
 		/*
-		 * If !mem->zone, the memory block spans multiple zones and
-		 * cannot get offlined.
-		 */
-		default_zone = mem->zone;
-		if (!default_zone)
-			return sysfs_emit(buf, "%s\n", "none");
-		len += sysfs_emit_at(buf, len, "%s", default_zone->name);
-		goto out;
-	}
+		 * Check the existing zone. Make sure that we do that only on the
+		 * online nodes otherwise the page_zone is not reliable
+		 */
+		if (mem->state == MEM_ONLINE) {
+			/*
+			 * If !mem->zone, the memory block spans multiple zones and
+			 * cannot get offlined.
+			 */
+			default_zone = mem->zone;
+			if (!default_zone)
+				return sysfs_emit(buf, "%s\n", "none");
+			len += sysfs_emit_at(buf, len, "%s", default_zone->name);
+			goto out;
+		}

-	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
-					  start_pfn, nr_pages);
+		default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
+						  start_pfn, nr_pages);

-	len += sysfs_emit_at(buf, len, "%s", default_zone->name);
-	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
-				  MMOP_ONLINE_KERNEL, default_zone);
-	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
-				  MMOP_ONLINE_MOVABLE, default_zone);
+		len += sysfs_emit_at(buf, len, "%s", default_zone->name);
+		len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
+					  MMOP_ONLINE_KERNEL, default_zone);
+		len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
+					  MMOP_ONLINE_MOVABLE, default_zone);
 out:
-	len += sysfs_emit_at(buf, len, "\n");
-	return len;
+		len += sysfs_emit_at(buf, len, "\n");
+		return len;
+	}
+	return 0;
 }
 static DEVICE_ATTR_RO(valid_zones);
-#endif

 static DEVICE_ATTR_RO(phys_index);
 static DEVICE_ATTR_RW(state);
@@ -496,43 +500,43 @@ static DEVICE_ATTR_RW(auto_online_blocks);
  * as well as ppc64 will do all of their discovery in userspace
  * and will require this interface.
  */
-#ifdef CONFIG_ARCH_MEMORY_PROBE
 static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
 			   const char *buf, size_t count)
 {
-	u64 phys_addr;
-	int nid, ret;
-	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
+	if (IS_ENABLED(CONFIG_ARCH_MEMORY_PROBE)) {
+		u64 phys_addr;
+		int nid, ret;
+		unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

-	ret = kstrtoull(buf, 0, &phys_addr);
-	if (ret)
-		return ret;
+		ret = kstrtoull(buf, 0, &phys_addr);
+		if (ret)
+			return ret;

-	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
-		return -EINVAL;
+		if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
+			return -EINVAL;

-	ret = lock_device_hotplug_sysfs();
-	if (ret)
-		return ret;
+		ret = lock_device_hotplug_sysfs();
+		if (ret)
+			return ret;

-	nid = memory_add_physaddr_to_nid(phys_addr);
-	ret = __add_memory(nid, phys_addr,
-			   MIN_MEMORY_BLOCK_SIZE * sections_per_block,
-			   MHP_NONE);
+		nid = memory_add_physaddr_to_nid(phys_addr);
+		ret = __add_memory(nid, phys_addr,
+				   MIN_MEMORY_BLOCK_SIZE * sections_per_block,
+				   MHP_NONE);

-	if (ret)
-		goto out;
+		if (ret)
+			goto out;

-	ret = count;
+		ret = count;
 out:
-	unlock_device_hotplug();
-	return ret;
+		unlock_device_hotplug();
+		return ret;
+	}
+	return 0;
 }

 static DEVICE_ATTR_WO(probe);
-#endif

-#ifdef CONFIG_MEMORY_FAILURE
 /*
  * Support for offlining pages of memory
  */
@@ -542,15 +546,19 @@ static ssize_t soft_offline_page_store(struct device *dev,
 					struct device_attribute *attr,
 					const char *buf, size_t count)
 {
-	int ret;
-	u64 pfn;
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	if (kstrtoull(buf, 0, &pfn) < 0)
-		return -EINVAL;
-	pfn >>= PAGE_SHIFT;
-	ret = soft_offline_page(pfn, 0);
-	return ret == 0 ? count : ret;
+	if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) {
+		int ret;
+		u64 pfn;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (kstrtoull(buf, 0, &pfn) < 0)
+			return -EINVAL;
+		pfn >>= PAGE_SHIFT;
+		ret = soft_offline_page(pfn, 0);
+		return ret == 0 ? count : ret;
+	}
+	return 0;
 }

 /* Forcibly offline a page, including killing processes. */
@@ -558,22 +566,25 @@ static ssize_t hard_offline_page_store(struct device *dev,
 					struct device_attribute *attr,
 					const char *buf, size_t count)
 {
-	int ret;
-	u64 pfn;
-	if (!capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	if (kstrtoull(buf, 0, &pfn) < 0)
-		return -EINVAL;
-	pfn >>= PAGE_SHIFT;
-	ret = memory_failure(pfn, MF_SW_SIMULATED);
-	if (ret == -EOPNOTSUPP)
-		ret = 0;
-	return ret ? ret : count;
+	if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) {
+		int ret;
+		u64 pfn;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (kstrtoull(buf, 0, &pfn) < 0)
+			return -EINVAL;
+		pfn >>= PAGE_SHIFT;
+		ret = memory_failure(pfn, MF_SW_SIMULATED);
+		if (ret == -EOPNOTSUPP)
+			ret = 0;
+		return ret ? ret : count;
+	}
+	return 0;
 }

 static DEVICE_ATTR_WO(soft_offline_page);
 static DEVICE_ATTR_WO(hard_offline_page);
-#endif

 /* See phys_device_show(). */
 int __weak arch_get_memory_phys_device(unsigned long start_pfn)
@@ -611,14 +622,35 @@ static struct attribute *memory_memblk_attrs[] = {
 	&dev_attr_state.attr,
 	&dev_attr_phys_device.attr,
 	&dev_attr_removable.attr,
-#ifdef CONFIG_MEMORY_HOTREMOVE
 	&dev_attr_valid_zones.attr,
-#endif
 	NULL
 };

+static umode_t
+memory_memblk_attr_is_visible(struct kobject *kobj,
+			      struct attribute *attr, int unused)
+{
+	umode_t mode = attr->mode;
+
+	if (attr == &dev_attr_phys_index.attr)
+		return mode;
+	if (attr == &dev_attr_state.attr)
+		return mode;
+	if (attr == &dev_attr_phys_device.attr)
+		return mode;
+	if (attr == &dev_attr_removable.attr)
+		return mode;
+	if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) {
+		if (attr == &dev_attr_valid_zones.attr)
+			return mode;
+	}
+
+	return 0;
+}
+
 static const struct attribute_group memory_memblk_attr_group = {
 	.attrs = memory_memblk_attrs,
+	.is_visible = memory_memblk_attr_is_visible,
 };

 static const struct attribute_group *memory_memblk_attr_groups[] = {
@@ -878,22 +910,41 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
 }

 static struct attribute *memory_root_attrs[] = {
-#ifdef CONFIG_ARCH_MEMORY_PROBE
 	&dev_attr_probe.attr,
-#endif
-
-#ifdef CONFIG_MEMORY_FAILURE
 	&dev_attr_soft_offline_page.attr,
 	&dev_attr_hard_offline_page.attr,
-#endif
-
 	&dev_attr_block_size_bytes.attr,
 	&dev_attr_auto_online_blocks.attr,
 	NULL
 };

+static umode_t
+memory_root_attr_is_visible(struct kobject *kobj,
+			    struct attribute *attr, int unused)
+{
+	umode_t mode = attr->mode;
+
+	if (IS_ENABLED(CONFIG_ARCH_MEMORY_PROBE)) {
+		if (attr == &dev_attr_probe.attr)
+			return mode;
+	}
+	if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) {
+		if (attr == &dev_attr_soft_offline_page.attr)
+			return mode;
+		if (attr == &dev_attr_hard_offline_page.attr)
+			return mode;
+	}
+	if (attr == &dev_attr_block_size_bytes.attr)
+		return mode;
+	if (attr == &dev_attr_auto_online_blocks.attr)
+		return mode;
+
+	return 0;
+}
+
 static const struct attribute_group memory_root_attr_group = {
 	.attrs = memory_root_attrs,
+	.is_visible = memory_root_attr_is_visible,
 };

 static const struct attribute_group *memory_root_attr_groups[] = {
-- 
2.31.1