The patch titled
     Subject: mm/khugepaged: use helper macro __ATTR_RW
has been added to the -mm mm-unstable branch.  Its filename is
     mm-khugepaged-use-helper-macro-__attr_rw.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-khugepaged-use-helper-macro-__attr_rw.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything branch at
git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Subject: mm/khugepaged: use helper macro __ATTR_RW
Date: Sat, 11 Jun 2022 16:47:29 +0800

Use helper macro __ATTR_RW to define the khugepaged attributes.  Minor
readability improvement.

Link: https://lkml.kernel.org/r/20220611084731.55155-6-linmiaohe@xxxxxxxxxx
Signed-off-by: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Cc: Alistair Popple <apopple@xxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: David Howells <dhowells@xxxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: NeilBrown <neilb@xxxxxxx>
Cc: Peter Xu <peterx@xxxxxxxxxx>
Cc: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/khugepaged.c |   37 +++++++++++++++----------------------
 1 file changed, 15 insertions(+), 22 deletions(-)

--- a/mm/khugepaged.c~mm-khugepaged-use-helper-macro-__attr_rw
+++ a/mm/khugepaged.c
@@ -147,8 +147,7 @@ static ssize_t scan_sleep_millisecs_stor
 	return count;
 }
 static struct kobj_attribute scan_sleep_millisecs_attr =
-	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
-	       scan_sleep_millisecs_store);
+	__ATTR_RW(scan_sleep_millisecs);
 
 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 					  struct kobj_attribute *attr,
@@ -175,8 +174,7 @@ static ssize_t alloc_sleep_millisecs_sto
 	return count;
 }
 static struct kobj_attribute alloc_sleep_millisecs_attr =
-	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
-	       alloc_sleep_millisecs_store);
+	__ATTR_RW(alloc_sleep_millisecs);
 
 static ssize_t pages_to_scan_show(struct kobject *kobj,
 				  struct kobj_attribute *attr,
@@ -200,8 +198,7 @@ static ssize_t pages_to_scan_store(struc
 	return count;
 }
 static struct kobj_attribute pages_to_scan_attr =
-	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
-	       pages_to_scan_store);
+	__ATTR_RW(pages_to_scan);
 
 static ssize_t pages_collapsed_show(struct kobject *kobj,
 				    struct kobj_attribute *attr,
@@ -221,13 +218,13 @@ static ssize_t full_scans_show(struct ko
 static struct kobj_attribute full_scans_attr =
 	__ATTR_RO(full_scans);
 
-static ssize_t khugepaged_defrag_show(struct kobject *kobj,
+static ssize_t defrag_show(struct kobject *kobj,
 				      struct kobj_attribute *attr, char *buf)
 {
 	return single_hugepage_flag_show(kobj, attr, buf,
 				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 }
-static ssize_t khugepaged_defrag_store(struct kobject *kobj,
+static ssize_t defrag_store(struct kobject *kobj,
 				       struct kobj_attribute *attr,
 				       const char *buf, size_t count)
 {
@@ -235,8 +232,7 @@ static ssize_t khugepaged_defrag_store(s
 				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 }
 static struct kobj_attribute khugepaged_defrag_attr =
-	__ATTR(defrag, 0644, khugepaged_defrag_show,
-	       khugepaged_defrag_store);
+	__ATTR_RW(defrag);
 
 /*
  * max_ptes_none controls if khugepaged should collapse hugepages over
@@ -246,13 +242,13 @@ static struct kobj_attribute khugepaged_
  * runs. Increasing max_ptes_none will instead potentially reduce the
  * free memory in the system during the khugepaged scan.
  */
-static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
+static ssize_t max_ptes_none_show(struct kobject *kobj,
 					     struct kobj_attribute *attr,
 					     char *buf)
 {
 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
 }
-static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
+static ssize_t max_ptes_none_store(struct kobject *kobj,
 					      struct kobj_attribute *attr,
 					      const char *buf, size_t count)
 {
@@ -268,17 +264,16 @@ static ssize_t khugepaged_max_ptes_none_
 	return count;
 }
 static struct kobj_attribute khugepaged_max_ptes_none_attr =
-	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
-	       khugepaged_max_ptes_none_store);
+	__ATTR_RW(max_ptes_none);
 
-static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
+static ssize_t max_ptes_swap_show(struct kobject *kobj,
 					     struct kobj_attribute *attr,
 					     char *buf)
 {
 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
 }
 
-static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
+static ssize_t max_ptes_swap_store(struct kobject *kobj,
 					      struct kobj_attribute *attr,
 					      const char *buf, size_t count)
 {
@@ -295,17 +290,16 @@ static ssize_t khugepaged_max_ptes_swap_
 }
 
 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
-	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
-	       khugepaged_max_ptes_swap_store);
+	__ATTR_RW(max_ptes_swap);
 
-static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
+static ssize_t max_ptes_shared_show(struct kobject *kobj,
 					       struct kobj_attribute *attr,
 					       char *buf)
 {
 	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
 }
 
-static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
+static ssize_t max_ptes_shared_store(struct kobject *kobj,
 						struct kobj_attribute *attr,
 						const char *buf, size_t count)
 {
@@ -322,8 +316,7 @@ static ssize_t khugepaged_max_ptes_share
 }
 
 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
-	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
-	       khugepaged_max_ptes_shared_store);
+	__ATTR_RW(max_ptes_shared);
 
 static struct attribute *khugepaged_attr[] = {
 	&khugepaged_defrag_attr.attr,
_

Patches currently in -mm which might be from linmiaohe@xxxxxxxxxx are

maintainers-add-myself-as-a-memory-failure-reviewer.patch
mm-shmemc-clean-up-comment-of-shmem_swapin_folio.patch
mm-reduce-the-rcu-lock-duration.patch
mm-migration-remove-unneeded-lock-page-and-pagemovable-check.patch
mm-migration-return-errno-when-isolate_huge_page-failed.patch
mm-migration-fix-potential-pte_unmap-on-an-not-mapped-pte.patch
mm-memremap-fix-wrong-function-name-above-memremap_pages.patch
mm-swapfile-make-security_vm_enough_memory_mm-work-as-expected.patch
mm-swapfile-fix-possible-data-races-of-inuse_pages.patch
mm-swap-remove-swap_cache_info-statistics.patch
mm-vmscan-dont-try-to-reclaim-freed-folios.patch
lib-test_hmm-avoid-accessing-uninitialized-pages.patch
mm-memremap-fix-memunmap_pages-race-with-get_dev_pagemap.patch
mm-khugepaged-remove-unneeded-shmem_huge_enabled-check.patch
mm-khugepaged-stop-swapping-in-page-when-vm_fault_retry-occurs.patch
mm-khugepaged-trivial-typo-and-codestyle-cleanup.patch
mm-khugepaged-minor-cleanup-for-collapse_file.patch
mm-khugepaged-use-helper-macro-__attr_rw.patch
mm-khugepaged-remove-unneeded-return-value-of-khugepaged_add_pte_mapped_thp.patch
mm-khugepaged-try-to-free-transhuge-swapcache-when-possible.patch
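
For reference, a minimal sketch of why the show/store handlers are renamed
along with the switch to __ATTR_RW().  This is based on the __ATTR_RW()
definition in include/linux/sysfs.h; the expansion below is illustrative and
not part of the patch itself:

/*
 * include/linux/sysfs.h: __ATTR_RW() builds a read-write (0644) attribute
 * and derives its show/store handler names from the attribute name by
 * token pasting.
 */
#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)

/* Thus the new definition */
static struct kobj_attribute khugepaged_defrag_attr = __ATTR_RW(defrag);
/*
 * expands to __ATTR(defrag, 0644, defrag_show, defrag_store), which is why
 * khugepaged_defrag_show/khugepaged_defrag_store (and the max_ptes_*
 * handlers) lose their khugepaged_ prefix in this patch.
 */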