The patch titled
     Subject: mm/compaction: add tracepoint to observe behaviour of compaction defer
has been removed from the -mm tree.  Its filename was
     mm-compaction-add-tracepoint-to-observe-behaviour-of-compaction-defer.patch

This patch was dropped because it was merged into mainline or a subsystem tree

------------------------------------------------------
From: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Subject: mm/compaction: add tracepoint to observe behaviour of compaction defer

The compaction deferring logic is a heavy hammer that blocks the way to
compaction.  It doesn't consider the overall system state, so it can
falsely prevent a user from running compaction.  In other words, even if
the system has enough memory ranges to compact, compaction can be skipped
by the deferring logic.  This patch adds new tracepoints to help
understand how the deferring logic works.  They will also help to check
compaction success and failure.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/compaction.h        |   65 +------------------------------
 include/trace/events/compaction.h |   56 ++++++++++++++++++++++
 mm/compaction.c                   |   71 ++++++++++++++++++++++++++++
 3 files changed, 132 insertions(+), 60 deletions(-)

diff -puN include/linux/compaction.h~mm-compaction-add-tracepoint-to-observe-behaviour-of-compaction-defer include/linux/compaction.h
--- a/include/linux/compaction.h~mm-compaction-add-tracepoint-to-observe-behaviour-of-compaction-defer
+++ a/include/linux/compaction.h
@@ -44,66 +44,11 @@ extern void reset_isolation_suitable(pg_
 extern unsigned long compaction_suitable(struct zone *zone, int order,
 					int alloc_flags, int classzone_idx);
 
-/* Do not skip compaction more than 64 times */
-#define COMPACT_MAX_DEFER_SHIFT 6
-
-/*
- * Compaction is deferred when compaction fails to result in a page
- * allocation success. 1 << compact_defer_limit compactions are skipped up
- * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
- */
-static inline void defer_compaction(struct zone *zone, int order)
-{
-	zone->compact_considered = 0;
-	zone->compact_defer_shift++;
-
-	if (order < zone->compact_order_failed)
-		zone->compact_order_failed = order;
-
-	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
-		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
-}
-
-/* Returns true if compaction should be skipped this time */
-static inline bool compaction_deferred(struct zone *zone, int order)
-{
-	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
-
-	if (order < zone->compact_order_failed)
-		return false;
-
-	/* Avoid possible overflow */
-	if (++zone->compact_considered > defer_limit)
-		zone->compact_considered = defer_limit;
-
-	return zone->compact_considered < defer_limit;
-}
-
-/*
- * Update defer tracking counters after successful compaction of given order,
- * which means an allocation either succeeded (alloc_success == true) or is
- * expected to succeed.
- */
-static inline void compaction_defer_reset(struct zone *zone, int order,
-		bool alloc_success)
-{
-	if (alloc_success) {
-		zone->compact_considered = 0;
-		zone->compact_defer_shift = 0;
-	}
-	if (order >= zone->compact_order_failed)
-		zone->compact_order_failed = order + 1;
-}
-
-/* Returns true if restarting compaction after many failures */
-static inline bool compaction_restarting(struct zone *zone, int order)
-{
-	if (order < zone->compact_order_failed)
-		return false;
-
-	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
-		zone->compact_considered >= 1UL << zone->compact_defer_shift;
-}
+extern void defer_compaction(struct zone *zone, int order);
+extern bool compaction_deferred(struct zone *zone, int order);
+extern void compaction_defer_reset(struct zone *zone, int order,
+		bool alloc_success);
+extern bool compaction_restarting(struct zone *zone, int order);
 
 #else
 static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
diff -puN include/trace/events/compaction.h~mm-compaction-add-tracepoint-to-observe-behaviour-of-compaction-defer include/trace/events/compaction.h
--- a/include/trace/events/compaction.h~mm-compaction-add-tracepoint-to-observe-behaviour-of-compaction-defer
+++ a/include/trace/events/compaction.h
@@ -238,6 +238,62 @@ DEFINE_EVENT(mm_compaction_suitable_temp
 	TP_ARGS(zone, order, ret)
 );
 
+#ifdef CONFIG_COMPACTION
+DECLARE_EVENT_CLASS(mm_compaction_defer_template,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order),
+
+	TP_STRUCT__entry(
+		__field(int, nid)
+		__field(char *, name)
+		__field(int, order)
+		__field(unsigned int, considered)
+		__field(unsigned int, defer_shift)
+		__field(int, order_failed)
+	),
+
+	TP_fast_assign(
+		__entry->nid = zone_to_nid(zone);
+		__entry->name = (char *)zone->name;
+		__entry->order = order;
+		__entry->considered = zone->compact_considered;
+		__entry->defer_shift = zone->compact_defer_shift;
+		__entry->order_failed = zone->compact_order_failed;
+	),
+
+	TP_printk("node=%d zone=%-8s order=%d order_failed=%d consider=%u limit=%lu",
+		__entry->nid,
+		__entry->name,
+		__entry->order,
+		__entry->order_failed,
+		__entry->considered,
+		1UL << __entry->defer_shift)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_deferred,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_compaction,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order)
+);
+
+DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset,
+
+	TP_PROTO(struct zone *zone, int order),
+
+	TP_ARGS(zone, order)
+);
+#endif
+
 #endif /* _TRACE_COMPACTION_H */
 
 /* This part must be outside protection */
diff -puN mm/compaction.c~mm-compaction-add-tracepoint-to-observe-behaviour-of-compaction-defer mm/compaction.c
--- a/mm/compaction.c~mm-compaction-add-tracepoint-to-observe-behaviour-of-compaction-defer
+++ a/mm/compaction.c
@@ -124,6 +124,77 @@ static struct page *pageblock_pfn_to_pag
 }
 
 #ifdef CONFIG_COMPACTION
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_limit compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
+ */
+void defer_compaction(struct zone *zone, int order)
+{
+	zone->compact_considered = 0;
+	zone->compact_defer_shift++;
+
+	if (order < zone->compact_order_failed)
+		zone->compact_order_failed = order;
+
+	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+
+	trace_mm_compaction_defer_compaction(zone, order);
+}
+
+/* Returns true if compaction should be skipped this time */
+bool compaction_deferred(struct zone *zone, int order)
+{
+	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+	if (order < zone->compact_order_failed)
+		return false;
+
+	/* Avoid possible overflow */
+	if (++zone->compact_considered > defer_limit)
+		zone->compact_considered = defer_limit;
+
+	if (zone->compact_considered >= defer_limit)
+		return false;
+
+	trace_mm_compaction_deferred(zone, order);
+
+	return true;
+}
+
+/*
+ * Update defer tracking counters after successful compaction of given order,
+ * which means an allocation either succeeded (alloc_success == true) or is
+ * expected to succeed.
+ */
+void compaction_defer_reset(struct zone *zone, int order,
+		bool alloc_success)
+{
+	if (alloc_success) {
+		zone->compact_considered = 0;
+		zone->compact_defer_shift = 0;
+	}
+	if (order >= zone->compact_order_failed)
+		zone->compact_order_failed = order + 1;
+
+	trace_mm_compaction_defer_reset(zone, order);
+}
+
+/* Returns true if restarting compaction after many failures */
+bool compaction_restarting(struct zone *zone, int order)
+{
+	if (order < zone->compact_order_failed)
+		return false;
+
+	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
+		zone->compact_considered >= 1UL << zone->compact_defer_shift;
+}
+
 /* Returns true if the pageblock should be scanned for pages to isolate. */
 static inline bool isolation_suitable(struct compact_control *cc,
 					struct page *page)
_

Patches currently in -mm which might be from iamjoonsoo.kim@xxxxxxx are

origin.patch
list_lru-introduce-list_lru_shrink_countwalk.patch
fs-consolidate-nrfree_cached_objects-args-in-shrink_control.patch
vmscan-per-memory-cgroup-slab-shrinkers.patch
memcg-rename-some-cache-id-related-variables.patch
memcg-add-rwsem-to-synchronize-against-memcg_caches-arrays-relocation.patch
list_lru-get-rid-of-active_nodes.patch
list_lru-organize-all-list_lrus-to-list.patch
list_lru-introduce-per-memcg-lists.patch
fs-make-shrinker-memcg-aware.patch
slab-embed-memcg_cache_params-to-kmem_cache.patch
slab-link-memcg-caches-of-the-same-kind-into-a-list.patch
cgroup-release-css-id-after-css_free.patch
slab-use-css-id-for-naming-per-memcg-caches.patch
memcg-free-memcg_caches-slot-on-css-offline.patch
list_lru-add-helpers-to-isolate-items.patch
memcg-reparent-list_lrus-and-free-kmemcg_id-on-css-offline.patch
slub-never-fail-to-shrink-cache.patch
slub-never-fail-to-shrink-cache-init-discard-list-after-freeing-slabs.patch
slub-fix-kmem_cache_shrink-return-value.patch
slub-make-dead-caches-discard-free-slabs-immediately.patch
mm-compaction-fix-wrong-order-check-in-compact_finished.patch
mm-compaction-stop-the-isolation-when-we-isolate-enough-freepage.patch
mm-internalh-dont-split-printk-call-in-two.patch
mm-page_allocc-pull-out-init-code-from-build_all_zonelists.patch
mm-mm_initc-mark-mminit_verify_zonelist-as-__init.patch
mm-mm_initc-mark-mminit_loglevel-__meminitdata.patch
kernel-cpusetc-mark-cpuset_init_current_mems_allowed-as-__init.patch
mm-fix-negative-nr_isolated-counts.patch
mm-util-add-kstrdup_const.patch
kernfs-convert-node-name-allocation-to-kstrdup_const.patch
clk-convert-clock-name-allocations-to-kstrdup_const.patch
mm-slab-convert-cache-name-allocations-to-kstrdup_const.patch
mm-slab-convert-cache-name-allocations-to-kstrdup_const-fix.patch
fs-namespace-convert-devname-allocation-to-kstrdup_const.patch
compiler-introduce-__aliassymbol-shortcut.patch
add-kernel-address-sanitizer-infrastructure.patch
kasan-disable-memory-hotplug.patch
x86_64-add-kasan-support.patch
mm-page_alloc-add-kasan-hooks-on-alloc-and-free-paths.patch
mm-slub-introduce-virt_to_obj-function.patch
mm-slub-share-object_err-function.patch
mm-slub-introduce-metadata_access_enable-metadata_access_disable.patch
mm-slub-add-kernel-address-sanitizer-support-for-slub-allocator.patch
fs-dcache-manually-unpoison-dname-after-allocation-to-shut-up-kasans-reports.patch
kmemleak-disable-kasan-instrumentation-for-kmemleak.patch
lib-add-kasan-test-module.patch
x86_64-kasan-add-interceptors-for-memset-memmove-memcpy-functions.patch
kasan-enable-stack-instrumentation.patch
mm-vmalloc-add-flag-preventing-guard-hole-allocation.patch
mm-vmalloc-pass-additional-vm_flags-to-__vmalloc_node_range.patch
kernel-add-support-for-init_array-constructors.patch
module-fix-types-of-device-tables-aliases.patch
kasan-enable-instrumentation-of-global-variables.patch
--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
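
The three events added here are ordinary tracepoints, so on a kernel with
this patch applied they can be observed through tracefs like any other
compaction event.  A minimal usage sketch, assuming tracefs is mounted at
/sys/kernel/debug/tracing (the event names come from the patch; the rest
is standard ftrace usage, not part of the change):

    echo 1 > /sys/kernel/debug/tracing/events/compaction/mm_compaction_deferred/enable
    echo 1 > /sys/kernel/debug/tracing/events/compaction/mm_compaction_defer_compaction/enable
    echo 1 > /sys/kernel/debug/tracing/events/compaction/mm_compaction_defer_reset/enable
    cat /sys/kernel/debug/tracing/trace_pipe

Each record is emitted in the TP_printk() format defined above, i.e.
"node=%d zone=%-8s order=%d order_failed=%d consider=%u limit=%lu", which
shows the per-zone defer counters at the moment the event fired.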