Hello linux-rt,

We are hitting a kernel NULL pointer dereference (fatal panic) on our platform.

Kernel version: 4.14.202 + rt97.patch
CPU architecture: ARM Cortex-A53
Platform description: Telechips TCC8030

Detailed description of the bug:

Unable to handle kernel NULL pointer dereference at virtual address 00000034
[ 4.200407] Call trace:
[ 4.200412]  worker_thread+0x28/0x4b0
[ 4.200415]  kthread+0x130/0x138
[ 4.200420]  ret_from_fork+0x10/0x18
[ 4.200425] Code: d0006458 91274318 a9445401 91040318 (b9403420)
[ 4.200429] ---[ end trace 0000000000000003 ]---
[ 4.200433] Kernel panic - not syncing: Fatal exception
[ 4.200439] SMP: stopping secondary CPUs
[ 4.200446] Kernel Offset: disabled
[ 4.200449] CPU features: 0x0002004
[ 4.200451] Memory Limit: none
[ 4.200454] reboot mode: Set magic 0x77665002

Stack trace / serial console log (attached): a53-05231515.log

Steps to reproduce the bug:
Power on 10s
→ Power off 200ms → Power on 10s
→ Power off 200ms
………

Can you help confirm the reason? Thanks!
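P.S. For reference, the trapping opcode in the Code: line, b9403420, decodes to "ldr w0, [x1, #0x34]", i.e. a 32-bit load from offset 0x34 of x1 with x1 apparently NULL, which lines up with the reported fault address 00000034. In the attached kthread.o disassembly, task_struct->flags is read at offset #52 (= 0x34), so our (unconfirmed) guess is that some task_struct-like pointer is NULL when its 32-bit field at offset 0x34 is read early in worker_thread(). A minimal user-space sketch of that arithmetic, with a purely hypothetical struct layout:

/*
 * Minimal user-space sketch (NOT kernel code): it only illustrates why a
 * 32-bit read of a member at offset 0x34 of a NULL struct pointer faults at
 * exactly virtual address 00000034. The struct below is hypothetical; the
 * 0x34 offset is the only value taken from the oops / attached disassembly.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_task {
	char         pad[0x34];   /* assumed padding up to offset 0x34 */
	unsigned int flags;       /* the member the faulting ldr would be reading */
};

int main(void)
{
	/* With a NULL base pointer, the load address is 0x0 + offsetof(struct
	 * fake_task, flags) == 0x34, i.e. the address reported in the oops. */
	printf("fault address = %#zx\n", offsetof(struct fake_task, flags));
	return 0;
}

That only narrows it down to "some struct pointer was NULL when its 32-bit field at offset 0x34 was read"; disassembling kernel/workqueue.o the same way and mapping worker_thread+0x28 back to its source line should show exactly which pointer it is.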
Attachment:
a53-05231515.log
Description: Binary data
/home/miaon/workspace/fxssd_4t/33WA/Code/SHRV0763_SRC_33WA_v060200/OUTPUT/APL_BUILD/tmp/work-shared/nsp1-release/kernel-source/kernel/kthread.o: file format elf64-littleaarch64 Disassembly of section .text: 0000000000000000 <kthread_should_stop>: */ static __always_inline struct task_struct *get_current(void) { unsigned long sp_el0; asm ("mrs %0, sp_el0" : "=r" (sp_el0)); 0: d5384100 mrs x0, sp_el0 current->set_child_tid = (__force void __user *)kthread; } static inline struct kthread *to_kthread(struct task_struct *k) { WARN_ON(!(k->flags & PF_KTHREAD)); 4: b9403401 ldr w1, [x0, #52] 8: 36a800a1 tbz w1, #21, 1c <kthread_should_stop+0x1c> return (__force void *)k->set_child_tid; c: f9434000 ldr x0, [x0, #1664] * @nr: bit number to test * @addr: Address to start counting from */ static inline int test_bit(int nr, const volatile unsigned long *addr) { return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); 10: f9400000 ldr x0, [x0] * and this will return true. You should then return, and your return * value will be passed through to kthread_stop(). */ bool kthread_should_stop(void) { return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags); 14: 53010400 ubfx w0, w0, #1, #1 } 18: d65f03c0 ret WARN_ON(!(k->flags & PF_KTHREAD)); 1c: d4210000 brk #0x800 20: 17fffffb b c <kthread_should_stop+0xc> 24: d503201f nop 0000000000000028 <kthread_should_park>: 28: d5384100 mrs x0, sp_el0 2c: b9403401 ldr w1, [x0, #52] 30: 36a800a1 tbz w1, #21, 44 <kthread_should_park+0x1c> return (__force void *)k->set_child_tid; 34: f9434000 ldr x0, [x0, #1664] 38: f9400000 ldr x0, [x0] * and in a park position. kthread_unpark() "restarts" the thread and * calls the thread function again. */ bool kthread_should_park(void) { return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); 3c: 53020800 ubfx w0, w0, #2, #1 } 40: d65f03c0 ret WARN_ON(!(k->flags & PF_KTHREAD)); 44: d4210000 brk #0x800 48: 17fffffb b 34 <kthread_should_park+0xc> 4c: d503201f nop 0000000000000050 <kthread_freezable_should_stop>: * refrigerator if necessary. This function is safe from kthread_stop() / * freezer deadlock and freezable kthreads should use this function instead * of calling try_to_freeze() directly. */ bool kthread_freezable_should_stop(bool *was_frozen) { 50: a9be7bfd stp x29, x30, [sp, #-32]! 
bool frozen = false; might_sleep(); 54: 52800fe1 mov w1, #0x7f // #127 58: 52800002 mov w2, #0x0 // #0 { 5c: 910003fd mov x29, sp 60: f9000bf3 str x19, [sp, #16] 64: aa0003f3 mov x19, x0 might_sleep(); 68: 90000000 adrp x0, 0 <kthread_should_stop> 6c: 91000000 add x0, x0, #0x0 70: 94000000 bl 0 <__might_sleep> }) static __always_inline void __read_once_size(const volatile void *p, void *res, int size) { __READ_ONCE_SIZE; 74: 90000001 adrp x1, 0 <system_freezing_cnt> 78: d5384100 mrs x0, sp_el0 7c: b9400021 ldr w1, [x1] /* * Check if there is a request to freeze a process */ static inline bool freezing(struct task_struct *p) { if (likely(!atomic_read(&system_freezing_cnt))) 80: 350001a1 cbnz w1, b4 <kthread_freezable_should_stop+0x64> bool frozen = false; 84: 52800000 mov w0, #0x0 // #0 if (unlikely(freezing(current))) frozen = __refrigerator(true); if (was_frozen) 88: b4000053 cbz x19, 90 <kthread_freezable_should_stop+0x40> *was_frozen = frozen; 8c: 39000260 strb w0, [x19] 90: d5384100 mrs x0, sp_el0 WARN_ON(!(k->flags & PF_KTHREAD)); 94: b9403401 ldr w1, [x0, #52] 98: 36a801c1 tbz w1, #21, d0 <kthread_freezable_should_stop+0x80> return (__force void *)k->set_child_tid; 9c: f9434000 ldr x0, [x0, #1664] return kthread_should_stop(); } a0: f9400bf3 ldr x19, [sp, #16] a4: f9400000 ldr x0, [x0] a8: a8c27bfd ldp x29, x30, [sp], #32 return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags); ac: 53010400 ubfx w0, w0, #1, #1 } b0: d65f03c0 ret return false; return freezing_slow_path(p); b4: 94000000 bl 0 <freezing_slow_path> if (unlikely(freezing(current))) b8: 72001c1f tst w0, #0xff bc: 54fffe40 b.eq 84 <kthread_freezable_should_stop+0x34> // b.none frozen = __refrigerator(true); c0: 52800020 mov w0, #0x1 // #1 c4: 94000000 bl 0 <__refrigerator> c8: 12001c00 and w0, w0, #0xff cc: 17ffffef b 88 <kthread_freezable_should_stop+0x38> WARN_ON(!(k->flags & PF_KTHREAD)); d0: d4210000 brk #0x800 d4: 17fffff2 b 9c <kthread_freezable_should_stop+0x4c> 00000000000000d8 <kthread_flush_work_fn>: struct kthread_work work; struct completion done; }; static void kthread_flush_work_fn(struct kthread_work *work) { d8: a9bf7bfd stp x29, x30, [sp, #-16]! 
struct kthread_flush_work *fwork = container_of(work, struct kthread_flush_work, work); complete(&fwork->done); dc: 9100a000 add x0, x0, #0x28 { e0: 910003fd mov x29, sp complete(&fwork->done); e4: 94000000 bl 0 <complete> } e8: a8c17bfd ldp x29, x30, [sp], #16 ec: d65f03c0 ret 00000000000000f0 <__kthread_parkme>: { f0: d10143ff sub sp, sp, #0x50 f4: a9017bfd stp x29, x30, [sp, #16] f8: 910043fd add x29, sp, #0x10 fc: a90463f7 stp x23, x24, [sp, #64] 100: 90000017 adrp x23, 0 <__stack_chk_guard> 104: 910002f7 add x23, x23, #0x0 complete(&self->parked); 108: 91006018 add x24, x0, #0x18 { 10c: a90253f3 stp x19, x20, [sp, #32] 110: aa0003f4 mov x20, x0 114: f94002e0 ldr x0, [x23] 118: f90007e0 str x0, [sp, #8] 11c: d2800000 mov x0, #0x0 // #0 120: a9035bf5 stp x21, x22, [sp, #48] 124: d5384113 mrs x19, sp_el0 { switch (size) { case 1: *(volatile __u8 *)p = *(__u8 *)res; break; case 2: *(volatile __u16 *)p = *(__u16 *)res; break; case 4: *(volatile __u32 *)p = *(__u32 *)res; break; case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 128: d2800816 mov x22, #0x40 // #64 12c: 90000015 adrp x21, 0 <kthread_should_stop> 130: 910002b5 add x21, x21, #0x0 134: f9000e76 str x22, [x19, #24] set_current_state(TASK_PARKED); 138: f90b8a75 str x21, [x19, #5904] 13c: d5033bbf dmb ish 140: f9400280 ldr x0, [x20] clear_bit(KTHREAD_IS_PARKED, &self->flags); 144: aa1403e1 mov x1, x20 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) 148: 361001a0 tbz w0, #2, 17c <__kthread_parkme+0x8c> 14c: d503201f nop if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) 150: 52800060 mov w0, #0x3 // #3 154: 94000000 bl 0 <test_and_set_bit> 158: 34000320 cbz w0, 1bc <__kthread_parkme+0xcc> schedule(); 15c: 94000000 bl 0 <schedule> 160: d5384113 mrs x19, sp_el0 164: f9000e76 str x22, [x19, #24] set_current_state(TASK_PARKED); 168: f90b8a75 str x21, [x19, #5904] 16c: d5033bbf dmb ish 170: f9400280 ldr x0, [x20] clear_bit(KTHREAD_IS_PARKED, &self->flags); 174: aa1403e1 mov x1, x20 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) 178: 3717fec0 tbnz w0, #2, 150 <__kthread_parkme+0x60> clear_bit(KTHREAD_IS_PARKED, &self->flags); 17c: 52800060 mov w0, #0x3 // #3 180: 94000000 bl 0 <clear_bit> __set_current_state(TASK_RUNNING); 184: 90000000 adrp x0, 0 <kthread_should_stop> 188: 91000000 add x0, x0, #0x0 18c: f9000e7f str xzr, [x19, #24] 190: f90b8a60 str x0, [x19, #5904] } 194: f94007e1 ldr x1, [sp, #8] 198: f94002e0 ldr x0, [x23] 19c: ca000020 eor x0, x1, x0 1a0: b5000160 cbnz x0, 1cc <__kthread_parkme+0xdc> 1a4: a9417bfd ldp x29, x30, [sp, #16] 1a8: a94253f3 ldp x19, x20, [sp, #32] 1ac: a9435bf5 ldp x21, x22, [sp, #48] 1b0: a94463f7 ldp x23, x24, [sp, #64] 1b4: 910143ff add sp, sp, #0x50 1b8: d65f03c0 ret complete(&self->parked); 1bc: aa1803e0 mov x0, x24 1c0: 94000000 bl 0 <complete> schedule(); 1c4: 94000000 bl 0 <schedule> 1c8: 17ffffe6 b 160 <__kthread_parkme+0x70> } 1cc: 94000000 bl 0 <__stack_chk_fail> 00000000000001d0 <kthread_parkme>: { 1d0: a9bf7bfd stp x29, x30, [sp, #-16]! 
1d4: d5384100 mrs x0, sp_el0 1d8: 910003fd mov x29, sp WARN_ON(!(k->flags & PF_KTHREAD)); 1dc: b9403401 ldr w1, [x0, #52] 1e0: 36a800a1 tbz w1, #21, 1f4 <kthread_parkme+0x24> __kthread_parkme(to_kthread(current)); 1e4: f9434000 ldr x0, [x0, #1664] 1e8: 97ffffc2 bl f0 <__kthread_parkme> } 1ec: a8c17bfd ldp x29, x30, [sp], #16 1f0: d65f03c0 ret WARN_ON(!(k->flags & PF_KTHREAD)); 1f4: d4210000 brk #0x800 1f8: 17fffffb b 1e4 <kthread_parkme+0x14> 1fc: d503201f nop 0000000000000200 <__kthread_create_on_node>: { 200: d10403ff sub sp, sp, #0x100 204: a90a7bfd stp x29, x30, [sp, #160] 208: 910283fd add x29, sp, #0xa0 20c: a90c5bf5 stp x21, x22, [sp, #192] 210: 90000015 adrp x21, 0 <__stack_chk_guard> 214: 910002b5 add x21, x21, #0x0 * structure. */ static inline void __init_completion(struct completion *x) { x->done = 0; init_swait_queue_head(&x->wait); 218: 90000016 adrp x22, 0 <kthread_should_stop> 21c: 910002d6 add x22, x22, #0x0 220: a90e6bf9 stp x25, x26, [sp, #224] 224: aa0003f9 mov x25, x0 228: f94002a0 ldr x0, [x21] 22c: f9004fe0 str x0, [sp, #152] 230: d2800000 mov x0, #0x0 // #0 234: 9100c3e0 add x0, sp, #0x30 238: aa0403fa mov x26, x4 x->done = 0; 23c: b9002bff str wzr, [sp, #40] 240: a90b53f3 stp x19, x20, [sp, #176] return ERR_PTR(-ENOMEM); 244: 92800174 mov x20, #0xfffffffffffffff4 // #-12 { 248: a90d63f7 stp x23, x24, [sp, #208] 24c: aa0103f8 mov x24, x1 250: 2a0203f7 mov w23, w2 init_swait_queue_head(&x->wait); 254: 90000001 adrp x1, 0 <kthread_should_stop> 258: aa1603e2 mov x2, x22 25c: 91000021 add x1, x1, #0x0 260: f9007bfb str x27, [sp, #240] 264: aa0303fb mov x27, x3 268: 94000000 bl 0 <__init_swait_queue_head> index = kmalloc_index(size); if (!index) return ZERO_SIZE_PTR; return kmem_cache_alloc_trace( 26c: 90000000 adrp x0, 0 <kmalloc_caches> void *ret = kmem_cache_alloc(s, flags); 270: 52801801 mov w1, #0xc0 // #192 274: 72a02801 movk w1, #0x140, lsl #16 278: f9400000 ldr x0, [x0] 27c: 94000000 bl 0 <kmem_cache_alloc> if (!create) 280: b4000380 cbz x0, 2f0 <__kthread_create_on_node+0xf0> create->threadfn = threadfn; 284: aa0003f3 mov x19, x0 create->done = &done; 288: 9100a3e1 add x1, sp, #0x28 spin_lock(&kthread_create_lock); 28c: 90000014 adrp x20, 0 <kthread_should_stop> 290: 91000294 add x20, x20, #0x0 294: aa1403e0 mov x0, x20 create->data = data; 298: a9006279 stp x25, x24, [x19] create->node = node; 29c: b9001277 str w23, [x19, #16] create->done = &done; 2a0: f9001261 str x1, [x19, #32] spin_lock(&kthread_create_lock); 2a4: 94000000 bl 0 <rt_spin_lock> * Insert a new entry before the specified head. * This is useful for implementing queues. 
*/ static inline void list_add_tail(struct list_head *new, struct list_head *head) { __list_add(new, head->prev, head); 2a8: f9407e82 ldr x2, [x20, #248] list_add_tail(&create->list, &kthread_create_list); 2ac: 9100a261 add x1, x19, #0x28 new->next = next; 2b0: 9103c280 add x0, x20, #0xf0 new->prev = prev; 2b4: a9028a60 stp x0, x2, [x19, #40] spin_unlock(&kthread_create_lock); 2b8: aa1403e0 mov x0, x20 next->prev = new; 2bc: f9007e81 str x1, [x20, #248] 2c0: f9000041 str x1, [x2] 2c4: 94000000 bl 0 <rt_spin_unlock> wake_up_process(kthreadd_task); 2c8: f94006c0 ldr x0, [x22, #8] 2cc: 94000000 bl 0 <wake_up_process> if (unlikely(wait_for_completion_killable(&done))) { 2d0: 9100a3e0 add x0, sp, #0x28 2d4: 94000000 bl 0 <wait_for_completion_killable> 2d8: 35000540 cbnz w0, 380 <__kthread_create_on_node+0x180> task = create->result; 2dc: f9400e74 ldr x20, [x19, #24] if (!IS_ERR(task)) { 2e0: b140069f cmn x20, #0x1, lsl #12 2e4: 54000209 b.ls 324 <__kthread_create_on_node+0x124> // b.plast kfree(create); 2e8: aa1303e0 mov x0, x19 2ec: 94000000 bl 0 <kfree> } 2f0: aa1403e0 mov x0, x20 2f4: f9404fe2 ldr x2, [sp, #152] 2f8: f94002a1 ldr x1, [x21] 2fc: ca010041 eor x1, x2, x1 300: b5000581 cbnz x1, 3b0 <__kthread_create_on_node+0x1b0> 304: a94a7bfd ldp x29, x30, [sp, #160] 308: a94b53f3 ldp x19, x20, [sp, #176] 30c: a94c5bf5 ldp x21, x22, [sp, #192] 310: a94d63f7 ldp x23, x24, [sp, #208] 314: a94e6bf9 ldp x25, x26, [sp, #224] 318: f9407bfb ldr x27, [sp, #240] 31c: 910403ff add sp, sp, #0x100 320: d65f03c0 ret vsnprintf(name, sizeof(name), namefmt, args); 324: a9401f46 ldp x6, x7, [x26] 328: 910003e3 mov x3, sp 32c: a9411744 ldp x4, x5, [x26, #16] 330: aa1b03e2 mov x2, x27 334: d2800201 mov x1, #0x10 // #16 338: 910223e0 add x0, sp, #0x88 33c: a9001fe6 stp x6, x7, [sp] 340: a90117e4 stp x4, x5, [sp, #16] 344: 94000000 bl 0 <vsnprintf> extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); static inline void set_task_comm(struct task_struct *tsk, const char *from) { __set_task_comm(tsk, from, false); 348: aa1403e0 mov x0, x20 34c: 910223e1 add x1, sp, #0x88 350: 52800002 mov w2, #0x0 // #0 354: 94000000 bl 0 <__set_task_comm> sched_setscheduler_nocheck(task, SCHED_NORMAL, ¶m); 358: 90000002 adrp x2, 0 <kthread_should_stop> 35c: 91000042 add x2, x2, #0x0 360: 52800001 mov w1, #0x0 // #0 364: aa1403e0 mov x0, x20 368: 94000000 bl 0 <sched_setscheduler_nocheck> set_cpus_allowed_ptr(task, cpu_all_mask); 36c: aa1403e0 mov x0, x20 370: 90000001 adrp x1, 0 <cpu_all_bits> 374: 91000021 add x1, x1, #0x0 378: 94000000 bl 0 <set_cpus_allowed_ptr> 37c: 17ffffdb b 2e8 <__kthread_create_on_node+0xe8> __XCHG_CASE(w, , rel_4, , , , , l, "memory") __XCHG_CASE( , , rel_8, , , , , l, "memory") __XCHG_CASE(w, b, mb_1, dmb ish, nop, , a, l, "memory") __XCHG_CASE(w, h, mb_2, dmb ish, nop, , a, l, "memory") __XCHG_CASE(w, , mb_4, dmb ish, nop, , a, l, "memory") __XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory") 380: d2800000 mov x0, #0x0 // #0 384: 91008263 add x3, x19, #0x20 388: f9800071 prfm pstl1strm, [x3] 38c: c85f7c61 ldxr x1, [x3] 390: c802fc60 stlxr w2, x0, [x3] 394: 35ffffc2 cbnz w2, 38c <__kthread_create_on_node+0x18c> 398: d5033bbf dmb ish return ERR_PTR(-EINTR); 39c: 92800074 mov x20, #0xfffffffffffffffc // #-4 if (xchg(&create->done, NULL)) 3a0: b5fffa81 cbnz x1, 2f0 <__kthread_create_on_node+0xf0> wait_for_completion(&done); 3a4: 9100a3e0 add x0, sp, #0x28 3a8: 94000000 bl 0 <wait_for_completion> 3ac: 17ffffcc b 2dc <__kthread_create_on_node+0xdc> } 3b0: 94000000 bl 0 
<__stack_chk_fail> 3b4: d503201f nop 00000000000003b8 <kthread_create_on_node>: { 3b8: d10243ff sub sp, sp, #0x90 va_start(args, namefmt); 3bc: 128003e8 mov w8, #0xffffffe0 // #-32 3c0: 9101c3e9 add x9, sp, #0x70 3c4: 910243ea add x10, sp, #0x90 { 3c8: a9057bfd stp x29, x30, [sp, #80] 3cc: 910143fd add x29, sp, #0x50 va_start(args, namefmt); 3d0: a902abea stp x10, x10, [sp, #40] { 3d4: f90033f3 str x19, [sp, #96] 3d8: 90000013 adrp x19, 0 <__stack_chk_guard> 3dc: 91000273 add x19, x19, #0x0 va_start(args, namefmt); 3e0: f9001fe9 str x9, [sp, #56] { 3e4: f9400269 ldr x9, [x19] 3e8: f90027e9 str x9, [sp, #72] 3ec: d2800009 mov x9, #0x0 // #0 va_start(args, namefmt); 3f0: 29087fe8 stp w8, wzr, [sp, #64] task = __kthread_create_on_node(threadfn, data, node, namefmt, args); 3f4: a942a7e8 ldp x8, x9, [sp, #40] 3f8: a90027e8 stp x8, x9, [sp] 3fc: a943a7e8 ldp x8, x9, [sp, #56] 400: a90127e8 stp x8, x9, [sp, #16] { 404: a90717e4 stp x4, x5, [sp, #112] task = __kthread_create_on_node(threadfn, data, node, namefmt, args); 408: 910003e4 mov x4, sp { 40c: a9081fe6 stp x6, x7, [sp, #128] task = __kthread_create_on_node(threadfn, data, node, namefmt, args); 410: 97ffff7c bl 200 <__kthread_create_on_node> } 414: f94027e2 ldr x2, [sp, #72] 418: f9400261 ldr x1, [x19] 41c: ca010041 eor x1, x2, x1 420: b50000a1 cbnz x1, 434 <kthread_create_on_node+0x7c> 424: a9457bfd ldp x29, x30, [sp, #80] 428: f94033f3 ldr x19, [sp, #96] 42c: 910243ff add sp, sp, #0x90 430: d65f03c0 ret 434: 94000000 bl 0 <__stack_chk_fail> 0000000000000438 <kthread_park>: { 438: a9bd7bfd stp x29, x30, [sp, #-48]! 43c: 910003fd mov x29, sp 440: a90153f3 stp x19, x20, [sp, #16] 444: f90013f5 str x21, [sp, #32] WARN_ON(!(k->flags & PF_KTHREAD)); 448: b9403401 ldr w1, [x0, #52] 44c: 36a80381 tbz w1, #21, 4bc <kthread_park+0x84> return (__force void *)k->set_child_tid; 450: f9434015 ldr x21, [x0, #1664] if (WARN_ON(k->flags & PF_EXITING)) 454: 371003a1 tbnz w1, #2, 4c8 <kthread_park+0x90> 458: f94002b3 ldr x19, [x21] 45c: 53030e73 ubfx w19, w19, #3, #1 if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) { 460: 340000f3 cbz w19, 47c <kthread_park+0x44> return 0; 464: 52800013 mov w19, #0x0 // #0 } 468: 2a1303e0 mov w0, w19 46c: a94153f3 ldp x19, x20, [sp, #16] 470: f94013f5 ldr x21, [sp, #32] 474: a8c37bfd ldp x29, x30, [sp], #48 478: d65f03c0 ret 47c: aa0003f4 mov x20, x0 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 480: aa1503e1 mov x1, x21 484: 52800040 mov w0, #0x2 // #2 488: 94000000 bl 0 <set_bit> 48c: d5384100 mrs x0, sp_el0 if (k != current) { 490: eb00029f cmp x20, x0 494: 54fffe80 b.eq 464 <kthread_park+0x2c> // b.none wake_up_process(k); 498: aa1403e0 mov x0, x20 49c: 94000000 bl 0 <wake_up_process> wait_for_completion(&kthread->parked); 4a0: 910062a0 add x0, x21, #0x18 4a4: 94000000 bl 0 <wait_for_completion> } 4a8: 2a1303e0 mov w0, w19 4ac: a94153f3 ldp x19, x20, [sp, #16] 4b0: f94013f5 ldr x21, [sp, #32] 4b4: a8c37bfd ldp x29, x30, [sp], #48 4b8: d65f03c0 ret WARN_ON(!(k->flags & PF_KTHREAD)); 4bc: d4210000 brk #0x800 4c0: b9403401 ldr w1, [x0, #52] 4c4: 17ffffe3 b 450 <kthread_park+0x18> if (WARN_ON(k->flags & PF_EXITING)) 4c8: d4210000 brk #0x800 return -ENOSYS; 4cc: 128004b3 mov w19, #0xffffffda // #-38 4d0: 17ffffe6 b 468 <kthread_park+0x30> 4d4: d503201f nop 00000000000004d8 <kthread>: { 4d8: a9ba7bfd stp x29, x30, [sp, #-96]! 
return kmem_cache_alloc_trace( 4dc: 90000002 adrp x2, 0 <kmalloc_caches> void *ret = kmem_cache_alloc(s, flags); 4e0: 52801801 mov w1, #0xc0 // #192 4e4: 910003fd mov x29, sp 4e8: a90153f3 stp x19, x20, [sp, #16] 4ec: aa0003f3 mov x19, x0 4f0: 72a02801 movk w1, #0x140, lsl #16 4f4: f9400040 ldr x0, [x2] 4f8: a9025bf5 stp x21, x22, [sp, #32] 4fc: d5384115 mrs x21, sp_el0 500: a9046bf9 stp x25, x26, [sp, #64] void *data = create->data; 504: a940667a ldp x26, x25, [x19] 508: 94000000 bl 0 <kmem_cache_alloc> 50c: 91008262 add x2, x19, #0x20 510: aa0003f4 mov x20, x0 514: d2800000 mov x0, #0x0 // #0 current->set_child_tid = (__force void __user *)kthread; 518: f90342b4 str x20, [x21, #1664] 51c: f9800051 prfm pstl1strm, [x2] 520: c85f7c56 ldxr x22, [x2] 524: c801fc40 stlxr w1, x0, [x2] 528: 35ffffc1 cbnz w1, 520 <kthread+0x48> 52c: d5033bbf dmb ish if (!done) { 530: a90363f7 stp x23, x24, [sp, #48] 534: f9002bfb str x27, [sp, #80] 538: b4000536 cbz x22, 5dc <kthread+0x104> if (!self) { 53c: b4000454 cbz x20, 5c4 <kthread+0xec> self->flags = 0; 540: f900029f str xzr, [x20] 544: 90000018 adrp x24, 0 <kthread_should_stop> self->data = data; 548: f9000a99 str x25, [x20, #16] 54c: 91000318 add x24, x24, #0x0 x->done = 0; 550: b9007a9f str wzr, [x20, #120] init_swait_queue_head(&x->wait); 554: 90000017 adrp x23, 0 <kthread_should_stop> 558: 910002f7 add x23, x23, #0x0 55c: aa1803e2 mov x2, x24 560: aa1703e1 mov x1, x23 564: 91020280 add x0, x20, #0x80 568: 94000000 bl 0 <__init_swait_queue_head> x->done = 0; 56c: b9001a9f str wzr, [x20, #24] init_swait_queue_head(&x->wait); 570: aa1803e2 mov x2, x24 574: aa1703e1 mov x1, x23 578: 91008280 add x0, x20, #0x20 57c: 94000000 bl 0 <__init_swait_queue_head> complete(done); 580: aa1603e0 mov x0, x22 __set_current_state(TASK_UNINTERRUPTIBLE); 584: d2800042 mov x2, #0x2 // #2 x->done = 0; 588: 9101e29b add x27, x20, #0x78 58c: 90000001 adrp x1, 0 <kthread_should_stop> 590: 91000021 add x1, x1, #0x0 594: f9000ea2 str x2, [x21, #24] current->vfork_done = &self->exited; 598: f9033ebb str x27, [x21, #1656] __set_current_state(TASK_UNINTERRUPTIBLE); 59c: f90b8aa1 str x1, [x21, #5904] create->result = current; 5a0: f9000e75 str x21, [x19, #24] complete(done); 5a4: 94000000 bl 0 <complete> schedule(); 5a8: 94000000 bl 0 <schedule> 5ac: f9400280 ldr x0, [x20] if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { 5b0: 721f001f tst w0, #0x2 5b4: 540001c0 b.eq 5ec <kthread+0x114> // b.none ret = -EINTR; 5b8: 12800060 mov w0, #0xfffffffc // #-4 do_exit(ret); 5bc: 93407c00 sxtw x0, w0 5c0: 94000000 bl 0 <do_exit> create->result = ERR_PTR(-ENOMEM); 5c4: 92800174 mov x20, #0xfffffffffffffff4 // #-12 5c8: f9000e74 str x20, [x19, #24] complete(done); 5cc: aa1603e0 mov x0, x22 5d0: 94000000 bl 0 <complete> do_exit(-ENOMEM); 5d4: aa1403e0 mov x0, x20 5d8: 94000000 bl 0 <do_exit> kfree(create); 5dc: aa1303e0 mov x0, x19 5e0: 94000000 bl 0 <kfree> do_exit(-EINTR); 5e4: 92800060 mov x0, #0xfffffffffffffffc // #-4 5e8: 94000000 bl 0 <do_exit> { /* * This kthread finished initialization. The creator should have * set PF_NO_SETAFFINITY if this kthread should stay in the root. 
*/ current->no_cgroup_migration = 0; 5ec: 3955a2a1 ldrb w1, [x21, #1384] __kthread_parkme(self); 5f0: aa1403e0 mov x0, x20 5f4: 121c7821 and w1, w1, #0xfffffff7 5f8: 3915a2a1 strb w1, [x21, #1384] 5fc: 97fffebd bl f0 <__kthread_parkme> ret = threadfn(data); 600: aa1903e0 mov x0, x25 604: d63f0340 blr x26 608: 17ffffed b 5bc <kthread+0xe4> 60c: d503201f nop 0000000000000610 <__kthread_init_worker>: { 610: a9bd7bfd stp x29, x30, [sp, #-48]! raw_spin_lock_init(&worker->lock); 614: 90000003 adrp x3, 0 <kthread_should_stop> 618: 91000063 add x3, x3, #0x0 { 61c: 910003fd mov x29, sp 620: a90153f3 stp x19, x20, [sp, #16] 624: aa0003f3 mov x19, x0 628: aa0103f4 mov x20, x1 62c: f90013f5 str x21, [sp, #32] raw_spin_lock_init(&worker->lock); 630: 91002000 add x0, x0, #0x8 memset(worker, 0, sizeof(struct kthread_worker)); 634: a9007e7f stp xzr, xzr, [x19] { 638: aa0203f5 mov x21, x2 raw_spin_lock_init(&worker->lock); 63c: 90000001 adrp x1, 0 <kthread_should_stop> memset(worker, 0, sizeof(struct kthread_worker)); 640: a9017e7f stp xzr, xzr, [x19, #16] raw_spin_lock_init(&worker->lock); 644: 91004062 add x2, x3, #0x10 648: 91000021 add x1, x1, #0x0 memset(worker, 0, sizeof(struct kthread_worker)); 64c: a9027e7f stp xzr, xzr, [x19, #32] 650: a9037e7f stp xzr, xzr, [x19, #48] 654: a9047e7f stp xzr, xzr, [x19, #64] 658: a9057e7f stp xzr, xzr, [x19, #80] 65c: a9067e7f stp xzr, xzr, [x19, #96] 660: a9077e7f stp xzr, xzr, [x19, #112] raw_spin_lock_init(&worker->lock); 664: 94000000 bl 0 <__raw_spin_lock_init> lockdep_set_class_and_name(&worker->lock, key, name); 668: aa1503e2 mov x2, x21 66c: aa1403e1 mov x1, x20 670: 91008260 add x0, x19, #0x20 674: 52800003 mov w3, #0x0 // #0 678: 94000000 bl 0 <lockdep_init_map> INIT_LIST_HEAD(&worker->work_list); 67c: 91014261 add x1, x19, #0x50 680: f9002a61 str x1, [x19, #80] INIT_LIST_HEAD(&worker->delayed_work_list); 684: 91018260 add x0, x19, #0x60 list->prev = list; 688: f9002e61 str x1, [x19, #88] 68c: f9003260 str x0, [x19, #96] 690: f9003660 str x0, [x19, #104] } 694: a94153f3 ldp x19, x20, [sp, #16] 698: f94013f5 ldr x21, [sp, #32] 69c: a8c37bfd ldp x29, x30, [sp], #48 6a0: d65f03c0 ret 6a4: d503201f nop 00000000000006a8 <kthread_worker_fn>: { 6a8: d10183ff sub sp, sp, #0x60 6ac: a9017bfd stp x29, x30, [sp, #16] 6b0: 910043fd add x29, sp, #0x10 6b4: a90253f3 stp x19, x20, [sp, #32] 6b8: aa0003f4 mov x20, x0 6bc: a9035bf5 stp x21, x22, [sp, #48] 6c0: a90463f7 stp x23, x24, [sp, #64] 6c4: a9056bf9 stp x25, x26, [sp, #80] 6c8: 9000001a adrp x26, 0 <__stack_chk_guard> 6cc: 9100035a add x26, x26, #0x0 6d0: f9400340 ldr x0, [x26] 6d4: f90007e0 str x0, [sp, #8] 6d8: d2800000 mov x0, #0x0 // #0 WARN_ON(worker->task && worker->task != current); 6dc: f9403a80 ldr x0, [x20, #112] 6e0: b40000a0 cbz x0, 6f4 <kthread_worker_fn+0x4c> 6e4: d5384101 mrs x1, sp_el0 6e8: eb01001f cmp x0, x1 6ec: 54000040 b.eq 6f4 <kthread_worker_fn+0x4c> // b.none 6f0: d4210000 brk #0x800 if (worker->flags & KTW_FREEZABLE) 6f4: b9400280 ldr w0, [x20] 6f8: d5384101 mrs x1, sp_el0 worker->task = current; 6fc: f9003a81 str x1, [x20, #112] if (worker->flags & KTW_FREEZABLE) 700: 37000da0 tbnz w0, #0, 8b4 <kthread_worker_fn+0x20c> 704: 90000017 adrp x23, 0 <kthread_should_stop> __READ_ONCE_SIZE; 708: 90000016 adrp x22, 0 <system_freezing_cnt> __set_current_state(TASK_RUNNING); 70c: 90000019 adrp x25, 0 <kthread_should_stop> 710: 910002f7 add x23, x23, #0x0 714: 910002d6 add x22, x22, #0x0 718: 91002295 add x21, x20, #0x8 71c: 91000339 add x25, x25, #0x0 case 8: *(volatile __u64 *)p = *(__u64 *)res; 
break; 720: d2800038 mov x24, #0x1 // #1 724: 1400000e b 75c <kthread_worker_fn+0xb4> * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION * If try_to_freeze causes a lockdep warning it means the caller may deadlock */ static inline bool try_to_freeze_unsafe(void) { might_sleep(); 728: 52800721 mov w1, #0x39 // #57 72c: 90000000 adrp x0, 0 <kthread_should_stop> 730: 52800002 mov w2, #0x0 // #0 734: 91000000 add x0, x0, #0x0 738: 94000000 bl 0 <__might_sleep> __READ_ONCE_SIZE; 73c: b94002c1 ldr w1, [x22] 740: d5384100 mrs x0, sp_el0 if (likely(!atomic_read(&system_freezing_cnt))) 744: 35000621 cbnz w1, 808 <kthread_worker_fn+0x160> cond_resched(); 748: 90000000 adrp x0, 0 <kthread_should_stop> 74c: 52800002 mov w2, #0x0 // #0 750: 91000000 add x0, x0, #0x0 754: 528050e1 mov w1, #0x287 // #647 758: 94000000 bl 0 <___might_sleep> 75c: d5384100 mrs x0, sp_el0 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 760: f9000c18 str x24, [x0, #24] set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */ 764: f90b8817 str x23, [x0, #5904] 768: d5033bbf dmb ish WARN_ON(!(k->flags & PF_KTHREAD)); 76c: b9403401 ldr w1, [x0, #52] 770: 36a80941 tbz w1, #21, 898 <kthread_worker_fn+0x1f0> return (__force void *)k->set_child_tid; 774: f9434000 ldr x0, [x0, #1664] 778: f9400000 ldr x0, [x0] if (kthread_should_stop()) { 77c: 37080620 tbnz w0, #1, 840 <kthread_worker_fn+0x198> raw_spin_lock_irq(&worker->lock); 780: aa1503e0 mov x0, x21 784: 94000000 bl 0 <_raw_spin_lock_irq> __READ_ONCE_SIZE; 788: aa1403e0 mov x0, x20 78c: f8450c01 ldr x1, [x0, #80]! if (!list_empty(&worker->work_list)) { 790: eb01001f cmp x0, x1 794: 54000460 b.eq 820 <kthread_worker_fn+0x178> // b.none work = list_first_entry(&worker->work_list, 798: f9402a93 ldr x19, [x20, #80] raw_spin_unlock_irq(&worker->lock); 79c: aa1503e0 mov x0, x21 static inline void __list_del_entry(struct list_head *entry) { if (!__list_del_entry_valid(entry)) return; __list_del(entry->prev, entry->next); 7a0: a9400662 ldp x2, x1, [x19] next->prev = prev; 7a4: f9000441 str x1, [x2, #8] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 7a8: f9000022 str x2, [x1] 7ac: f9000273 str x19, [x19] list->prev = list; 7b0: f9000673 str x19, [x19, #8] worker->current_work = work; 7b4: f9003e93 str x19, [x20, #120] raw_spin_unlock_irq(&worker->lock); 7b8: 94000000 bl 0 <_raw_spin_unlock_irq> if (work) { 7bc: b4000393 cbz x19, 82c <kthread_worker_fn+0x184> 7c0: d5384101 mrs x1, sp_el0 __set_current_state(TASK_RUNNING); 7c4: f9000c3f str xzr, [x1, #24] work->func(work); 7c8: aa1303e0 mov x0, x19 __set_current_state(TASK_RUNNING); 7cc: f90b8839 str x25, [x1, #5904] work->func(work); 7d0: f9400a61 ldr x1, [x19, #16] 7d4: d63f0020 blr x1 7d8: d5384100 mrs x0, sp_el0 return __refrigerator(false); } static inline bool try_to_freeze(void) { if (!(current->flags & PF_NOFREEZE)) 7dc: b9403400 ldr w0, [x0, #52] 7e0: 377ffa40 tbnz w0, #15, 728 <kthread_worker_fn+0x80> debug_check_no_locks_held(); 7e4: 94000000 bl 0 <debug_check_no_locks_held> might_sleep(); 7e8: 52800721 mov w1, #0x39 // #57 7ec: 90000000 adrp x0, 0 <kthread_should_stop> 7f0: 52800002 mov w2, #0x0 // #0 7f4: 91000000 add x0, x0, #0x0 7f8: 94000000 bl 0 <__might_sleep> __READ_ONCE_SIZE; 7fc: b94002c1 ldr w1, [x22] 800: d5384100 mrs x0, sp_el0 if (likely(!atomic_read(&system_freezing_cnt))) 804: 34fffa21 cbz w1, 748 <kthread_worker_fn+0xa0> return freezing_slow_path(p); 808: 94000000 bl 0 <freezing_slow_path> if (likely(!freezing(current))) 80c: 72001c1f tst w0, #0xff 810: 54fff9c0 b.eq 748 
<kthread_worker_fn+0xa0> // b.none return __refrigerator(false); 814: 52800000 mov w0, #0x0 // #0 818: 94000000 bl 0 <__refrigerator> 81c: 17ffffcb b 748 <kthread_worker_fn+0xa0> worker->current_work = work; 820: f9003e9f str xzr, [x20, #120] raw_spin_unlock_irq(&worker->lock); 824: aa1503e0 mov x0, x21 828: 94000000 bl 0 <_raw_spin_unlock_irq> 82c: b94002c1 ldr w1, [x22] 830: d5384100 mrs x0, sp_el0 if (likely(!atomic_read(&system_freezing_cnt))) 834: 35000361 cbnz w1, 8a0 <kthread_worker_fn+0x1f8> schedule(); 838: 94000000 bl 0 <schedule> 83c: 17ffffe7 b 7d8 <kthread_worker_fn+0x130> __set_current_state(TASK_RUNNING); 840: 90000001 adrp x1, 0 <kthread_should_stop> 844: 91000021 add x1, x1, #0x0 848: d5384102 mrs x2, sp_el0 84c: f9000c5f str xzr, [x2, #24] raw_spin_lock_irq(&worker->lock); 850: aa1503e0 mov x0, x21 __set_current_state(TASK_RUNNING); 854: f90b8841 str x1, [x2, #5904] raw_spin_lock_irq(&worker->lock); 858: 94000000 bl 0 <_raw_spin_lock_irq> worker->task = NULL; 85c: f9003a9f str xzr, [x20, #112] raw_spin_unlock_irq(&worker->lock); 860: aa1503e0 mov x0, x21 864: 94000000 bl 0 <_raw_spin_unlock_irq> } 868: f94007e0 ldr x0, [sp, #8] 86c: f9400341 ldr x1, [x26] 870: ca010001 eor x1, x0, x1 874: 52800000 mov w0, #0x0 // #0 878: b5000221 cbnz x1, 8bc <kthread_worker_fn+0x214> 87c: a9417bfd ldp x29, x30, [sp, #16] 880: a94253f3 ldp x19, x20, [sp, #32] 884: a9435bf5 ldp x21, x22, [sp, #48] 888: a94463f7 ldp x23, x24, [sp, #64] 88c: a9456bf9 ldp x25, x26, [sp, #80] 890: 910183ff add sp, sp, #0x60 894: d65f03c0 ret WARN_ON(!(k->flags & PF_KTHREAD)); 898: d4210000 brk #0x800 89c: 17ffffb6 b 774 <kthread_worker_fn+0xcc> return freezing_slow_path(p); 8a0: 94000000 bl 0 <freezing_slow_path> } else if (!freezing(current)) 8a4: 72001c1f tst w0, #0xff 8a8: 54fff981 b.ne 7d8 <kthread_worker_fn+0x130> // b.any schedule(); 8ac: 94000000 bl 0 <schedule> 8b0: 17ffffca b 7d8 <kthread_worker_fn+0x130> set_freezable(); 8b4: 94000000 bl 0 <set_freezable> 8b8: 17ffff93 b 704 <kthread_worker_fn+0x5c> } 8bc: 94000000 bl 0 <__stack_chk_fail> 00000000000008c0 <__kthread_cancel_work>: * Return: %true if @work was pending and successfully canceled, * %false if @work was not pending */ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork, unsigned long *flags) { 8c0: a9bd7bfd stp x29, x30, [sp, #-48]! /* Try to cancel the timer if exists. 
*/ if (is_dwork) { 8c4: 72001c3f tst w1, #0xff { 8c8: 910003fd mov x29, sp 8cc: a90153f3 stp x19, x20, [sp, #16] 8d0: aa0003f3 mov x19, x0 if (is_dwork) { 8d4: 540001c1 b.ne 90c <__kthread_cancel_work+0x4c> // b.any 8d8: f9400261 ldr x1, [x19] if (!list_empty(&work->node)) { list_del_init(&work->node); return true; } return false; 8dc: 52800000 mov w0, #0x0 // #0 if (!list_empty(&work->node)) { 8e0: eb01027f cmp x19, x1 8e4: 540000e0 b.eq 900 <__kthread_cancel_work+0x40> // b.none __list_del(entry->prev, entry->next); 8e8: a9400662 ldp x2, x1, [x19] next->prev = prev; 8ec: f9000441 str x1, [x2, #8] return true; 8f0: 52800020 mov w0, #0x1 // #1 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 8f4: f9000022 str x2, [x1] 8f8: f9000273 str x19, [x19] list->prev = list; 8fc: f9000673 str x19, [x19, #8] } 900: a94153f3 ldp x19, x20, [sp, #16] 904: a8c37bfd ldp x29, x30, [sp], #48 908: d65f03c0 ret struct kthread_worker *worker = work->worker; 90c: f90013f5 str x21, [sp, #32] 910: aa0203f4 mov x20, x2 work->canceling++; 914: b9402000 ldr w0, [x0, #32] struct kthread_worker *worker = work->worker; 918: f9400e75 ldr x21, [x19, #24] work->canceling++; 91c: 11000400 add w0, w0, #0x1 920: b9002260 str w0, [x19, #32] raw_spin_unlock_irqrestore(&worker->lock, *flags); 924: 910022b5 add x21, x21, #0x8 928: f9400041 ldr x1, [x2] 92c: aa1503e0 mov x0, x21 930: 94000000 bl 0 <_raw_spin_unlock_irqrestore> del_timer_sync(&dwork->timer); 934: 9100a260 add x0, x19, #0x28 938: 94000000 bl 0 <del_timer_sync> raw_spin_lock_irqsave(&worker->lock, *flags); 93c: aa1503e0 mov x0, x21 940: 94000000 bl 0 <_raw_spin_lock_irqsave> 944: f9000280 str x0, [x20] work->canceling--; 948: b9402260 ldr w0, [x19, #32] 94c: 51000400 sub w0, w0, #0x1 950: b9002260 str w0, [x19, #32] 954: f94013f5 ldr x21, [sp, #32] 958: 17ffffe0 b 8d8 <__kthread_cancel_work+0x18> 95c: d503201f nop 0000000000000960 <kthread_insert_work_sanity_check>: { 960: a9be7bfd stp x29, x30, [sp, #-32]! 
lockdep_assert_held(&worker->lock); 964: 90000002 adrp x2, 0 <debug_locks> { 968: 910003fd mov x29, sp 96c: a90153f3 stp x19, x20, [sp, #16] 970: aa0003f4 mov x20, x0 lockdep_assert_held(&worker->lock); 974: b9400040 ldr w0, [x2] { 978: aa0103f3 mov x19, x1 lockdep_assert_held(&worker->lock); 97c: 35000180 cbnz w0, 9ac <kthread_insert_work_sanity_check+0x4c> __READ_ONCE_SIZE; 980: f9400260 ldr x0, [x19] WARN_ON_ONCE(!list_empty(&work->node)); 984: eb00027f cmp x19, x0 988: 54000241 b.ne 9d0 <kthread_insert_work_sanity_check+0x70> // b.any WARN_ON_ONCE(work->worker && work->worker != worker); 98c: f9400e60 ldr x0, [x19, #24] 990: f100001f cmp x0, #0x0 994: fa541004 ccmp x0, x20, #0x4, ne // ne = any 998: 54000040 b.eq 9a0 <kthread_insert_work_sanity_check+0x40> // b.none 99c: d4210000 brk #0x800 } 9a0: a94153f3 ldp x19, x20, [sp, #16] 9a4: a8c27bfd ldp x29, x30, [sp], #32 9a8: d65f03c0 ret */ extern int lock_is_held_type(struct lockdep_map *lock, int read); static inline int lock_is_held(struct lockdep_map *lock) { return lock_is_held_type(lock, -1); 9ac: 12800001 mov w1, #0xffffffff // #-1 9b0: 91008280 add x0, x20, #0x20 9b4: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&worker->lock); 9b8: 35fffe40 cbnz w0, 980 <kthread_insert_work_sanity_check+0x20> 9bc: d4210000 brk #0x800 9c0: f9400260 ldr x0, [x19] WARN_ON_ONCE(!list_empty(&work->node)); 9c4: eb00027f cmp x19, x0 9c8: 54fffe20 b.eq 98c <kthread_insert_work_sanity_check+0x2c> // b.none 9cc: d503201f nop 9d0: d4210000 brk #0x800 9d4: 17ffffee b 98c <kthread_insert_work_sanity_check+0x2c> 00000000000009d8 <kthread_insert_work>: { 9d8: a9bd7bfd stp x29, x30, [sp, #-48]! 9dc: 910003fd mov x29, sp 9e0: a90153f3 stp x19, x20, [sp, #16] 9e4: aa0203f4 mov x20, x2 9e8: aa0103f3 mov x19, x1 9ec: f90013f5 str x21, [sp, #32] 9f0: aa0003f5 mov x21, x0 kthread_insert_work_sanity_check(worker, work); 9f4: 97ffffdb bl 960 <kthread_insert_work_sanity_check> __list_add(new, head->prev, head); 9f8: f9400680 ldr x0, [x20, #8] next->prev = new; 9fc: f9000693 str x19, [x20, #8] new->prev = prev; a00: a9000274 stp x20, x0, [x19] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; a04: f9000013 str x19, [x0] work->worker = worker; a08: f9000e75 str x21, [x19, #24] if (!worker->current_work && likely(worker->task)) a0c: f9403ea0 ldr x0, [x21, #120] a10: b40000a0 cbz x0, a24 <kthread_insert_work+0x4c> } a14: a94153f3 ldp x19, x20, [sp, #16] a18: f94013f5 ldr x21, [sp, #32] a1c: a8c37bfd ldp x29, x30, [sp], #48 a20: d65f03c0 ret if (!worker->current_work && likely(worker->task)) a24: f9403aa0 ldr x0, [x21, #112] a28: b4ffff60 cbz x0, a14 <kthread_insert_work+0x3c> wake_up_process(worker->task); a2c: 94000000 bl 0 <wake_up_process> } a30: a94153f3 ldp x19, x20, [sp, #16] a34: f94013f5 ldr x21, [sp, #32] a38: a8c37bfd ldp x29, x30, [sp], #48 a3c: d65f03c0 ret 0000000000000a40 <kthread_queue_work>: { a40: a9bc7bfd stp x29, x30, [sp, #-64]! 
a44: 910003fd mov x29, sp a48: a90153f3 stp x19, x20, [sp, #16] raw_spin_lock_irqsave(&worker->lock, flags); a4c: 91002014 add x20, x0, #0x8 { a50: aa0103f3 mov x19, x1 a54: a9025bf5 stp x21, x22, [sp, #32] a58: aa0003f5 mov x21, x0 raw_spin_lock_irqsave(&worker->lock, flags); a5c: aa1403e0 mov x0, x20 { a60: f9001bf7 str x23, [sp, #48] raw_spin_lock_irqsave(&worker->lock, flags); a64: 94000000 bl 0 <_raw_spin_lock_irqsave> a68: aa0003f7 mov x23, x0 lockdep_assert_held(&worker->lock); a6c: 90000000 adrp x0, 0 <debug_locks> a70: b9400000 ldr w0, [x0] a74: 350001c0 cbnz w0, aac <kthread_queue_work+0x6c> __READ_ONCE_SIZE; a78: f9400260 ldr x0, [x19] bool ret = false; a7c: 52800016 mov w22, #0x0 // #0 return !list_empty(&work->node) || work->canceling; a80: eb00027f cmp x19, x0 a84: 54000260 b.eq ad0 <kthread_queue_work+0x90> // b.none raw_spin_unlock_irqrestore(&worker->lock, flags); a88: aa1703e1 mov x1, x23 a8c: aa1403e0 mov x0, x20 a90: 94000000 bl 0 <_raw_spin_unlock_irqrestore> } a94: 2a1603e0 mov w0, w22 a98: a94153f3 ldp x19, x20, [sp, #16] a9c: a9425bf5 ldp x21, x22, [sp, #32] aa0: f9401bf7 ldr x23, [sp, #48] aa4: a8c47bfd ldp x29, x30, [sp], #64 aa8: d65f03c0 ret aac: 12800001 mov w1, #0xffffffff // #-1 ab0: 910082a0 add x0, x21, #0x20 ab4: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&worker->lock); ab8: 35fffe00 cbnz w0, a78 <kthread_queue_work+0x38> abc: d4210000 brk #0x800 ac0: f9400260 ldr x0, [x19] bool ret = false; ac4: 52800016 mov w22, #0x0 // #0 return !list_empty(&work->node) || work->canceling; ac8: eb00027f cmp x19, x0 acc: 54fffde1 b.ne a88 <kthread_queue_work+0x48> // b.any ad0: b9402260 ldr w0, [x19, #32] ad4: 35fffda0 cbnz w0, a88 <kthread_queue_work+0x48> kthread_insert_work(worker, work, &worker->work_list); ad8: aa1303e1 mov x1, x19 adc: 910142a2 add x2, x21, #0x50 ae0: aa1503e0 mov x0, x21 ret = true; ae4: 52800036 mov w22, #0x1 // #1 kthread_insert_work(worker, work, &worker->work_list); ae8: 97ffffbc bl 9d8 <kthread_insert_work> ret = true; aec: 17ffffe7 b a88 <kthread_queue_work+0x48> 0000000000000af0 <kthread_flush_worker>: * * Wait until all currently executing or pending works on @worker are * finished. 
*/ void kthread_flush_worker(struct kthread_worker *worker) { af0: d102c3ff sub sp, sp, #0xb0 struct kthread_flush_work fwork = { af4: 90000003 adrp x3, 0 <kthread_should_stop> af8: 910003e4 mov x4, sp afc: 91000063 add x3, x3, #0x0 init_swait_queue_head(&x->wait); b00: 90000002 adrp x2, 0 <kthread_should_stop> b04: 90000001 adrp x1, 0 <kthread_should_stop> { b08: a9097bfd stp x29, x30, [sp, #144] b0c: 910243fd add x29, sp, #0x90 b10: 91000042 add x2, x2, #0x0 b14: 91000021 add x1, x1, #0x0 b18: a90a53f3 stp x19, x20, [sp, #160] b1c: 90000013 adrp x19, 0 <__stack_chk_guard> b20: 91000273 add x19, x19, #0x0 b24: aa0003f4 mov x20, x0 b28: f9400260 ldr x0, [x19] b2c: f90047e0 str x0, [sp, #136] b30: d2800000 mov x0, #0x0 // #0 b34: 9100c3e0 add x0, sp, #0x30 struct kthread_flush_work fwork = { b38: a90013e4 stp x4, x4, [sp] b3c: f9000be3 str x3, [sp, #16] b40: a901ffff stp xzr, xzr, [sp, #24] b44: a902ffff stp xzr, xzr, [sp, #40] b48: a903ffff stp xzr, xzr, [sp, #56] b4c: a904ffff stp xzr, xzr, [sp, #72] b50: a905ffff stp xzr, xzr, [sp, #88] b54: a906ffff stp xzr, xzr, [sp, #104] b58: a907ffff stp xzr, xzr, [sp, #120] b5c: 94000000 bl 0 <__init_swait_queue_head> KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn), COMPLETION_INITIALIZER_ONSTACK(fwork.done), }; kthread_queue_work(worker, &fwork.work); b60: 910003e1 mov x1, sp b64: aa1403e0 mov x0, x20 b68: 94000000 bl a40 <kthread_queue_work> wait_for_completion(&fwork.done); b6c: 9100a3e0 add x0, sp, #0x28 b70: 94000000 bl 0 <wait_for_completion> } b74: f94047e1 ldr x1, [sp, #136] b78: f9400260 ldr x0, [x19] b7c: ca000020 eor x0, x1, x0 b80: b50000a0 cbnz x0, b94 <kthread_flush_worker+0xa4> b84: a9497bfd ldp x29, x30, [sp, #144] b88: a94a53f3 ldp x19, x20, [sp, #160] b8c: 9102c3ff add sp, sp, #0xb0 b90: d65f03c0 ret b94: 94000000 bl 0 <__stack_chk_fail> 0000000000000b98 <kthread_delayed_work_timer_fn>: { b98: a9bd7bfd stp x29, x30, [sp, #-48]! 
b9c: 910003fd mov x29, sp ba0: a90153f3 stp x19, x20, [sp, #16] struct kthread_worker *worker = work->worker; ba4: f9400c14 ldr x20, [x0, #24] if (WARN_ON_ONCE(!worker)) ba8: b4000374 cbz x20, c14 <kthread_delayed_work_timer_fn+0x7c> bac: aa0003f3 mov x19, x0 raw_spin_lock(&worker->lock); bb0: f90013f5 str x21, [sp, #32] bb4: 91002295 add x21, x20, #0x8 bb8: aa1503e0 mov x0, x21 bbc: 94000000 bl 0 <_raw_spin_lock> WARN_ON_ONCE(work->worker != worker); bc0: f9400e60 ldr x0, [x19, #24] bc4: eb14001f cmp x0, x20 bc8: 540002a1 b.ne c1c <kthread_delayed_work_timer_fn+0x84> // b.any bcc: f9400260 ldr x0, [x19] WARN_ON_ONCE(list_empty(&work->node)); bd0: eb00027f cmp x19, x0 bd4: 54000280 b.eq c24 <kthread_delayed_work_timer_fn+0x8c> // b.none __list_del(entry->prev, entry->next); bd8: a9400e64 ldp x4, x3, [x19] next->prev = prev; bdc: f9000483 str x3, [x4, #8] kthread_insert_work(worker, work, &worker->work_list); be0: aa1303e1 mov x1, x19 be4: 91014282 add x2, x20, #0x50 be8: aa1403e0 mov x0, x20 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; bec: f9000064 str x4, [x3] bf0: f9000273 str x19, [x19] list->prev = list; bf4: f9000673 str x19, [x19, #8] bf8: 97ffff78 bl 9d8 <kthread_insert_work> raw_spin_unlock(&worker->lock); bfc: aa1503e0 mov x0, x21 c00: 94000000 bl 0 <_raw_spin_unlock> c04: f94013f5 ldr x21, [sp, #32] } c08: a94153f3 ldp x19, x20, [sp, #16] c0c: a8c37bfd ldp x29, x30, [sp], #48 c10: d65f03c0 ret if (WARN_ON_ONCE(!worker)) c14: d4210000 brk #0x800 c18: 17fffffc b c08 <kthread_delayed_work_timer_fn+0x70> WARN_ON_ONCE(work->worker != worker); c1c: d4210000 brk #0x800 c20: 17ffffeb b bcc <kthread_delayed_work_timer_fn+0x34> WARN_ON_ONCE(list_empty(&work->node)); c24: d4210000 brk #0x800 c28: 17ffffec b bd8 <kthread_delayed_work_timer_fn+0x40> c2c: d503201f nop 0000000000000c30 <kthread_flush_work>: { c30: d10303ff sub sp, sp, #0xc0 struct kthread_flush_work fwork = { c34: 90000003 adrp x3, 0 <kthread_should_stop> c38: 910003e4 mov x4, sp c3c: 91000063 add x3, x3, #0x0 c40: 90000002 adrp x2, 0 <kthread_should_stop> c44: 90000001 adrp x1, 0 <kthread_should_stop> { c48: a9097bfd stp x29, x30, [sp, #144] c4c: 910243fd add x29, sp, #0x90 c50: 91000042 add x2, x2, #0x0 c54: 91000021 add x1, x1, #0x0 c58: a90a53f3 stp x19, x20, [sp, #160] c5c: aa0003f4 mov x20, x0 c60: 90000013 adrp x19, 0 <__stack_chk_guard> c64: 91000273 add x19, x19, #0x0 struct kthread_flush_work fwork = { c68: a90013e4 stp x4, x4, [sp] { c6c: f9400260 ldr x0, [x19] c70: f90047e0 str x0, [sp, #136] c74: d2800000 mov x0, #0x0 // #0 c78: 9100c3e0 add x0, sp, #0x30 struct kthread_flush_work fwork = { c7c: f9000be3 str x3, [sp, #16] c80: a901ffff stp xzr, xzr, [sp, #24] c84: a902ffff stp xzr, xzr, [sp, #40] c88: a903ffff stp xzr, xzr, [sp, #56] c8c: a904ffff stp xzr, xzr, [sp, #72] c90: a905ffff stp xzr, xzr, [sp, #88] c94: a906ffff stp xzr, xzr, [sp, #104] c98: a907ffff stp xzr, xzr, [sp, #120] { c9c: a90b5bf5 stp x21, x22, [sp, #176] ca0: 94000000 bl 0 <__init_swait_queue_head> worker = work->worker; ca4: f9400e95 ldr x21, [x20, #24] if (!worker) ca8: b4000255 cbz x21, cf0 <kthread_flush_work+0xc0> raw_spin_lock_irq(&worker->lock); cac: 910022b6 add x22, x21, #0x8 cb0: aa1603e0 mov x0, x22 cb4: 94000000 bl 0 <_raw_spin_lock_irq> WARN_ON_ONCE(work->worker != worker); cb8: f9400e80 ldr x0, [x20, #24] cbc: eb15001f cmp x0, x21 cc0: 54000361 b.ne d2c <kthread_flush_work+0xfc> // b.any __READ_ONCE_SIZE; cc4: f9400280 ldr x0, [x20] if (!list_empty(&work->node)) cc8: eb00029f cmp x20, x0 ccc: 54000240 b.eq d14 
<kthread_flush_work+0xe4> // b.none kthread_insert_work(worker, &fwork.work, work->node.next); cd0: f9400282 ldr x2, [x20] kthread_insert_work(worker, &fwork.work, cd4: 910003e1 mov x1, sp cd8: aa1503e0 mov x0, x21 cdc: 97ffff3f bl 9d8 <kthread_insert_work> raw_spin_unlock_irq(&worker->lock); ce0: aa1603e0 mov x0, x22 ce4: 94000000 bl 0 <_raw_spin_unlock_irq> wait_for_completion(&fwork.done); ce8: 9100a3e0 add x0, sp, #0x28 cec: 94000000 bl 0 <wait_for_completion> } cf0: f94047e1 ldr x1, [sp, #136] cf4: f9400260 ldr x0, [x19] cf8: ca000020 eor x0, x1, x0 cfc: b5000200 cbnz x0, d3c <kthread_flush_work+0x10c> d00: a9497bfd ldp x29, x30, [sp, #144] d04: a94a53f3 ldp x19, x20, [sp, #160] d08: a94b5bf5 ldp x21, x22, [sp, #176] d0c: 910303ff add sp, sp, #0xc0 d10: d65f03c0 ret else if (worker->current_work == work) d14: f9403ea0 ldr x0, [x21, #120] d18: eb14001f cmp x0, x20 d1c: 540000c0 b.eq d34 <kthread_flush_work+0x104> // b.none raw_spin_unlock_irq(&worker->lock); d20: aa1603e0 mov x0, x22 d24: 94000000 bl 0 <_raw_spin_unlock_irq> if (!noop) d28: 17fffff2 b cf0 <kthread_flush_work+0xc0> WARN_ON_ONCE(work->worker != worker); d2c: d4210000 brk #0x800 d30: 17ffffe5 b cc4 <kthread_flush_work+0x94> kthread_insert_work(worker, &fwork.work, d34: f9402aa2 ldr x2, [x21, #80] d38: 17ffffe7 b cd4 <kthread_flush_work+0xa4> } d3c: 94000000 bl 0 <__stack_chk_fail> 0000000000000d40 <__kthread_cancel_work_sync>: { d40: d10143ff sub sp, sp, #0x50 d44: a9017bfd stp x29, x30, [sp, #16] d48: 910043fd add x29, sp, #0x10 d4c: a90253f3 stp x19, x20, [sp, #32] d50: aa0003f3 mov x19, x0 d54: 90000014 adrp x20, 0 <__stack_chk_guard> d58: a9035bf5 stp x21, x22, [sp, #48] d5c: 91000294 add x20, x20, #0x0 d60: f9400280 ldr x0, [x20] d64: f90007e0 str x0, [sp, #8] d68: d2800000 mov x0, #0x0 // #0 struct kthread_worker *worker = work->worker; d6c: f9400e75 ldr x21, [x19, #24] if (!worker) d70: 52800016 mov w22, #0x0 // #0 d74: b40002d5 cbz x21, dcc <__kthread_cancel_work_sync+0x8c> raw_spin_lock_irqsave(&worker->lock, flags); d78: a90463f7 stp x23, x24, [sp, #64] d7c: 910022b7 add x23, x21, #0x8 d80: 12001c38 and w24, w1, #0xff d84: aa1703e0 mov x0, x23 d88: 94000000 bl 0 <_raw_spin_lock_irqsave> WARN_ON_ONCE(work->worker != worker); d8c: f9400e61 ldr x1, [x19, #24] raw_spin_lock_irqsave(&worker->lock, flags); d90: f90003e0 str x0, [sp] WARN_ON_ONCE(work->worker != worker); d94: eb15003f cmp x1, x21 d98: 540004c1 b.ne e30 <__kthread_cancel_work_sync+0xf0> // b.any ret = __kthread_cancel_work(work, is_dwork, &flags); d9c: 2a1803e1 mov w1, w24 da0: 910003e2 mov x2, sp da4: aa1303e0 mov x0, x19 da8: 97fffec6 bl 8c0 <__kthread_cancel_work> if (worker->current_work != work) dac: f9403ea1 ldr x1, [x21, #120] ret = __kthread_cancel_work(work, is_dwork, &flags); db0: 12001c16 and w22, w0, #0xff if (worker->current_work != work) db4: eb13003f cmp x1, x19 db8: f94003e1 ldr x1, [sp] dbc: 540001c0 b.eq df4 <__kthread_cancel_work_sync+0xb4> // b.none raw_spin_unlock_irqrestore(&worker->lock, flags); dc0: aa1703e0 mov x0, x23 dc4: 94000000 bl 0 <_raw_spin_unlock_irqrestore> dc8: a94463f7 ldp x23, x24, [sp, #64] } dcc: 2a1603e0 mov w0, w22 dd0: f94007e2 ldr x2, [sp, #8] dd4: f9400281 ldr x1, [x20] dd8: ca010041 eor x1, x2, x1 ddc: b50002e1 cbnz x1, e38 <__kthread_cancel_work_sync+0xf8> de0: a9417bfd ldp x29, x30, [sp, #16] de4: a94253f3 ldp x19, x20, [sp, #32] de8: a9435bf5 ldp x21, x22, [sp, #48] dec: 910143ff add sp, sp, #0x50 df0: d65f03c0 ret work->canceling++; df4: b9402262 ldr w2, [x19, #32] 
raw_spin_unlock_irqrestore(&worker->lock, flags); df8: aa1703e0 mov x0, x23 work->canceling++; dfc: 11000442 add w2, w2, #0x1 e00: b9002262 str w2, [x19, #32] raw_spin_unlock_irqrestore(&worker->lock, flags); e04: 94000000 bl 0 <_raw_spin_unlock_irqrestore> kthread_flush_work(work); e08: aa1303e0 mov x0, x19 e0c: 94000000 bl c30 <kthread_flush_work> raw_spin_lock_irqsave(&worker->lock, flags); e10: aa1703e0 mov x0, x23 e14: 94000000 bl 0 <_raw_spin_lock_irqsave> e18: aa0003e1 mov x1, x0 work->canceling--; e1c: b9402260 ldr w0, [x19, #32] raw_spin_lock_irqsave(&worker->lock, flags); e20: f90003e1 str x1, [sp] work->canceling--; e24: 51000400 sub w0, w0, #0x1 e28: b9002260 str w0, [x19, #32] e2c: 17ffffe5 b dc0 <__kthread_cancel_work_sync+0x80> WARN_ON_ONCE(work->worker != worker); e30: d4210000 brk #0x800 e34: 17ffffda b d9c <__kthread_cancel_work_sync+0x5c> e38: a90463f7 stp x23, x24, [sp, #64] } e3c: 94000000 bl 0 <__stack_chk_fail> 0000000000000e40 <kthread_cancel_work_sync>: { e40: a9bf7bfd stp x29, x30, [sp, #-16]! return __kthread_cancel_work_sync(work, false); e44: 52800001 mov w1, #0x0 // #0 { e48: 910003fd mov x29, sp return __kthread_cancel_work_sync(work, false); e4c: 97ffffbd bl d40 <__kthread_cancel_work_sync> } e50: a8c17bfd ldp x29, x30, [sp], #16 e54: d65f03c0 ret 0000000000000e58 <kthread_cancel_delayed_work_sync>: { e58: a9bf7bfd stp x29, x30, [sp, #-16]! return __kthread_cancel_work_sync(&dwork->work, true); e5c: 52800021 mov w1, #0x1 // #1 { e60: 910003fd mov x29, sp return __kthread_cancel_work_sync(&dwork->work, true); e64: 97ffffb7 bl d40 <__kthread_cancel_work_sync> } e68: a8c17bfd ldp x29, x30, [sp], #16 e6c: d65f03c0 ret 0000000000000e70 <__kthread_bind_mask>: { e70: a9bd7bfd stp x29, x30, [sp, #-48]! e74: 910003fd mov x29, sp e78: a90153f3 stp x19, x20, [sp, #16] e7c: aa0103f4 mov x20, x1 e80: aa0003f3 mov x19, x0 if (!wait_task_inactive(p, state)) { e84: aa0203e1 mov x1, x2 e88: 94000000 bl 0 <wait_task_inactive> e8c: b50000a0 cbnz x0, ea0 <__kthread_bind_mask+0x30> WARN_ON(1); e90: d4210000 brk #0x800 } e94: a94153f3 ldp x19, x20, [sp, #16] e98: a8c37bfd ldp x29, x30, [sp], #48 e9c: d65f03c0 ret raw_spin_lock_irqsave(&p->pi_lock, flags); ea0: a9025bf5 stp x21, x22, [sp, #32] ea4: 91278275 add x21, x19, #0x9e0 ea8: aa1503e0 mov x0, x21 eac: 94000000 bl 0 <_raw_spin_lock_irqsave> eb0: aa0003f6 mov x22, x0 do_set_cpus_allowed(p, mask); eb4: aa1403e1 mov x1, x20 eb8: aa1303e0 mov x0, x19 ebc: 94000000 bl 0 <do_set_cpus_allowed> p->flags |= PF_NO_SETAFFINITY; ec0: b9403662 ldr w2, [x19, #52] raw_spin_unlock_irqrestore(&p->pi_lock, flags); ec4: aa1603e1 mov x1, x22 ec8: aa1503e0 mov x0, x21 p->flags |= PF_NO_SETAFFINITY; ecc: 32060042 orr w2, w2, #0x4000000 ed0: b9003662 str w2, [x19, #52] raw_spin_unlock_irqrestore(&p->pi_lock, flags); ed4: 94000000 bl 0 <_raw_spin_unlock_irqrestore> } ed8: a94153f3 ldp x19, x20, [sp, #16] raw_spin_unlock_irqrestore(&p->pi_lock, flags); edc: a9425bf5 ldp x21, x22, [sp, #32] } ee0: a8c37bfd ldp x29, x30, [sp], #48 ee4: d65f03c0 ret 0000000000000ee8 <kthread_bind>: extern const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; static inline const struct cpumask *get_cpu_mask(unsigned int cpu) { const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; ee8: 92401422 and x2, x1, #0x3f { eec: a9bf7bfd stp x29, x30, [sp, #-16]! 
p -= cpu / BITS_PER_LONG; ef0: 91000442 add x2, x2, #0x1 ef4: 90000003 adrp x3, 0 <cpu_bit_bitmap> ef8: 53067c21 lsr w1, w1, #6 efc: 91000063 add x3, x3, #0x0 f00: cb010041 sub x1, x2, x1 f04: 910003fd mov x29, sp __kthread_bind_mask(p, cpumask_of(cpu), state); f08: d2800042 mov x2, #0x2 // #2 f0c: 8b010c61 add x1, x3, x1, lsl #3 f10: 97ffffd8 bl e70 <__kthread_bind_mask> } f14: a8c17bfd ldp x29, x30, [sp], #16 f18: d65f03c0 ret f1c: d503201f nop 0000000000000f20 <kthread_unpark>: { f20: a9be7bfd stp x29, x30, [sp, #-32]! f24: 910003fd mov x29, sp f28: a90153f3 stp x19, x20, [sp, #16] f2c: aa0003f3 mov x19, x0 WARN_ON(!(k->flags & PF_KTHREAD)); f30: b9403400 ldr w0, [x0, #52] f34: 36a80400 tbz w0, #21, fb4 <kthread_unpark+0x94> return (__force void *)k->set_child_tid; f38: f9434274 ldr x20, [x19, #1664] clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); f3c: 52800040 mov w0, #0x2 // #2 f40: aa1403e1 mov x1, x20 f44: 94000000 bl 0 <clear_bit> if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { f48: aa1403e1 mov x1, x20 f4c: 52800060 mov w0, #0x3 // #3 f50: 94000000 bl 0 <test_and_clear_bit> f54: 340000c0 cbz w0, f6c <kthread_unpark+0x4c> f58: f9400280 ldr x0, [x20] if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) f5c: 370000e0 tbnz w0, #0, f78 <kthread_unpark+0x58> wake_up_state(k, TASK_PARKED); f60: aa1303e0 mov x0, x19 f64: 52800801 mov w1, #0x40 // #64 f68: 94000000 bl 0 <wake_up_state> } f6c: a94153f3 ldp x19, x20, [sp, #16] f70: a8c27bfd ldp x29, x30, [sp], #32 f74: d65f03c0 ret __kthread_bind(k, kthread->cpu, TASK_PARKED); f78: b9400a84 ldr w4, [x20, #8] f7c: 90000005 adrp x5, 0 <cpu_bit_bitmap> f80: 910000a5 add x5, x5, #0x0 __kthread_bind_mask(p, cpumask_of(cpu), state); f84: d2800802 mov x2, #0x40 // #64 const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; f88: 92401483 and x3, x4, #0x3f f8c: aa1303e0 mov x0, x19 p -= cpu / BITS_PER_LONG; f90: 91000463 add x3, x3, #0x1 f94: 53067c84 lsr w4, w4, #6 f98: cb040061 sub x1, x3, x4 f9c: 8b010ca1 add x1, x5, x1, lsl #3 fa0: 97ffffb4 bl e70 <__kthread_bind_mask> wake_up_state(k, TASK_PARKED); fa4: aa1303e0 mov x0, x19 fa8: 52800801 mov w1, #0x40 // #64 fac: 94000000 bl 0 <wake_up_state> fb0: 17ffffef b f6c <kthread_unpark+0x4c> WARN_ON(!(k->flags & PF_KTHREAD)); fb4: d4210000 brk #0x800 fb8: 17ffffe0 b f38 <kthread_unpark+0x18> fbc: d503201f nop 0000000000000fc0 <kthread_stop>: { fc0: a9be7bfd stp x29, x30, [sp, #-32]! 
ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\ ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\ ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) ATOMIC_OPS(add, add) fc4: 9100c002 add x2, x0, #0x30 fc8: 910003fd mov x29, sp fcc: a90153f3 stp x19, x20, [sp, #16] fd0: aa0003f3 mov x19, x0 fd4: f9800051 prfm pstl1strm, [x2] fd8: 885f7c40 ldxr w0, [x2] fdc: 11000400 add w0, w0, #0x1 fe0: 88017c40 stxr w1, w0, [x2] fe4: 35ffffa1 cbnz w1, fd8 <kthread_stop+0x18> WARN_ON(!(k->flags & PF_KTHREAD)); fe8: b9403660 ldr w0, [x19, #52] fec: 36a80420 tbz w0, #21, 1070 <kthread_stop+0xb0> return (__force void *)k->set_child_tid; ff0: f9434274 ldr x20, [x19, #1664] set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); ff4: 52800020 mov w0, #0x1 // #1 ff8: aa1403e1 mov x1, x20 ffc: 94000000 bl 0 <set_bit> kthread_unpark(k); 1000: aa1303e0 mov x0, x19 1004: 94000000 bl f20 <kthread_unpark> wake_up_process(k); 1008: aa1303e0 mov x0, x19 100c: 94000000 bl 0 <wake_up_process> wait_for_completion(&kthread->exited); 1010: 9101e280 add x0, x20, #0x78 1014: 94000000 bl 0 <wait_for_completion> ATOMIC_OPS(sub, sub) 1018: 9100c262 add x2, x19, #0x30 ret = k->exit_code; 101c: b9454a74 ldr w20, [x19, #1352] 1020: f9800051 prfm pstl1strm, [x2] 1024: 885f7c40 ldxr w0, [x2] 1028: 51000400 sub w0, w0, #0x1 102c: 8801fc40 stlxr w1, w0, [x2] 1030: 35ffffa1 cbnz w1, 1024 <kthread_stop+0x64> 1034: d5033bbf dmb ish #ifdef CONFIG_PREEMPT_RT_BASE extern void __put_task_struct_cb(struct rcu_head *rhp); static inline void put_task_struct(struct task_struct *t) { if (atomic_dec_and_test(&t->usage)) 1038: 340000a0 cbz w0, 104c <kthread_stop+0x8c> } 103c: 2a1403e0 mov w0, w20 1040: a94153f3 ldp x19, x20, [sp, #16] 1044: a8c27bfd ldp x29, x30, [sp], #32 1048: d65f03c0 ret call_rcu(&t->put_rcu, __put_task_struct_cb); 104c: d282df00 mov x0, #0x16f8 // #5880 1050: 90000001 adrp x1, 0 <__put_task_struct_cb> 1054: 8b000260 add x0, x19, x0 1058: 91000021 add x1, x1, #0x0 105c: 94000000 bl 0 <call_rcu> 1060: 2a1403e0 mov w0, w20 1064: a94153f3 ldp x19, x20, [sp, #16] 1068: a8c27bfd ldp x29, x30, [sp], #32 106c: d65f03c0 ret WARN_ON(!(k->flags & PF_KTHREAD)); 1070: d4210000 brk #0x800 1074: 17ffffdf b ff0 <kthread_stop+0x30> 0000000000001078 <kthread_destroy_worker>: * Flush and destroy @worker. The simple flush is enough because the kthread * worker API is used only in trivial scenarios. There are no multi-step state * machines needed. */ void kthread_destroy_worker(struct kthread_worker *worker) { 1078: a9be7bfd stp x29, x30, [sp, #-32]! 107c: 910003fd mov x29, sp 1080: a90153f3 stp x19, x20, [sp, #16] struct task_struct *task; task = worker->task; 1084: f9403814 ldr x20, [x0, #112] if (WARN_ON(!task)) 1088: b40001d4 cbz x20, 10c0 <kthread_destroy_worker+0x48> 108c: aa0003f3 mov x19, x0 return; kthread_flush_worker(worker); 1090: 94000000 bl af0 <kthread_flush_worker> kthread_stop(task); 1094: aa1403e0 mov x0, x20 1098: 94000000 bl fc0 <kthread_stop> 109c: aa1303e0 mov x0, x19 10a0: f8450c01 ldr x1, [x0, #80]! 
WARN_ON(!list_empty(&worker->work_list)); 10a4: eb01001f cmp x0, x1 10a8: 54000101 b.ne 10c8 <kthread_destroy_worker+0x50> // b.any kfree(worker); 10ac: aa1303e0 mov x0, x19 10b0: 94000000 bl 0 <kfree> } 10b4: a94153f3 ldp x19, x20, [sp, #16] 10b8: a8c27bfd ldp x29, x30, [sp], #32 10bc: d65f03c0 ret if (WARN_ON(!task)) 10c0: d4210000 brk #0x800 10c4: 17fffffc b 10b4 <kthread_destroy_worker+0x3c> WARN_ON(!list_empty(&worker->work_list)); 10c8: d4210000 brk #0x800 10cc: 17fffff8 b 10ac <kthread_destroy_worker+0x34> 00000000000010d0 <__kthread_create_worker>: { 10d0: a9ba7bfd stp x29, x30, [sp, #-96]! return kmem_cache_alloc_trace( 10d4: 90000004 adrp x4, 0 <kmalloc_caches> 10d8: 910003fd mov x29, sp 10dc: a9025bf5 stp x21, x22, [sp, #32] 10e0: 2a0003f5 mov w21, w0 10e4: aa0203f6 mov x22, x2 void *ret = kmem_cache_alloc(s, flags); 10e8: f9400080 ldr x0, [x4] 10ec: f9001bf7 str x23, [sp, #48] 10f0: 2a0103f7 mov w23, w1 10f4: 52901801 mov w1, #0x80c0 // #32960 10f8: 72a02801 movk w1, #0x140, lsl #16 10fc: a90153f3 stp x19, x20, [sp, #16] 1100: aa0303f4 mov x20, x3 return ERR_PTR(-ENOMEM); 1104: 92800173 mov x19, #0xfffffffffffffff4 // #-12 1108: 94000000 bl 0 <kmem_cache_alloc> if (!worker) 110c: b4000480 cbz x0, 119c <__kthread_create_worker+0xcc> kthread_init_worker(worker); 1110: 90000002 adrp x2, 0 <kthread_should_stop> 1114: 91000042 add x2, x2, #0x0 1118: 90000001 adrp x1, 0 <kthread_should_stop> 111c: aa0003f3 mov x19, x0 1120: 91006042 add x2, x2, #0x18 1124: 91000021 add x1, x1, #0x0 1128: 94000000 bl 610 <__kthread_init_worker> if (cpu >= 0) 112c: 37f80455 tbnz w21, #31, 11b4 <__kthread_create_worker+0xe4> task = __kthread_create_on_node(kthread_worker_fn, worker, 1130: a9402688 ldp x8, x9, [x20] 1134: aa1603e3 mov x3, x22 1138: a9411e86 ldp x6, x7, [x20, #16] 113c: 90000000 adrp x0, 6a8 <kthread_worker_fn> 1140: 910103e4 add x4, sp, #0x40 1144: 91000000 add x0, x0, #0x0 1148: 52800002 mov w2, #0x0 // #0 114c: aa1303e1 mov x1, x19 1150: a90427e8 stp x8, x9, [sp, #64] 1154: a9051fe6 stp x6, x7, [sp, #80] 1158: 97fffc2a bl 200 <__kthread_create_on_node> 115c: aa0003f4 mov x20, x0 if (IS_ERR(task)) 1160: b140041f cmn x0, #0x1, lsl #12 1164: 54000448 b.hi 11ec <__kthread_create_worker+0x11c> // b.pmore const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; 1168: 924016a1 and x1, x21, #0x3f p -= cpu / BITS_PER_LONG; 116c: 53067eb5 lsr w21, w21, #6 1170: 91000421 add x1, x1, #0x1 1174: 90000002 adrp x2, 0 <cpu_bit_bitmap> 1178: cb150035 sub x21, x1, x21 117c: 91000041 add x1, x2, #0x0 __kthread_bind_mask(p, cpumask_of(cpu), state); 1180: d2800042 mov x2, #0x2 // #2 1184: 8b150c21 add x1, x1, x21, lsl #3 1188: 97ffff3a bl e70 <__kthread_bind_mask> worker->flags = flags; 118c: b9000277 str w23, [x19] wake_up_process(task); 1190: aa1403e0 mov x0, x20 worker->task = task; 1194: f9003a74 str x20, [x19, #112] wake_up_process(task); 1198: 94000000 bl 0 <wake_up_process> } 119c: aa1303e0 mov x0, x19 11a0: a94153f3 ldp x19, x20, [sp, #16] 11a4: a9425bf5 ldp x21, x22, [sp, #32] 11a8: f9401bf7 ldr x23, [sp, #48] 11ac: a8c67bfd ldp x29, x30, [sp], #96 11b0: d65f03c0 ret task = __kthread_create_on_node(kthread_worker_fn, worker, 11b4: a9402688 ldp x8, x9, [x20] 11b8: aa1603e3 mov x3, x22 11bc: a9411e86 ldp x6, x7, [x20, #16] 11c0: 90000000 adrp x0, 6a8 <kthread_worker_fn> 11c4: 910103e4 add x4, sp, #0x40 11c8: 91000000 add x0, x0, #0x0 11cc: 12800002 mov w2, #0xffffffff // #-1 11d0: aa1303e1 mov x1, x19 11d4: a90427e8 stp x8, x9, [sp, #64] 11d8: a9051fe6 stp x6, x7, [sp, #80] 11dc: 97fffc09 
bl 200 <__kthread_create_on_node> 11e0: aa0003f4 mov x20, x0 if (IS_ERR(task)) 11e4: b140041f cmn x0, #0x1, lsl #12 11e8: 54fffd29 b.ls 118c <__kthread_create_worker+0xbc> // b.plast kfree(worker); 11ec: aa1303e0 mov x0, x19 return ERR_CAST(task); 11f0: aa1403f3 mov x19, x20 kfree(worker); 11f4: 94000000 bl 0 <kfree> } 11f8: aa1303e0 mov x0, x19 11fc: a94153f3 ldp x19, x20, [sp, #16] 1200: a9425bf5 ldp x21, x22, [sp, #32] 1204: f9401bf7 ldr x23, [sp, #48] 1208: a8c67bfd ldp x29, x30, [sp], #96 120c: d65f03c0 ret 0000000000001210 <kthread_create_worker>: { 1210: d10283ff sub sp, sp, #0xa0 va_start(args, namefmt); 1214: 128005e8 mov w8, #0xffffffd0 // #-48 1218: 9101c3e9 add x9, sp, #0x70 121c: 910283ea add x10, sp, #0xa0 { 1220: a9057bfd stp x29, x30, [sp, #80] 1224: 910143fd add x29, sp, #0x50 va_start(args, namefmt); 1228: a902abea stp x10, x10, [sp, #40] { 122c: f90033f3 str x19, [sp, #96] 1230: 90000013 adrp x19, 0 <__stack_chk_guard> 1234: 91000273 add x19, x19, #0x0 va_start(args, namefmt); 1238: f9001fe9 str x9, [sp, #56] { 123c: f9400269 ldr x9, [x19] 1240: f90027e9 str x9, [sp, #72] 1244: d2800009 mov x9, #0x0 // #0 va_start(args, namefmt); 1248: 29087fe8 stp w8, wzr, [sp, #64] worker = __kthread_create_worker(-1, flags, namefmt, args); 124c: a942a7e8 ldp x8, x9, [sp, #40] 1250: a90027e8 stp x8, x9, [sp] 1254: a943a7e8 ldp x8, x9, [sp, #56] { 1258: a9070fe2 stp x2, x3, [sp, #112] worker = __kthread_create_worker(-1, flags, namefmt, args); 125c: aa0103e2 mov x2, x1 1260: 2a0003e1 mov w1, w0 1264: 910003e3 mov x3, sp 1268: 12800000 mov w0, #0xffffffff // #-1 126c: a90127e8 stp x8, x9, [sp, #16] { 1270: a90817e4 stp x4, x5, [sp, #128] 1274: a9091fe6 stp x6, x7, [sp, #144] worker = __kthread_create_worker(-1, flags, namefmt, args); 1278: 97ffff96 bl 10d0 <__kthread_create_worker> } 127c: f94027e2 ldr x2, [sp, #72] 1280: f9400261 ldr x1, [x19] 1284: ca010041 eor x1, x2, x1 1288: b50000a1 cbnz x1, 129c <kthread_create_worker+0x8c> 128c: a9457bfd ldp x29, x30, [sp, #80] 1290: f94033f3 ldr x19, [sp, #96] 1294: 910283ff add sp, sp, #0xa0 1298: d65f03c0 ret 129c: 94000000 bl 0 <__stack_chk_fail> 00000000000012a0 <kthread_create_worker_on_cpu>: { 12a0: d10283ff sub sp, sp, #0xa0 va_start(args, namefmt); 12a4: 128004e8 mov w8, #0xffffffd8 // #-40 12a8: 9101c3e9 add x9, sp, #0x70 12ac: 910283ea add x10, sp, #0xa0 { 12b0: a9057bfd stp x29, x30, [sp, #80] 12b4: 910143fd add x29, sp, #0x50 va_start(args, namefmt); 12b8: a902abea stp x10, x10, [sp, #40] { 12bc: f90033f3 str x19, [sp, #96] 12c0: 90000013 adrp x19, 0 <__stack_chk_guard> 12c4: 91000273 add x19, x19, #0x0 va_start(args, namefmt); 12c8: f9001fe9 str x9, [sp, #56] { 12cc: f9400269 ldr x9, [x19] 12d0: f90027e9 str x9, [sp, #72] 12d4: d2800009 mov x9, #0x0 // #0 va_start(args, namefmt); 12d8: 29087fe8 stp w8, wzr, [sp, #64] worker = __kthread_create_worker(cpu, flags, namefmt, args); 12dc: a942a7e8 ldp x8, x9, [sp, #40] 12e0: a90027e8 stp x8, x9, [sp] 12e4: a943a7e8 ldp x8, x9, [sp, #56] 12e8: a90127e8 stp x8, x9, [sp, #16] { 12ec: a90793e3 stp x3, x4, [sp, #120] worker = __kthread_create_worker(cpu, flags, namefmt, args); 12f0: 910003e3 mov x3, sp { 12f4: a9089be5 stp x5, x6, [sp, #136] 12f8: f9004fe7 str x7, [sp, #152] worker = __kthread_create_worker(cpu, flags, namefmt, args); 12fc: 97ffff75 bl 10d0 <__kthread_create_worker> } 1300: f94027e2 ldr x2, [sp, #72] 1304: f9400261 ldr x1, [x19] 1308: ca010041 eor x1, x2, x1 130c: b50000a1 cbnz x1, 1320 <kthread_create_worker_on_cpu+0x80> 1310: a9457bfd ldp x29, x30, [sp, #80] 1314: f94033f3 
ldr x19, [sp, #96] 1318: 910283ff add sp, sp, #0xa0 131c: d65f03c0 ret 1320: 94000000 bl 0 <__stack_chk_fail> 1324: d503201f nop 0000000000001328 <free_kthread_struct>: { 1328: a9bf7bfd stp x29, x30, [sp, #-16]! 132c: 910003fd mov x29, sp WARN_ON(!(k->flags & PF_KTHREAD)); 1330: b9403401 ldr w1, [x0, #52] 1334: 36a800a1 tbz w1, #21, 1348 <free_kthread_struct+0x20> kfree(to_kthread(k)); 1338: f9434000 ldr x0, [x0, #1664] 133c: 94000000 bl 0 <kfree> } 1340: a8c17bfd ldp x29, x30, [sp], #16 1344: d65f03c0 ret WARN_ON(!(k->flags & PF_KTHREAD)); 1348: d4210000 brk #0x800 134c: 17fffffb b 1338 <free_kthread_struct+0x10> 0000000000001350 <kthread_data>: 1350: b9403401 ldr w1, [x0, #52] 1354: 36a80081 tbz w1, #21, 1364 <kthread_data+0x14> return to_kthread(task)->data; 1358: f9434000 ldr x0, [x0, #1664] } 135c: f9400800 ldr x0, [x0, #16] 1360: d65f03c0 ret WARN_ON(!(k->flags & PF_KTHREAD)); 1364: d4210000 brk #0x800 1368: 17fffffc b 1358 <kthread_data+0x8> 136c: d503201f nop 0000000000001370 <kthread_probe_data>: { 1370: d100c3ff sub sp, sp, #0x30 1374: a9017bfd stp x29, x30, [sp, #16] 1378: 910043fd add x29, sp, #0x10 137c: f90013f3 str x19, [sp, #32] 1380: 90000013 adrp x19, 0 <__stack_chk_guard> 1384: 91000273 add x19, x19, #0x0 1388: f9400261 ldr x1, [x19] 138c: f90007e1 str x1, [sp, #8] 1390: d2800001 mov x1, #0x0 // #0 WARN_ON(!(k->flags & PF_KTHREAD)); 1394: b9403401 ldr w1, [x0, #52] 1398: 36a80201 tbz w1, #21, 13d8 <kthread_probe_data+0x68> return (__force void *)k->set_child_tid; 139c: f9434001 ldr x1, [x0, #1664] probe_kernel_read(&data, &kthread->data, sizeof(data)); 13a0: d2800102 mov x2, #0x8 // #8 13a4: 910003e0 mov x0, sp void *data = NULL; 13a8: f90003ff str xzr, [sp] probe_kernel_read(&data, &kthread->data, sizeof(data)); 13ac: 91004021 add x1, x1, #0x10 13b0: 94000000 bl 0 <probe_kernel_read> } 13b4: f94007e0 ldr x0, [sp, #8] 13b8: f9400261 ldr x1, [x19] 13bc: ca010001 eor x1, x0, x1 13c0: f94003e0 ldr x0, [sp] 13c4: b50000e1 cbnz x1, 13e0 <kthread_probe_data+0x70> 13c8: a9417bfd ldp x29, x30, [sp, #16] 13cc: f94013f3 ldr x19, [sp, #32] 13d0: 9100c3ff add sp, sp, #0x30 13d4: d65f03c0 ret WARN_ON(!(k->flags & PF_KTHREAD)); 13d8: d4210000 brk #0x800 13dc: 17fffff0 b 139c <kthread_probe_data+0x2c> } 13e0: 94000000 bl 0 <__stack_chk_fail> 13e4: d503201f nop 00000000000013e8 <tsk_fork_get_node>: } 13e8: 12800000 mov w0, #0xffffffff // #-1 13ec: d65f03c0 ret 00000000000013f0 <kthread_bind_mask>: { 13f0: a9bf7bfd stp x29, x30, [sp, #-16]! __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE); 13f4: d2800042 mov x2, #0x2 // #2 { 13f8: 910003fd mov x29, sp __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE); 13fc: 97fffe9d bl e70 <__kthread_bind_mask> } 1400: a8c17bfd ldp x29, x30, [sp], #16 1404: d65f03c0 ret 0000000000001408 <kthread_create_on_cpu>: { 1408: a9be7bfd stp x29, x30, [sp, #-32]! 
140c: 910003fd mov x29, sp 1410: a90153f3 stp x19, x20, [sp, #16] 1414: 2a0203f4 mov w20, w2 p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt, 1418: 2a1403e4 mov w4, w20 141c: 52800002 mov w2, #0x0 // #0 1420: 94000000 bl 3b8 <kthread_create_on_node> 1424: aa0003f3 mov x19, x0 if (IS_ERR(p)) 1428: b140041f cmn x0, #0x1, lsl #12 142c: 540000a9 b.ls 1440 <kthread_create_on_cpu+0x38> // b.plast } 1430: aa1303e0 mov x0, x19 1434: a94153f3 ldp x19, x20, [sp, #16] 1438: a8c27bfd ldp x29, x30, [sp], #32 143c: d65f03c0 ret const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; 1440: 92401681 and x1, x20, #0x3f p -= cpu / BITS_PER_LONG; 1444: 53067e83 lsr w3, w20, #6 1448: 91000421 add x1, x1, #0x1 144c: 90000002 adrp x2, 0 <cpu_bit_bitmap> 1450: cb030023 sub x3, x1, x3 1454: 91000041 add x1, x2, #0x0 __kthread_bind_mask(p, cpumask_of(cpu), state); 1458: d2800042 mov x2, #0x2 // #2 145c: 8b030c21 add x1, x1, x3, lsl #3 1460: 97fffe84 bl e70 <__kthread_bind_mask> WARN_ON(!(k->flags & PF_KTHREAD)); 1464: b9403660 ldr w0, [x19, #52] 1468: 36a80180 tbz w0, #21, 1498 <kthread_create_on_cpu+0x90> set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags); 146c: f9434261 ldr x1, [x19, #1664] 1470: 52800000 mov w0, #0x0 // #0 1474: 94000000 bl 0 <set_bit> WARN_ON(!(k->flags & PF_KTHREAD)); 1478: b9403660 ldr w0, [x19, #52] 147c: 36a80120 tbz w0, #21, 14a0 <kthread_create_on_cpu+0x98> return (__force void *)k->set_child_tid; 1480: f9434260 ldr x0, [x19, #1664] to_kthread(p)->cpu = cpu; 1484: b9000814 str w20, [x0, #8] } 1488: aa1303e0 mov x0, x19 148c: a94153f3 ldp x19, x20, [sp, #16] 1490: a8c27bfd ldp x29, x30, [sp], #32 1494: d65f03c0 ret WARN_ON(!(k->flags & PF_KTHREAD)); 1498: d4210000 brk #0x800 149c: 17fffff4 b 146c <kthread_create_on_cpu+0x64> 14a0: d4210000 brk #0x800 14a4: 17fffff7 b 1480 <kthread_create_on_cpu+0x78> 00000000000014a8 <kthreadd>: { 14a8: d101c3ff sub sp, sp, #0x70 14ac: 90000000 adrp x0, 0 <__stack_chk_guard> 14b0: 91000000 add x0, x0, #0x0 14b4: 90000001 adrp x1, 0 <kthread_should_stop> 14b8: 91000021 add x1, x1, #0x0 14bc: 52800002 mov w2, #0x0 // #0 14c0: a9017bfd stp x29, x30, [sp, #16] 14c4: 910043fd add x29, sp, #0x10 14c8: a90253f3 stp x19, x20, [sp, #32] 14cc: 90000014 adrp x20, 0 <kthread_should_stop> 14d0: d5384113 mrs x19, sp_el0 14d4: a9035bf5 stp x21, x22, [sp, #48] 14d8: 91000294 add x20, x20, #0x0 if (list_empty(&kthread_create_list)) 14dc: 9103c296 add x22, x20, #0xf0 { 14e0: a90463f7 stp x23, x24, [sp, #64] 14e4: d2800017 mov x23, #0x0 // #0 14e8: 90000018 adrp x24, 0 <kthread_should_stop> 14ec: a9056bf9 stp x25, x26, [sp, #80] 14f0: 91000318 add x24, x24, #0x0 14f4: 9000001a adrp x26, 0 <kthread_should_stop> 14f8: f90033fb str x27, [sp, #96] 14fc: 9100035a add x26, x26, #0x0 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 1500: d280003b mov x27, #0x1 // #1 1504: 90000019 adrp x25, 0 <kthread_should_stop> 1508: f9400003 ldr x3, [x0] 150c: f90007e3 str x3, [sp, #8] 1510: d2800003 mov x3, #0x0 // #0 1514: aa1303e0 mov x0, x19 1518: 91000339 add x25, x25, #0x0 151c: 94000000 bl 0 <__set_task_comm> ignore_signals(tsk); 1520: aa1303e0 mov x0, x19 1524: 94000000 bl 0 <ignore_signals> set_cpus_allowed_ptr(tsk, cpu_all_mask); 1528: aa1303e0 mov x0, x19 152c: 90000001 adrp x1, 0 <cpu_all_bits> 1530: 91000021 add x1, x1, #0x0 1534: 94000000 bl 0 <set_cpus_allowed_ptr> current->no_cgroup_migration = 1; 1538: 3955a260 ldrb w0, [x19, #1384] current->flags |= PF_NOFREEZE; 153c: b9403661 ldr w1, [x19, #52] 1540: 321d0000 orr w0, w0, #0x8 1544: 3915a260 
strb w0, [x19, #1384] 1548: 32110021 orr w1, w1, #0x8000 154c: b9003661 str w1, [x19, #52] 1550: d5384100 mrs x0, sp_el0 1554: f9000c1b str x27, [x0, #24] set_current_state(TASK_INTERRUPTIBLE); 1558: f90b881a str x26, [x0, #5904] 155c: d5033bbf dmb ish __READ_ONCE_SIZE; 1560: f9407a80 ldr x0, [x20, #240] if (list_empty(&kthread_create_list)) 1564: eb16001f cmp x0, x22 1568: 540004c0 b.eq 1600 <kthreadd+0x158> // b.none 156c: d503201f nop 1570: d5384101 mrs x1, sp_el0 __set_current_state(TASK_RUNNING); 1574: f9000c3f str xzr, [x1, #24] spin_lock(&kthread_create_lock); 1578: aa1403e0 mov x0, x20 __set_current_state(TASK_RUNNING); 157c: f90b8839 str x25, [x1, #5904] spin_lock(&kthread_create_lock); 1580: 94000000 bl 0 <rt_spin_lock> 1584: f9407a80 ldr x0, [x20, #240] while (!list_empty(&kthread_create_list)) { 1588: eb16001f cmp x0, x22 158c: 54000280 b.eq 15dc <kthreadd+0x134> // b.none create = list_entry(kthread_create_list.next, 1590: f9407a93 ldr x19, [x20, #240] spin_unlock(&kthread_create_lock); 1594: aa1403e0 mov x0, x20 create = list_entry(kthread_create_list.next, 1598: d100a275 sub x21, x19, #0x28 __list_del(entry->prev, entry->next); 159c: a9400662 ldp x2, x1, [x19] next->prev = prev; 15a0: f9000441 str x1, [x2, #8] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 15a4: f9000022 str x2, [x1] 15a8: f9000273 str x19, [x19] list->prev = list; 15ac: f9000673 str x19, [x19, #8] spin_unlock(&kthread_create_lock); 15b0: 94000000 bl 0 <rt_spin_unlock> pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD); 15b4: aa1503e1 mov x1, x21 15b8: d280c222 mov x2, #0x611 // #1553 15bc: aa1803e0 mov x0, x24 15c0: 94000000 bl 0 <kernel_thread> if (pid < 0) { 15c4: 37f80220 tbnz w0, #31, 1608 <kthreadd+0x160> spin_lock(&kthread_create_lock); 15c8: aa1403e0 mov x0, x20 15cc: 94000000 bl 0 <rt_spin_lock> __READ_ONCE_SIZE; 15d0: f9407a80 ldr x0, [x20, #240] while (!list_empty(&kthread_create_list)) { 15d4: eb16001f cmp x0, x22 15d8: 54fffdc1 b.ne 1590 <kthreadd+0xe8> // b.any spin_unlock(&kthread_create_lock); 15dc: aa1403e0 mov x0, x20 15e0: 94000000 bl 0 <rt_spin_unlock> 15e4: d5384100 mrs x0, sp_el0 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 15e8: f9000c1b str x27, [x0, #24] set_current_state(TASK_INTERRUPTIBLE); 15ec: f90b881a str x26, [x0, #5904] 15f0: d5033bbf dmb ish __READ_ONCE_SIZE; 15f4: f9407a80 ldr x0, [x20, #240] if (list_empty(&kthread_create_list)) 15f8: eb16001f cmp x0, x22 15fc: 54fffba1 b.ne 1570 <kthreadd+0xc8> // b.any schedule(); 1600: 94000000 bl 0 <schedule> __set_current_state(TASK_RUNNING); 1604: 17ffffdb b 1570 <kthreadd+0xc8> 1608: d1002263 sub x3, x19, #0x8 160c: f9800071 prfm pstl1strm, [x3] 1610: c85f7c62 ldxr x2, [x3] 1614: c801fc77 stlxr w1, x23, [x3] 1618: 35ffffc1 cbnz w1, 1610 <kthreadd+0x168> 161c: d5033bbf dmb ish if (!done) { 1620: b40000c2 cbz x2, 1638 <kthreadd+0x190> create->result = ERR_PTR(pid); 1624: 93407c01 sxtw x1, w0 1628: f81f0261 stur x1, [x19, #-16] complete(done); 162c: aa0203e0 mov x0, x2 1630: 94000000 bl 0 <complete> 1634: 17ffffe5 b 15c8 <kthreadd+0x120> kfree(create); 1638: aa1503e0 mov x0, x21 163c: 94000000 bl 0 <kfree> return; 1640: 17ffffe2 b 15c8 <kthreadd+0x120> 1644: d503201f nop 0000000000001648 <__kthread_queue_delayed_work>: { 1648: a9bd7bfd stp x29, x30, [sp, #-48]! 
164c: 910003fd mov x29, sp 1650: a90153f3 stp x19, x20, [sp, #16] 1654: aa0103f3 mov x19, x1 1658: aa0003f4 mov x20, x0 165c: a9025bf5 stp x21, x22, [sp, #32] WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn || 1660: 9100a275 add x21, x19, #0x28 1664: 90000001 adrp x1, b98 <kthread_delayed_work_timer_fn> 1668: f9400ea0 ldr x0, [x21, #24] 166c: 91000021 add x1, x1, #0x0 { 1670: aa0203f6 mov x22, x2 WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn || 1674: eb01001f cmp x0, x1 1678: 540003a0 b.eq 16ec <__kthread_queue_delayed_work+0xa4> // b.none 167c: d4210000 brk #0x800 kthread_insert_work(worker, work, &worker->work_list); 1680: aa1303e1 mov x1, x19 if (!delay) { 1684: b4000276 cbz x22, 16d0 <__kthread_queue_delayed_work+0x88> kthread_insert_work_sanity_check(worker, work); 1688: aa1403e0 mov x0, x20 168c: 97fffcb5 bl 960 <kthread_insert_work_sanity_check> __list_add(new, head, head->next); 1690: aa1403e1 mov x1, x20 timer->expires = jiffies + delay; 1694: 90000003 adrp x3, 0 <jiffies> add_timer(timer); 1698: aa1503e0 mov x0, x21 169c: f8460c22 ldr x2, [x1, #96]! next->prev = new; 16a0: f9000453 str x19, [x2, #8] new->prev = prev; 16a4: a9000662 stp x2, x1, [x19] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 16a8: f9003293 str x19, [x20, #96] work->worker = worker; 16ac: f9000e74 str x20, [x19, #24] timer->expires = jiffies + delay; 16b0: f9400062 ldr x2, [x3] 16b4: 8b160042 add x2, x2, x22 16b8: f9000aa2 str x2, [x21, #16] add_timer(timer); 16bc: 94000000 bl 0 <add_timer> } 16c0: a94153f3 ldp x19, x20, [sp, #16] 16c4: a9425bf5 ldp x21, x22, [sp, #32] 16c8: a8c37bfd ldp x29, x30, [sp], #48 16cc: d65f03c0 ret kthread_insert_work(worker, work, &worker->work_list); 16d0: 91014282 add x2, x20, #0x50 16d4: aa1403e0 mov x0, x20 16d8: 97fffcc0 bl 9d8 <kthread_insert_work> } 16dc: a94153f3 ldp x19, x20, [sp, #16] 16e0: a9425bf5 ldp x21, x22, [sp, #32] 16e4: a8c37bfd ldp x29, x30, [sp], #48 16e8: d65f03c0 ret WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn || 16ec: f94012a0 ldr x0, [x21, #32] 16f0: eb00027f cmp x19, x0 16f4: 54fffc41 b.ne 167c <__kthread_queue_delayed_work+0x34> // b.any 16f8: 17ffffe2 b 1680 <__kthread_queue_delayed_work+0x38> 16fc: d503201f nop 0000000000001700 <kthread_queue_delayed_work>: { 1700: a9bc7bfd stp x29, x30, [sp, #-64]! 
1704: 910003fd mov x29, sp 1708: a90153f3 stp x19, x20, [sp, #16] raw_spin_lock_irqsave(&worker->lock, flags); 170c: 91002014 add x20, x0, #0x8 { 1710: aa0103f3 mov x19, x1 1714: a9025bf5 stp x21, x22, [sp, #32] 1718: aa0003f5 mov x21, x0 raw_spin_lock_irqsave(&worker->lock, flags); 171c: aa1403e0 mov x0, x20 { 1720: a90363f7 stp x23, x24, [sp, #48] 1724: aa0203f8 mov x24, x2 raw_spin_lock_irqsave(&worker->lock, flags); 1728: 94000000 bl 0 <_raw_spin_lock_irqsave> 172c: aa0003f7 mov x23, x0 lockdep_assert_held(&worker->lock); 1730: 90000000 adrp x0, 0 <debug_locks> 1734: b9400000 ldr w0, [x0] 1738: 350001c0 cbnz w0, 1770 <kthread_queue_delayed_work+0x70> __READ_ONCE_SIZE; 173c: f9400260 ldr x0, [x19] bool ret = false; 1740: 52800016 mov w22, #0x0 // #0 return !list_empty(&work->node) || work->canceling; 1744: eb00027f cmp x19, x0 1748: 54000280 b.eq 1798 <kthread_queue_delayed_work+0x98> // b.none raw_spin_unlock_irqrestore(&worker->lock, flags); 174c: aa1703e1 mov x1, x23 1750: aa1403e0 mov x0, x20 1754: 94000000 bl 0 <_raw_spin_unlock_irqrestore> } 1758: 2a1603e0 mov w0, w22 175c: a94153f3 ldp x19, x20, [sp, #16] 1760: a9425bf5 ldp x21, x22, [sp, #32] 1764: a94363f7 ldp x23, x24, [sp, #48] 1768: a8c47bfd ldp x29, x30, [sp], #64 176c: d65f03c0 ret 1770: 12800001 mov w1, #0xffffffff // #-1 1774: 910082a0 add x0, x21, #0x20 1778: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&worker->lock); 177c: 35fffe00 cbnz w0, 173c <kthread_queue_delayed_work+0x3c> 1780: d4210000 brk #0x800 1784: f9400260 ldr x0, [x19] bool ret = false; 1788: 52800016 mov w22, #0x0 // #0 return !list_empty(&work->node) || work->canceling; 178c: eb00027f cmp x19, x0 1790: 54fffde1 b.ne 174c <kthread_queue_delayed_work+0x4c> // b.any 1794: d503201f nop 1798: b9402260 ldr w0, [x19, #32] 179c: 35fffd80 cbnz w0, 174c <kthread_queue_delayed_work+0x4c> __kthread_queue_delayed_work(worker, dwork, delay); 17a0: aa1803e2 mov x2, x24 17a4: aa1303e1 mov x1, x19 17a8: aa1503e0 mov x0, x21 ret = true; 17ac: 52800036 mov w22, #0x1 // #1 __kthread_queue_delayed_work(worker, dwork, delay); 17b0: 94000000 bl 1648 <__kthread_queue_delayed_work> ret = true; 17b4: 17ffffe6 b 174c <kthread_queue_delayed_work+0x4c> 00000000000017b8 <kthread_mod_delayed_work>: { 17b8: d10143ff sub sp, sp, #0x50 17bc: a9017bfd stp x29, x30, [sp, #16] 17c0: 910043fd add x29, sp, #0x10 17c4: a90253f3 stp x19, x20, [sp, #32] 17c8: 90000013 adrp x19, 0 <__stack_chk_guard> 17cc: 91000273 add x19, x19, #0x0 17d0: aa0103f4 mov x20, x1 17d4: f9400261 ldr x1, [x19] 17d8: f90007e1 str x1, [sp, #8] 17dc: d2800001 mov x1, #0x0 // #0 17e0: a9035bf5 stp x21, x22, [sp, #48] raw_spin_lock_irqsave(&worker->lock, flags); 17e4: 91002016 add x22, x0, #0x8 { 17e8: aa0003f5 mov x21, x0 raw_spin_lock_irqsave(&worker->lock, flags); 17ec: aa1603e0 mov x0, x22 { 17f0: a90463f7 stp x23, x24, [sp, #64] 17f4: aa0203f8 mov x24, x2 raw_spin_lock_irqsave(&worker->lock, flags); 17f8: 94000000 bl 0 <_raw_spin_lock_irqsave> 17fc: f90003e0 str x0, [sp] if (!work->worker) 1800: f9400e80 ldr x0, [x20, #24] 1804: b40003c0 cbz x0, 187c <kthread_mod_delayed_work+0xc4> WARN_ON_ONCE(work->worker != worker); 1808: eb15001f cmp x0, x21 180c: 540003c1 b.ne 1884 <kthread_mod_delayed_work+0xcc> // b.any if (work->canceling) 1810: b9402280 ldr w0, [x20, #32] 1814: 52800017 mov w23, #0x0 // #0 1818: 340001e0 cbz w0, 1854 <kthread_mod_delayed_work+0x9c> raw_spin_unlock_irqrestore(&worker->lock, flags); 181c: f94003e1 ldr x1, [sp] 1820: aa1603e0 mov x0, x22 1824: 94000000 bl 0 
<_raw_spin_unlock_irqrestore> } 1828: f94007e2 ldr x2, [sp, #8] 182c: f9400261 ldr x1, [x19] 1830: ca010041 eor x1, x2, x1 1834: 2a1703e0 mov w0, w23 1838: b50002a1 cbnz x1, 188c <kthread_mod_delayed_work+0xd4> 183c: a9417bfd ldp x29, x30, [sp, #16] 1840: a94253f3 ldp x19, x20, [sp, #32] 1844: a9435bf5 ldp x21, x22, [sp, #48] 1848: a94463f7 ldp x23, x24, [sp, #64] 184c: 910143ff add sp, sp, #0x50 1850: d65f03c0 ret ret = __kthread_cancel_work(work, true, &flags); 1854: 910003e2 mov x2, sp 1858: 52800021 mov w1, #0x1 // #1 185c: aa1403e0 mov x0, x20 1860: 97fffc18 bl 8c0 <__kthread_cancel_work> 1864: 12001c17 and w23, w0, #0xff __kthread_queue_delayed_work(worker, dwork, delay); 1868: aa1803e2 mov x2, x24 186c: aa1403e1 mov x1, x20 1870: aa1503e0 mov x0, x21 1874: 94000000 bl 1648 <__kthread_queue_delayed_work> 1878: 17ffffe9 b 181c <kthread_mod_delayed_work+0x64> 187c: 52800017 mov w23, #0x0 // #0 1880: 17fffffa b 1868 <kthread_mod_delayed_work+0xb0> WARN_ON_ONCE(work->worker != worker); 1884: d4210000 brk #0x800 1888: 17ffffe2 b 1810 <kthread_mod_delayed_work+0x58> } 188c: 94000000 bl 0 <__stack_chk_fail> Disassembly of section .init.text: 0000000000000000 <kthread_init_global_worker>: DEFINE_KTHREAD_WORKER(kthread_global_worker); EXPORT_SYMBOL(kthread_global_worker); __init void kthread_init_global_worker(void) { 0: a9be7bfd stp x29, x30, [sp, #-32]! kthread_global_worker.task = kthread_create(kthread_worker_fn, 4: 90000003 adrp x3, 0 <kthread_init_global_worker> 8: 90000000 adrp x0, 6a8 <kthread_worker_fn> { c: 910003fd mov x29, sp 10: f9000bf3 str x19, [sp, #16] kthread_global_worker.task = kthread_create(kthread_worker_fn, 14: 90000013 adrp x19, 0 <kthread_init_global_worker> 18: 91000273 add x19, x19, #0x0 1c: 91000063 add x3, x3, #0x0 20: 91040273 add x19, x19, #0x100 24: 12800002 mov w2, #0xffffffff // #-1 28: 91000000 add x0, x0, #0x0 2c: aa1303e1 mov x1, x19 30: 94000000 bl 3b8 <kthread_create_on_node> &kthread_global_worker, "kswork"); if (WARN_ON(IS_ERR(kthread_global_worker.task))) 34: b140041f cmn x0, #0x1, lsl #12 kthread_global_worker.task = kthread_create(kthread_worker_fn, 38: f9003a60 str x0, [x19, #112] if (WARN_ON(IS_ERR(kthread_global_worker.task))) 3c: 54000069 b.ls 48 <kthread_init_global_worker+0x48> // b.plast 40: d4210000 brk #0x800 44: 14000002 b 4c <kthread_init_global_worker+0x4c> return; wake_up_process(kthread_global_worker.task); 48: 94000000 bl 0 <wake_up_process> } 4c: f9400bf3 ldr x19, [sp, #16] 50: a8c27bfd ldp x29, x30, [sp], #32 54: d65f03c0 ret
/home/miaon/workspace/fxssd_4t/33WA/Code/SHRV0763_SRC_33WA_v060200/OUTPUT/APL_BUILD/tmp/work-shared/nsp1-release/kernel-source/kernel/workqueue.o: file format elf64-littleaarch64 Disassembly of section .text: 0000000000000000 <pwq_activate_delayed_work>: }) static __always_inline void __read_once_size(const volatile void *p, void *res, int size) { __READ_ONCE_SIZE; 0: f9400001 ldr x1, [x0] static struct pool_workqueue *get_work_pwq(struct work_struct *work) { unsigned long data = atomic_long_read(&work->data); if (data & WORK_STRUCT_PWQ) return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); 4: 9278dc28 and x8, x1, #0xffffffffffffff00 8: f27e003f tst x1, #0x4 c: 9a9f1108 csel x8, x8, xzr, ne // ne = any static void pwq_activate_delayed_work(struct work_struct *work) { struct pool_workqueue *pwq = get_work_pwq(work); trace_workqueue_activate_work(work); if (list_empty(&pwq->pool->worklist)) 10: f9400105 ldr x5, [x8] 14: 910420a7 add x7, x5, #0x108 18: f94084a1 ldr x1, [x5, #264] 1c: eb0100ff cmp x7, x1 20: 540004a0 b.eq b4 <pwq_activate_delayed_work+0xb4> // b.none list_for_each_entry_safe_from(work, n, NULL, entry) { 24: aa0003e1 mov x1, x0 28: f8408c23 ldr x3, [x1, #8]! 2c: d1002062 sub x2, x3, #0x8 30: b4000341 cbz x1, 98 <pwq_activate_delayed_work+0x98> static inline void __list_del_entry(struct list_head *entry) { if (!__list_del_entry_valid(entry)) return; __list_del(entry->prev, entry->next); 34: f9400804 ldr x4, [x0, #16] __list_add(new, head->prev, head); 38: 910800a5 add x5, x5, #0x200 next->prev = prev; 3c: f9000464 str x4, [x3, #8] { switch (size) { case 1: *(volatile __u8 *)p = *(__u8 *)res; break; case 2: *(volatile __u16 *)p = *(__u16 *)res; break; case 4: *(volatile __u32 *)p = *(__u32 *)res; break; case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 40: f9000083 str x3, [x4] __list_add(new, head->prev, head); 44: f85100a3 ldur x3, [x5, #-240] next->prev = new; 48: f81100a1 stur x1, [x5, #-240] new->prev = prev; 4c: a9008c07 stp x7, x3, [x0, #8] 50: f9000061 str x1, [x3] if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 54: f9400001 ldr x1, [x0] 58: 37180181 tbnz w1, #3, 88 <pwq_activate_delayed_work+0x88> 5c: 14000010 b 9c <pwq_activate_delayed_work+0x9c> __list_del(entry->prev, entry->next); 60: f9400844 ldr x4, [x2, #16] next->prev = prev; 64: f9000464 str x4, [x3, #8] 68: f9000083 str x3, [x4] __list_add(new, head->prev, head); 6c: f85100a3 ldur x3, [x5, #-240] next->prev = new; 70: f81100a1 stur x1, [x5, #-240] new->prev = prev; 74: a9008c47 stp x7, x3, [x2, #8] 78: f9000061 str x1, [x3] 7c: f9400041 ldr x1, [x2] list_for_each_entry_safe_from(work, n, NULL, entry) { 80: aa0603e2 mov x2, x6 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 84: 361800a1 tbz w1, #3, 98 <pwq_activate_delayed_work+0x98> list_for_each_entry_safe_from(work, n, NULL, entry) { 88: aa0203e1 mov x1, x2 8c: f8408c23 ldr x3, [x1, #8]! 
90: d1002066 sub x6, x3, #0x8 94: b5fffe61 cbnz x1, 60 <pwq_activate_delayed_work+0x60> 98: f9400001 ldr x1, [x0] static inline void __clear_bit(int nr, volatile unsigned long *addr) { unsigned long mask = BIT_MASK(nr); unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); *p &= ~mask; 9c: 927ef821 and x1, x1, #0xfffffffffffffffd a0: f9000001 str x1, [x0] pwq->pool->watchdog_ts = jiffies; move_linked_works(work, &pwq->pool->worklist, NULL); __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); pwq->nr_active++; a4: b9405900 ldr w0, [x8, #88] a8: 11000400 add w0, w0, #0x1 ac: b9005900 str w0, [x8, #88] } b0: d65f03c0 ret pwq->pool->watchdog_ts = jiffies; b4: 90000001 adrp x1, 0 <jiffies> b8: f9400021 ldr x1, [x1] bc: f90080a1 str x1, [x5, #256] c0: f9400105 ldr x5, [x8] c4: 910420a7 add x7, x5, #0x108 c8: 17ffffd7 b 24 <pwq_activate_delayed_work+0x24> cc: d503201f nop 00000000000000d0 <work_for_cpu_fn>: void *arg; long ret; }; static void work_for_cpu_fn(struct work_struct *work) { d0: a9be7bfd stp x29, x30, [sp, #-32]! d4: 910003fd mov x29, sp d8: f9000bf3 str x19, [sp, #16] dc: aa0003f3 mov x19, x0 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); wfc->ret = wfc->fn(wfc->arg); e0: a9450001 ldp x1, x0, [x0, #80] e4: d63f0020 blr x1 e8: f9003260 str x0, [x19, #96] } ec: f9400bf3 ldr x19, [sp, #16] f0: a8c27bfd ldp x29, x30, [sp], #32 f4: d65f03c0 ret 00000000000000f8 <worker_enter_idle>: { f8: a9bd7bfd stp x29, x30, [sp, #-48]! fc: 910003fd mov x29, sp if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) || 100: b9406801 ldr w1, [x0, #104] 104: 371000c1 tbnz w1, #2, 11c <worker_enter_idle+0x24> __READ_ONCE_SIZE; 108: f9400002 ldr x2, [x0] WARN_ON_ONCE(!list_empty(&worker->entry) && 10c: eb02001f cmp x0, x2 110: 54000100 b.eq 130 <worker_enter_idle+0x38> // b.none 114: f9400002 ldr x2, [x0] 118: b4000082 cbz x2, 128 <worker_enter_idle+0x30> 11c: d4210000 brk #0x800 } 120: a8c37bfd ldp x29, x30, [sp], #48 124: d65f03c0 ret WARN_ON_ONCE(!list_empty(&worker->entry) && 128: f9400402 ldr x2, [x0, #8] 12c: b5ffff82 cbnz x2, 11c <worker_enter_idle+0x24> 130: a90153f3 stp x19, x20, [sp, #16] 134: aa0003f3 mov x19, x0 worker->flags |= WORKER_IDLE; 138: 321e0021 orr w1, w1, #0x4 13c: f90013f5 str x21, [sp, #32] worker->last_active = jiffies; 140: 90000015 adrp x21, 0 <jiffies> worker->flags |= WORKER_IDLE; 144: b9006a61 str w1, [x19, #104] preempt_disable(); 148: 52800020 mov w0, #0x1 // #1 struct worker_pool *pool = worker->pool; 14c: f9402674 ldr x20, [x19, #72] worker->last_active = jiffies; 150: f94002a2 ldr x2, [x21] pool->nr_idle++; 154: b9411e81 ldr w1, [x20, #284] 158: 0b000021 add w1, w1, w0 15c: b9011e81 str w1, [x20, #284] worker->last_active = jiffies; 160: f9003262 str x2, [x19, #96] preempt_disable(); 164: 94000000 bl 0 <preempt_count_add> __list_add(new, head, head->next); 168: f9409280 ldr x0, [x20, #288] list_add(&worker->entry, &pool->idle_list); 16c: 91048281 add x1, x20, #0x120 next->prev = new; 170: f9000413 str x19, [x0, #8] new->prev = prev; 174: a9000660 stp x0, x1, [x19] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 178: f9009293 str x19, [x20, #288] preempt_enable(); 17c: 52800020 mov w0, #0x1 // #1 180: 94000000 bl 0 <preempt_count_sub> */ static __always_inline struct task_struct *get_current(void) { unsigned long sp_el0; asm ("mrs %0, sp_el0" : "=r" (sp_el0)); 184: d5384100 mrs x0, sp_el0 __READ_ONCE_SIZE; 188: b9401001 ldr w1, [x0, #16] /* * Returns true when we need to resched and can (barring IRQ state). 
*/ static __always_inline bool should_resched(int preempt_offset) { return unlikely(preempt_count() == preempt_offset && 18c: 34000481 cbz w1, 21c <worker_enter_idle+0x124> bool managing = pool->flags & POOL_MANAGER_ACTIVE; 190: b940fe81 ldr w1, [x20, #252] int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 194: b9411e82 ldr w2, [x20, #284] 198: 12000020 and w0, w1, #0x1 19c: 0b020000 add w0, w0, w2 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 1a0: 7100081f cmp w0, #0x2 1a4: 5400012d b.le 1c8 <worker_enter_idle+0xd0> int nr_busy = pool->nr_workers - nr_idle; 1a8: b9411a82 ldr w2, [x20, #280] return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 1ac: 51000803 sub w3, w0, #0x2 int nr_busy = pool->nr_workers - nr_idle; 1b0: 4b000040 sub w0, w2, w0 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 1b4: 6b03081f cmp w0, w3, lsl #2 1b8: 5400008c b.gt 1c8 <worker_enter_idle+0xd0> * * return value: 1 if the timer is pending, 0 if not. */ static inline int timer_pending(const struct timer_list * timer) { return timer->entry.pprev != NULL; 1bc: 9104c280 add x0, x20, #0x130 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) 1c0: f9400402 ldr x2, [x0, #8] 1c4: b4000142 cbz x2, 1ec <worker_enter_idle+0xf4> WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 1c8: 371000a1 tbnz w1, #2, 1dc <worker_enter_idle+0xe4> 1cc: b9411a81 ldr w1, [x20, #280] 1d0: b9411e80 ldr w0, [x20, #284] 1d4: 6b00003f cmp w1, w0 1d8: 54000160 b.eq 204 <worker_enter_idle+0x10c> // b.none 1dc: a94153f3 ldp x19, x20, [sp, #16] 1e0: f94013f5 ldr x21, [sp, #32] } 1e4: a8c37bfd ldp x29, x30, [sp], #48 1e8: d65f03c0 ret mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 1ec: f94002a1 ldr x1, [x21] 1f0: 91404821 add x1, x1, #0x12, lsl #12 1f4: 9113e021 add x1, x1, #0x4f8 1f8: 94000000 bl 0 <mod_timer> 1fc: b940fe81 ldr w1, [x20, #252] 200: 17fffff2 b 1c8 <worker_enter_idle+0xd0> 204: b9458280 ldr w0, [x20, #1408] WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 208: 34fffea0 cbz w0, 1dc <worker_enter_idle+0xe4> 20c: d4210000 brk #0x800 210: a94153f3 ldp x19, x20, [sp, #16] 214: f94013f5 ldr x21, [sp, #32] 218: 17fffff3 b 1e4 <worker_enter_idle+0xec> * @nr: bit number to test * @addr: Address to start counting from */ static inline int test_bit(int nr, const volatile unsigned long *addr) { return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); 21c: f9400001 ldr x1, [x0] 220: 721f003f tst w1, #0x2 224: 54000081 b.ne 234 <worker_enter_idle+0x13c> // b.any 228: f9400000 ldr x0, [x0] 22c: 721a001f tst w0, #0x40 230: 54fffb00 b.eq 190 <worker_enter_idle+0x98> // b.none preempt_enable(); 234: 94000000 bl 0 <preempt_schedule> 238: 17ffffd6 b 190 <worker_enter_idle+0x98> 23c: d503201f nop 0000000000000240 <destroy_worker>: { 240: a9be7bfd stp x29, x30, [sp, #-32]! lockdep_assert_held(&pool->lock); 244: 90000001 adrp x1, 0 <debug_locks> { 248: 910003fd mov x29, sp 24c: a90153f3 stp x19, x20, [sp, #16] 250: aa0003f3 mov x19, x0 lockdep_assert_held(&pool->lock); 254: b9400020 ldr w0, [x1] struct worker_pool *pool = worker->pool; 258: f9402674 ldr x20, [x19, #72] lockdep_assert_held(&pool->lock); 25c: 35000460 cbnz w0, 2e8 <destroy_worker+0xa8> if (WARN_ON(worker->current_work) || 260: f9400a60 ldr x0, [x19, #16] 264: b5000500 cbnz x0, 304 <destroy_worker+0xc4> 268: aa1303e0 mov x0, x19 26c: f8430c01 ldr x1, [x0, #48]! 
WARN_ON(!list_empty(&worker->scheduled)) || 270: eb01001f cmp x0, x1 274: 54000481 b.ne 304 <destroy_worker+0xc4> // b.any WARN_ON(!(worker->flags & WORKER_IDLE))) 278: b9406a60 ldr w0, [x19, #104] 27c: 36100440 tbz w0, #2, 304 <destroy_worker+0xc4> pool->nr_workers--; 280: b9411a82 ldr w2, [x20, #280] preempt_disable(); 284: 52800020 mov w0, #0x1 // #1 pool->nr_idle--; 288: b9411e81 ldr w1, [x20, #284] pool->nr_workers--; 28c: 51000442 sub w2, w2, #0x1 290: b9011a82 str w2, [x20, #280] pool->nr_idle--; 294: 51000421 sub w1, w1, #0x1 298: b9011e81 str w1, [x20, #284] preempt_disable(); 29c: 94000000 bl 0 <preempt_count_add> __list_del(entry->prev, entry->next); 2a0: a9400261 ldp x1, x0, [x19] next->prev = prev; 2a4: f9000420 str x0, [x1, #8] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 2a8: f9000001 str x1, [x0] 2ac: f9000273 str x19, [x19] list->prev = list; 2b0: f9000673 str x19, [x19, #8] preempt_enable(); 2b4: 52800020 mov w0, #0x1 // #1 2b8: 94000000 bl 0 <preempt_count_sub> 2bc: d5384100 mrs x0, sp_el0 __READ_ONCE_SIZE; 2c0: b9401001 ldr w1, [x0, #16] 2c4: 34000281 cbz w1, 314 <destroy_worker+0xd4> worker->flags |= WORKER_DIE; 2c8: b9406a61 ldr w1, [x19, #104] wake_up_process(worker->task); 2cc: f9402260 ldr x0, [x19, #64] worker->flags |= WORKER_DIE; 2d0: 321f0021 orr w1, w1, #0x2 2d4: b9006a61 str w1, [x19, #104] wake_up_process(worker->task); 2d8: 94000000 bl 0 <wake_up_process> } 2dc: a94153f3 ldp x19, x20, [sp, #16] 2e0: a8c27bfd ldp x29, x30, [sp], #32 2e4: d65f03c0 ret */ extern int lock_is_held_type(struct lockdep_map *lock, int read); static inline int lock_is_held(struct lockdep_map *lock) { return lock_is_held_type(lock, -1); 2e8: 12800001 mov w1, #0xffffffff // #-1 2ec: 91030280 add x0, x20, #0xc0 2f0: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&pool->lock); 2f4: 35fffb60 cbnz w0, 260 <destroy_worker+0x20> 2f8: d4210000 brk #0x800 if (WARN_ON(worker->current_work) || 2fc: f9400a60 ldr x0, [x19, #16] 300: b4fffb40 cbz x0, 268 <destroy_worker+0x28> WARN_ON(!(worker->flags & WORKER_IDLE))) 304: d4210000 brk #0x800 } 308: a94153f3 ldp x19, x20, [sp, #16] 30c: a8c27bfd ldp x29, x30, [sp], #32 310: d65f03c0 ret 314: f9400001 ldr x1, [x0] 318: 721f003f tst w1, #0x2 31c: 54000081 b.ne 32c <destroy_worker+0xec> // b.any 320: f9400000 ldr x0, [x0] 324: 721a001f tst w0, #0x40 328: 54fffd00 b.eq 2c8 <destroy_worker+0x88> // b.none preempt_enable(); 32c: 94000000 bl 0 <preempt_schedule> 330: 17ffffe6 b 2c8 <destroy_worker+0x88> 334: d503201f nop 0000000000000338 <idle_worker_timeout>: { 338: a9bd7bfd stp x29, x30, [sp, #-48]! 
33c: 910003fd mov x29, sp 340: a90153f3 stp x19, x20, [sp, #16] 344: aa0003f3 mov x19, x0 spin_lock_irq(&pool->lock); 348: 94000000 bl 0 <rt_spin_lock> int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 34c: b940fe60 ldr w0, [x19, #252] 350: b9411e61 ldr w1, [x19, #284] 354: 12000000 and w0, w0, #0x1 358: 0b010000 add w0, w0, w1 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 35c: 7100081f cmp w0, #0x2 360: 5400048d b.le 3f0 <idle_worker_timeout+0xb8> 364: f90013f5 str x21, [sp, #32] 368: 51000802 sub w2, w0, #0x2 int nr_busy = pool->nr_workers - nr_idle; 36c: b9411a61 ldr w1, [x19, #280] 370: 4b000020 sub w0, w1, w0 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 374: 6b02081f cmp w0, w2, lsl #2 378: 540003ac b.gt 3ec <idle_worker_timeout+0xb4> worker = list_entry(pool->idle_list.prev, struct worker, entry); 37c: f9409660 ldr x0, [x19, #296] if (time_before(jiffies, expires)) { 380: 90000014 adrp x20, 0 <jiffies> expires = worker->last_active + IDLE_WORKER_TIMEOUT; 384: d2849f02 mov x2, #0x24f8 // #9464 if (time_before(jiffies, expires)) { 388: f9400283 ldr x3, [x20] expires = worker->last_active + IDLE_WORKER_TIMEOUT; 38c: f2a00022 movk x2, #0x1, lsl #16 390: f9403001 ldr x1, [x0, #96] 394: aa0203f5 mov x21, x2 398: 8b020021 add x1, x1, x2 if (time_before(jiffies, expires)) { 39c: eb01007f cmp x3, x1 3a0: 54000185 b.pl 3d0 <idle_worker_timeout+0x98> // b.nfrst 3a4: 14000018 b 404 <idle_worker_timeout+0xcc> int nr_busy = pool->nr_workers - nr_idle; 3a8: b9411a61 ldr w1, [x19, #280] 3ac: 4b000020 sub w0, w1, w0 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 3b0: 6b02081f cmp w0, w2, lsl #2 3b4: 540001cc b.gt 3ec <idle_worker_timeout+0xb4> worker = list_entry(pool->idle_list.prev, struct worker, entry); 3b8: f9409660 ldr x0, [x19, #296] if (time_before(jiffies, expires)) { 3bc: f9400282 ldr x2, [x20] expires = worker->last_active + IDLE_WORKER_TIMEOUT; 3c0: f9403001 ldr x1, [x0, #96] 3c4: 8b150021 add x1, x1, x21 if (time_before(jiffies, expires)) { 3c8: eb01005f cmp x2, x1 3cc: 540001c4 b.mi 404 <idle_worker_timeout+0xcc> // b.first destroy_worker(worker); 3d0: 97ffff9c bl 240 <destroy_worker> int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 3d4: b940fe60 ldr w0, [x19, #252] 3d8: b9411e61 ldr w1, [x19, #284] 3dc: 12000000 and w0, w0, #0x1 3e0: 0b010000 add w0, w0, w1 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 3e4: 71000802 subs w2, w0, #0x2 3e8: 54fffe0c b.gt 3a8 <idle_worker_timeout+0x70> 3ec: f94013f5 ldr x21, [sp, #32] spin_unlock_irq(&pool->lock); 3f0: aa1303e0 mov x0, x19 3f4: 94000000 bl 0 <rt_spin_unlock> } 3f8: a94153f3 ldp x19, x20, [sp, #16] 3fc: a8c37bfd ldp x29, x30, [sp], #48 400: d65f03c0 ret mod_timer(&pool->idle_timer, expires); 404: 9104c260 add x0, x19, #0x130 408: 94000000 bl 0 <mod_timer> spin_unlock_irq(&pool->lock); 40c: aa1303e0 mov x0, x19 break; 410: f94013f5 ldr x21, [sp, #32] spin_unlock_irq(&pool->lock); 414: 94000000 bl 0 <rt_spin_unlock> } 418: a94153f3 ldp x19, x20, [sp, #16] 41c: a8c37bfd ldp x29, x30, [sp], #48 420: d65f03c0 ret 424: d503201f nop 0000000000000428 <wake_up_worker>: { 428: a9be7bfd stp x29, x30, [sp, #-32]! 
42c: 910003fd mov x29, sp 430: f9000bf3 str x19, [sp, #16] 434: aa0003f3 mov x19, x0 preempt_disable(); 438: 52800020 mov w0, #0x1 // #1 43c: 94000000 bl 0 <preempt_count_add> 440: f9409260 ldr x0, [x19, #288] if (unlikely(list_empty(&pool->idle_list))) 444: 91048261 add x1, x19, #0x120 448: eb00003f cmp x1, x0 44c: 540000a0 b.eq 460 <wake_up_worker+0x38> // b.none return list_first_entry(&pool->idle_list, struct worker, entry); 450: f9409260 ldr x0, [x19, #288] if (likely(worker)) 454: b4000060 cbz x0, 460 <wake_up_worker+0x38> wake_up_process(worker->task); 458: f9402000 ldr x0, [x0, #64] 45c: 94000000 bl 0 <wake_up_process> preempt_enable(); 460: 52800020 mov w0, #0x1 // #1 464: 94000000 bl 0 <preempt_count_sub> 468: d5384100 mrs x0, sp_el0 46c: b9401001 ldr w1, [x0, #16] 470: 34000081 cbz w1, 480 <wake_up_worker+0x58> } 474: f9400bf3 ldr x19, [sp, #16] 478: a8c27bfd ldp x29, x30, [sp], #32 47c: d65f03c0 ret 480: f9400001 ldr x1, [x0] 484: 721f003f tst w1, #0x2 488: 54000081 b.ne 498 <wake_up_worker+0x70> // b.any 48c: f9400000 ldr x0, [x0] 490: 721a001f tst w0, #0x40 494: 54ffff00 b.eq 474 <wake_up_worker+0x4c> // b.none preempt_enable(); 498: 94000000 bl 0 <preempt_schedule> 49c: 17fffff6 b 474 <wake_up_worker+0x4c> 00000000000004a0 <pwq_adjust_max_active>: { 4a0: a9bd7bfd stp x29, x30, [sp, #-48]! 4a4: 910003fd mov x29, sp 4a8: a90153f3 stp x19, x20, [sp, #16] 4ac: aa0003f3 mov x19, x0 lockdep_assert_held(&wq->mutex); 4b0: 90000000 adrp x0, 0 <debug_locks> { 4b4: f90013f5 str x21, [sp, #32] lockdep_assert_held(&wq->mutex); 4b8: b9400000 ldr w0, [x0] struct workqueue_struct *wq = pwq->wq; 4bc: f9400675 ldr x21, [x19, #8] bool freezable = wq->flags & WQ_FREEZABLE; 4c0: b94202b4 ldr w20, [x21, #512] 4c4: 121e0294 and w20, w20, #0x4 lockdep_assert_held(&wq->mutex); 4c8: 35000460 cbnz w0, 554 <pwq_adjust_max_active+0xb4> if (!freezable && pwq->max_active == wq->saved_max_active) 4cc: 35000534 cbnz w20, 570 <pwq_adjust_max_active+0xd0> 4d0: b9405e61 ldr w1, [x19, #92] 4d4: b9415ea0 ldr w0, [x21, #348] 4d8: 6b00003f cmp w1, w0 4dc: 54000340 b.eq 544 <pwq_adjust_max_active+0xa4> // b.none spin_lock_irqsave(&pwq->pool->lock, flags); 4e0: f9400260 ldr x0, [x19] 4e4: 94000000 bl 0 <rt_spin_lock> 4e8: f9403261 ldr x1, [x19, #96] while (!list_empty(&pwq->delayed_works) && 4ec: 91018269 add x9, x19, #0x60 pwq->max_active = wq->saved_max_active; 4f0: b9415ea0 ldr w0, [x21, #348] 4f4: b9005e60 str w0, [x19, #92] while (!list_empty(&pwq->delayed_works) && 4f8: eb01013f cmp x9, x1 4fc: 540001c0 b.eq 534 <pwq_adjust_max_active+0x94> // b.none 500: b9405a61 ldr w1, [x19, #88] 504: 6b00003f cmp w1, w0 508: 540000ab b.lt 51c <pwq_adjust_max_active+0x7c> // b.tstop 50c: 1400000a b 534 <pwq_adjust_max_active+0x94> 510: 294b0261 ldp w1, w0, [x19, #88] 514: 6b00003f cmp w1, w0 518: 540000ea b.ge 534 <pwq_adjust_max_active+0x94> // b.tcont struct work_struct *work = list_first_entry(&pwq->delayed_works, 51c: f9403260 ldr x0, [x19, #96] pwq_activate_delayed_work(work); 520: d1002000 sub x0, x0, #0x8 524: 97fffeb7 bl 0 <pwq_activate_delayed_work> 528: f9403260 ldr x0, [x19, #96] while (!list_empty(&pwq->delayed_works) && 52c: eb00013f cmp x9, x0 530: 54ffff01 b.ne 510 <pwq_adjust_max_active+0x70> // b.any wake_up_worker(pwq->pool); 534: f9400260 ldr x0, [x19] 538: 97ffffbc bl 428 <wake_up_worker> spin_unlock_irqrestore(&pwq->pool->lock, flags); 53c: f9400260 ldr x0, [x19] 540: 94000000 bl 0 <rt_spin_unlock> } 544: a94153f3 ldp x19, x20, [sp, #16] 548: f94013f5 ldr x21, [sp, #32] 54c: a8c37bfd ldp x29, x30, [sp], 
#48 550: d65f03c0 ret 554: 12800001 mov w1, #0xffffffff // #-1 558: 910362a0 add x0, x21, #0xd8 55c: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq->mutex); 560: 35fffb60 cbnz w0, 4cc <pwq_adjust_max_active+0x2c> 564: d4210000 brk #0x800 if (!freezable && pwq->max_active == wq->saved_max_active) 568: 34fffb54 cbz w20, 4d0 <pwq_adjust_max_active+0x30> 56c: d503201f nop spin_lock_irqsave(&pwq->pool->lock, flags); 570: f9400260 ldr x0, [x19] 574: 94000000 bl 0 <rt_spin_lock> if (!freezable || !workqueue_freezing) { 578: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 57c: 39400000 ldrb w0, [x0] 580: 34fffb40 cbz w0, 4e8 <pwq_adjust_max_active+0x48> pwq->max_active = 0; 584: b9005e7f str wzr, [x19, #92] 588: 17ffffed b 53c <pwq_adjust_max_active+0x9c> 58c: d503201f nop 0000000000000590 <link_pwq>: { 590: a9bd7bfd stp x29, x30, [sp, #-48]! lockdep_assert_held(&wq->mutex); 594: 90000001 adrp x1, 0 <debug_locks> { 598: 910003fd mov x29, sp 59c: a90153f3 stp x19, x20, [sp, #16] 5a0: aa0003f3 mov x19, x0 lockdep_assert_held(&wq->mutex); 5a4: b9400020 ldr w0, [x1] { 5a8: f90013f5 str x21, [sp, #32] struct workqueue_struct *wq = pwq->wq; 5ac: f9400675 ldr x21, [x19, #8] lockdep_assert_held(&wq->mutex); 5b0: 35000120 cbnz w0, 5d4 <link_pwq+0x44> 5b4: f9403a60 ldr x0, [x19, #112] if (!list_empty(&pwq->pwqs_node)) 5b8: 9101c274 add x20, x19, #0x70 5bc: eb00029f cmp x20, x0 5c0: 540001c0 b.eq 5f8 <link_pwq+0x68> // b.none } 5c4: a94153f3 ldp x19, x20, [sp, #16] 5c8: f94013f5 ldr x21, [sp, #32] 5cc: a8c37bfd ldp x29, x30, [sp], #48 5d0: d65f03c0 ret 5d4: 12800001 mov w1, #0xffffffff // #-1 5d8: 910362a0 add x0, x21, #0xd8 5dc: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq->mutex); 5e0: 35fffea0 cbnz w0, 5b4 <link_pwq+0x24> 5e4: d4210000 brk #0x800 5e8: f9403a60 ldr x0, [x19, #112] if (!list_empty(&pwq->pwqs_node)) 5ec: 9101c274 add x20, x19, #0x70 5f0: eb00029f cmp x20, x0 5f4: 54fffe81 b.ne 5c4 <link_pwq+0x34> // b.any pwq->work_color = wq->work_color; 5f8: b9410aa1 ldr w1, [x21, #264] pwq_adjust_max_active(pwq); 5fc: aa1303e0 mov x0, x19 pwq->work_color = wq->work_color; 600: b9001261 str w1, [x19, #16] pwq_adjust_max_active(pwq); 604: 97ffffa7 bl 4a0 <pwq_adjust_max_active> * the _rcu list-traversal primitives, such as * list_for_each_entry_rcu(). 
*/ static inline void list_add_rcu(struct list_head *new, struct list_head *head) { __list_add_rcu(new, head, head->next); 608: f94002a0 ldr x0, [x21] new->prev = prev; 60c: a9075660 stp x0, x21, [x19, #112] rcu_assign_pointer(list_next_rcu(prev), new); 610: c89ffeb4 stlr x20, [x21] next->prev = new; 614: f9000414 str x20, [x0, #8] } 618: a94153f3 ldp x19, x20, [sp, #16] 61c: f94013f5 ldr x21, [sp, #32] 620: a8c37bfd ldp x29, x30, [sp], #48 624: d65f03c0 ret 0000000000000628 <worker_pool_assign_id>: { 628: d10103ff sub sp, sp, #0x40 lockdep_assert_held(&wq_pool_mutex); 62c: 90000001 adrp x1, 0 <debug_locks> { 630: a9017bfd stp x29, x30, [sp, #16] 634: 910043fd add x29, sp, #0x10 638: a90253f3 stp x19, x20, [sp, #32] 63c: 90000013 adrp x19, 0 <__stack_chk_guard> 640: 91000273 add x19, x19, #0x0 644: f9001bf5 str x21, [sp, #48] 648: aa0003f5 mov x21, x0 lockdep_assert_held(&wq_pool_mutex); 64c: b9400020 ldr w0, [x1] { 650: f9400261 ldr x1, [x19] 654: f90007e1 str x1, [sp, #8] 658: d2800001 mov x1, #0x0 // #0 65c: 90000014 adrp x20, 0 <pwq_activate_delayed_work> 660: 91000294 add x20, x20, #0x0 lockdep_assert_held(&wq_pool_mutex); 664: 35000320 cbnz w0, 6c8 <worker_pool_assign_id+0xa0> int ret; if (WARN_ON_ONCE(start < 0)) return -EINVAL; ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false); 668: 52801805 mov w5, #0xc0 // #192 66c: aa1503e1 mov x1, x21 670: 9103a280 add x0, x20, #0xe8 674: 72a02805 movk w5, #0x140, lsl #16 678: 52800006 mov w6, #0x0 // #0 67c: b2407be4 mov x4, #0x7fffffff // #2147483647 680: d2800003 mov x3, #0x0 // #0 684: 910003e2 mov x2, sp 688: 94000000 bl 0 <idr_alloc_cmn> if (ret) return ret; return id; 68c: 7100001f cmp w0, #0x0 690: b94003e1 ldr w1, [sp] 694: 1a800020 csel w0, w1, w0, eq // eq = none if (ret >= 0) { 698: 37f80060 tbnz w0, #31, 6a4 <worker_pool_assign_id+0x7c> pool->id = ret; 69c: b900faa0 str w0, [x21, #248] return 0; 6a0: 52800000 mov w0, #0x0 // #0 } 6a4: f94007e2 ldr x2, [sp, #8] 6a8: f9400261 ldr x1, [x19] 6ac: ca010041 eor x1, x2, x1 6b0: b5000181 cbnz x1, 6e0 <worker_pool_assign_id+0xb8> 6b4: a9417bfd ldp x29, x30, [sp, #16] 6b8: a94253f3 ldp x19, x20, [sp, #32] 6bc: f9401bf5 ldr x21, [sp, #48] 6c0: 910103ff add sp, sp, #0x40 6c4: d65f03c0 ret 6c8: 9102e280 add x0, x20, #0xb8 6cc: 12800001 mov w1, #0xffffffff // #-1 6d0: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq_pool_mutex); 6d4: 35fffca0 cbnz w0, 668 <worker_pool_assign_id+0x40> 6d8: d4210000 brk #0x800 6dc: 17ffffe3 b 668 <worker_pool_assign_id+0x40> } 6e0: 94000000 bl 0 <__stack_chk_fail> 6e4: d503201f nop 00000000000006e8 <wq_clamp_max_active>: { 6e8: a9bd7bfd stp x29, x30, [sp, #-48]! int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 6ec: 721f003f tst w1, #0x2 { 6f0: 910003fd mov x29, sp 6f4: a90153f3 stp x19, x20, [sp, #16] 6f8: 2a0003f3 mov w19, w0 6fc: 52804014 mov w20, #0x200 // #512 700: f90013f5 str x21, [sp, #32] 704: aa0203f5 mov x21, x2 int lim = flags & WQ_UNBOUND ? 
WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 708: 54000100 b.eq 728 <wq_clamp_max_active+0x40> // b.none } static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits) { if (small_const_nbits(nbits)) return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); 70c: 90000000 adrp x0, 0 <__cpu_possible_mask> return __sw_hweight8(w); } static inline unsigned long __arch_hweight64(__u64 w) { return __sw_hweight64(w); 710: f9400000 ldr x0, [x0] 714: 94000000 bl 0 <__sw_hweight64> 718: 531e7414 lsl w20, w0, #2 71c: 7108029f cmp w20, #0x200 720: 52804000 mov w0, #0x200 // #512 724: 1a80a294 csel w20, w20, w0, ge // ge = tcont if (max_active < 1 || max_active > lim) 728: 7100027f cmp w19, #0x0 72c: 7a53c281 ccmp w20, w19, #0x1, gt 730: 5400010a b.ge 750 <wq_clamp_max_active+0x68> // b.tcont pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n", 734: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 738: aa1503e2 mov x2, x21 73c: 2a1403e4 mov w4, w20 740: 91000000 add x0, x0, #0x0 744: 52800023 mov w3, #0x1 // #1 748: 2a1303e1 mov w1, w19 74c: 94000000 bl 0 <printk> return clamp_val(max_active, 1, lim); 750: 7100027f cmp w19, #0x0 754: 1a9fc660 csinc w0, w19, wzr, gt 758: 6b14001f cmp w0, w20 } 75c: 1a94d000 csel w0, w0, w20, le 760: a94153f3 ldp x19, x20, [sp, #16] 764: f94013f5 ldr x21, [sp, #32] 768: a8c37bfd ldp x29, x30, [sp], #48 76c: d65f03c0 ret 0000000000000770 <init_worker_pool>: { 770: a9bd7bfd stp x29, x30, [sp, #-48]! 774: 910003fd mov x29, sp 778: a90153f3 stp x19, x20, [sp, #16] spin_lock_init(&pool->lock); 77c: 90000014 adrp x20, 0 <pwq_activate_delayed_work> 780: 91000294 add x20, x20, #0x0 { 784: a9025bf5 stp x21, x22, [sp, #32] spin_lock_init(&pool->lock); 788: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 78c: 910002d6 add x22, x22, #0x0 { 790: aa0003f3 mov x19, x0 spin_lock_init(&pool->lock); 794: 91002282 add x2, x20, #0x8 798: aa1603e1 mov x1, x22 79c: 94000000 bl 0 <__rt_mutex_init> 7a0: 91004282 add x2, x20, #0x10 7a4: aa1303e0 mov x0, x19 7a8: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 7ac: 91000021 add x1, x1, #0x0 7b0: 94000000 bl 0 <__rt_spin_lock_init> pool->watchdog_ts = jiffies; 7b4: 90000000 adrp x0, 0 <jiffies> pool->flags |= POOL_DISASSOCIATED; 7b8: b940fe62 ldr w2, [x19, #252] INIT_LIST_HEAD(&pool->worklist); 7bc: 91042264 add x4, x19, #0x108 7c0: 910fc261 add x1, x19, #0x3f0 pool->watchdog_ts = jiffies; 7c4: f9400006 ldr x6, [x0] 7c8: 9107c260 add x0, x19, #0x1f0 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 7cc: f9008664 str x4, [x19, #264] INIT_LIST_HEAD(&pool->idle_list); 7d0: 91048263 add x3, x19, #0x120 pool->cpu = -1; 7d4: 92800005 mov x5, #0xffffffffffffffff // #-1 pool->flags |= POOL_DISASSOCIATED; 7d8: 321e0042 orr w2, w2, #0x4 pool->cpu = -1; 7dc: f9007a65 str x5, [x19, #240] pool->flags |= POOL_DISASSOCIATED; 7e0: 291f0a65 stp w5, w2, [x19, #248] pool->watchdog_ts = jiffies; 7e4: f9008266 str x6, [x19, #256] 7e8: f9008a64 str x4, [x19, #272] 7ec: f9009263 str x3, [x19, #288] 7f0: f9009663 str x3, [x19, #296] /* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ #define hash_min(val, bits) \ (sizeof(val) <= 4 ? 
hash_32(val, bits) : hash_long(val, bits)) static inline void __hash_init(struct hlist_head *ht, unsigned int sz) 7f4: d503201f nop { unsigned int i; for (i = 0; i < sz; i++) INIT_HLIST_HEAD(&ht[i]); 7f8: f800841f str xzr, [x0], #8 for (i = 0; i < sz; i++) 7fc: eb00003f cmp x1, x0 800: 54ffffc1 b.ne 7f8 <init_worker_pool+0x88> // b.any setup_deferrable_timer(&pool->idle_timer, idle_worker_timeout, 804: 91006283 add x3, x20, #0x18 808: 52a00101 mov w1, #0x80000 // #524288 80c: 9104c260 add x0, x19, #0x130 810: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 814: 91000042 add x2, x2, #0x0 818: 94000000 bl 0 <init_timer_key> 81c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 820: 91000000 add x0, x0, #0x0 824: a914ce60 stp x0, x19, [x19, #328] setup_timer(&pool->mayday_timer, pool_mayday_timeout, 828: 91008283 add x3, x20, #0x20 82c: 52800001 mov w1, #0x0 // #0 830: 91064260 add x0, x19, #0x190 834: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 838: 91000042 add x2, x2, #0x0 83c: 94000000 bl 0 <init_timer_key> 840: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 844: 91000000 add x0, x0, #0x0 848: a91ace60 stp x0, x19, [x19, #424] mutex_init(&pool->attach_mutex); 84c: 910fe275 add x21, x19, #0x3f8 850: aa1603e1 mov x1, x22 854: 9100a282 add x2, x20, #0x28 858: aa1503e0 mov x0, x21 85c: 94000000 bl 0 <__rt_mutex_init> 860: aa1503e0 mov x0, x21 864: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 868: 9100c282 add x2, x20, #0x30 86c: 91000021 add x1, x1, #0x0 870: 94000000 bl 0 <__mutex_do_init> gfp_t gfp_mask); void ida_simple_remove(struct ida *ida, unsigned int id); static inline void ida_init(struct ida *ida) { INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT); 874: f902827f str xzr, [x19, #1280] index = kmalloc_index(size); if (!index) return ZERO_SIZE_PTR; return kmem_cache_alloc_trace( 878: 90000000 adrp x0, 0 <kmalloc_caches> INIT_LIST_HEAD(&pool->workers); 87c: 91138263 add x3, x19, #0x4e0 880: 52a1a004 mov w4, #0xd000000 // #218103808 884: f9027263 str x3, [x19, #1248] void *ret = kmem_cache_alloc(s, flags); 888: f9400000 ldr x0, [x0] 88c: f9027663 str x3, [x19, #1256] pool->refcnt = 1; 890: 52800023 mov w3, #0x1 // #1 894: b904fa64 str w4, [x19, #1272] #define HLIST_HEAD_INIT { .first = NULL } #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) static inline void INIT_HLIST_NODE(struct hlist_node *h) { h->next = NULL; 898: f9028a7f str xzr, [x19, #1296] 89c: 52901801 mov w1, #0x80c0 // #32960 h->pprev = NULL; 8a0: f9028e7f str xzr, [x19, #1304] 8a4: 72a02801 movk w1, #0x140, lsl #16 8a8: b9052263 str w3, [x19, #1312] 8ac: 94000000 bl 0 <kmem_cache_alloc> if (!attrs) 8b0: b4000160 cbz x0, 8dc <init_worker_pool+0x16c> *dst = *src; 8b4: 90000002 adrp x2, 0 <__cpu_possible_mask> return 0; 8b8: 52800001 mov w1, #0x0 // #0 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); 8bc: f9028660 str x0, [x19, #1288] 8c0: f9400042 ldr x2, [x2] 8c4: f9000402 str x2, [x0, #8] } 8c8: 2a0103e0 mov w0, w1 8cc: a94153f3 ldp x19, x20, [sp, #16] 8d0: a9425bf5 ldp x21, x22, [sp, #32] 8d4: a8c37bfd ldp x29, x30, [sp], #48 8d8: d65f03c0 ret return -ENOMEM; 8dc: 12800161 mov w1, #0xfffffff4 // #-12 pool->attrs = alloc_workqueue_attrs(GFP_KERNEL); 8e0: f902867f str xzr, [x19, #1288] return -ENOMEM; 8e4: 17fffff9 b 8c8 <init_worker_pool+0x158> 00000000000008e8 <wq_sysfs_prep_attrs>: return written; } /* prepare workqueue_attrs for sysfs store operations */ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) { 
8e8: a9be7bfd stp x29, x30, [sp, #-32]! struct workqueue_attrs *attrs; lockdep_assert_held(&wq_pool_mutex); 8ec: 90000001 adrp x1, 0 <debug_locks> { 8f0: 910003fd mov x29, sp 8f4: f9000bf3 str x19, [sp, #16] 8f8: aa0003f3 mov x19, x0 lockdep_assert_held(&wq_pool_mutex); 8fc: b9400020 ldr w0, [x1] 900: 35000280 cbnz w0, 950 <wq_sysfs_prep_attrs+0x68> return kmem_cache_alloc_trace( 904: 90000000 adrp x0, 0 <kmalloc_caches> void *ret = kmem_cache_alloc(s, flags); 908: 52901801 mov w1, #0x80c0 // #32960 90c: 72a02801 movk w1, #0x140, lsl #16 910: f9400000 ldr x0, [x0] 914: 94000000 bl 0 <kmem_cache_alloc> if (!attrs) 918: b4000160 cbz x0, 944 <wq_sysfs_prep_attrs+0x5c> 91c: 90000002 adrp x2, 0 <__cpu_possible_mask> attrs = alloc_workqueue_attrs(GFP_KERNEL); if (!attrs) return NULL; copy_workqueue_attrs(attrs, wq->unbound_attrs); 920: f940b261 ldr x1, [x19, #352] 924: f9400042 ldr x2, [x2] 928: f9000402 str x2, [x0, #8] to->nice = from->nice; 92c: b9400022 ldr w2, [x1] 930: b9000002 str w2, [x0] 934: f9400422 ldr x2, [x1, #8] 938: f9000402 str x2, [x0, #8] to->no_numa = from->no_numa; 93c: 39404021 ldrb w1, [x1, #16] 940: 39004001 strb w1, [x0, #16] return attrs; } 944: f9400bf3 ldr x19, [sp, #16] 948: a8c27bfd ldp x29, x30, [sp], #32 94c: d65f03c0 ret 950: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 954: 91000000 add x0, x0, #0x0 958: 9102e000 add x0, x0, #0xb8 95c: 12800001 mov w1, #0xffffffff // #-1 960: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq_pool_mutex); 964: 35fffd00 cbnz w0, 904 <wq_sysfs_prep_attrs+0x1c> 968: d4210000 brk #0x800 96c: 17ffffe6 b 904 <wq_sysfs_prep_attrs+0x1c> 0000000000000970 <workqueue_set_max_active>: { 970: a9bc7bfd stp x29, x30, [sp, #-64]! 974: 910003fd mov x29, sp if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 978: b9420002 ldr w2, [x0, #512] 97c: 37980802 tbnz w2, #19, a7c <workqueue_set_max_active+0x10c> max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 980: 2a0103e3 mov w3, w1 984: 2a0203e1 mov w1, w2 988: 9106a002 add x2, x0, #0x1a8 98c: a9025bf5 stp x21, x22, [sp, #32] 990: aa0003f5 mov x21, x0 mutex_lock(&wq->mutex); 994: 910082b6 add x22, x21, #0x20 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 998: 2a0303e0 mov w0, w3 99c: a90153f3 stp x19, x20, [sp, #16] 9a0: 97ffff52 bl 6e8 <wq_clamp_max_active> 9a4: 2a0003f4 mov w20, w0 mutex_lock(&wq->mutex); 9a8: aa1603e0 mov x0, x22 9ac: 94000000 bl 0 <_mutex_lock> wq->flags &= ~__WQ_ORDERED; 9b0: b94202a1 ldr w1, [x21, #512] __READ_ONCE_SIZE; 9b4: f94002b3 ldr x19, [x21] 9b8: 120e7821 and w1, w1, #0xfffdffff wq->saved_max_active = max_active; 9bc: b9015eb4 str w20, [x21, #348] wq->flags &= ~__WQ_ORDERED; 9c0: b90202a1 str w1, [x21, #512] for_each_pwq(pwq, wq) 9c4: eb1302bf cmp x21, x19 9c8: d101c273 sub x19, x19, #0x70 9cc: 540004c0 b.eq a64 <workqueue_set_max_active+0xf4> // b.none 9d0: 90000014 adrp x20, 0 <pwq_activate_delayed_work> 9d4: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 9d8: 91000294 add x20, x20, #0x0 9dc: a90363f7 stp x23, x24, [sp, #48] 9e0: 91000017 add x23, x0, #0x0 9e4: 910362b8 add x24, x21, #0xd8 9e8: 14000007 b a04 <workqueue_set_max_active+0x94> pwq_adjust_max_active(pwq); 9ec: aa1303e0 mov x0, x19 9f0: 97fffeac bl 4a0 <pwq_adjust_max_active> 9f4: f9403a73 ldr x19, [x19, #112] for_each_pwq(pwq, wq) 9f8: eb1302bf cmp x21, x19 9fc: d101c273 sub x19, x19, #0x70 a00: 54000300 b.eq a60 <workqueue_set_max_active+0xf0> // b.none a04: 94000000 bl 0 <debug_lockdep_rcu_enabled> a08: 34ffff20 cbz w0, 9ec <workqueue_set_max_active+0x7c> a0c: 
39400281 ldrb w1, [x20] a10: 35fffee1 cbnz w1, 9ec <workqueue_set_max_active+0x7c> a14: 94000000 bl 0 <rcu_read_lock_held> a18: 35fffea0 cbnz w0, 9ec <workqueue_set_max_active+0x7c> a1c: 12800001 mov w1, #0xffffffff // #-1 a20: aa1803e0 mov x0, x24 a24: 94000000 bl 0 <lock_is_held_type> a28: 35fffe20 cbnz w0, 9ec <workqueue_set_max_active+0x7c> a2c: 52800023 mov w3, #0x1 // #1 a30: aa1703e2 mov x2, x23 a34: 528215c1 mov w1, #0x10ae // #4270 a38: 90000000 adrp x0, 0 <pwq_activate_delayed_work> a3c: 91000000 add x0, x0, #0x0 a40: 39000283 strb w3, [x20] a44: 94000000 bl 0 <lockdep_rcu_suspicious> pwq_adjust_max_active(pwq); a48: aa1303e0 mov x0, x19 a4c: 97fffe95 bl 4a0 <pwq_adjust_max_active> a50: f9403a73 ldr x19, [x19, #112] for_each_pwq(pwq, wq) a54: eb1302bf cmp x21, x19 a58: d101c273 sub x19, x19, #0x70 a5c: 54fffd41 b.ne a04 <workqueue_set_max_active+0x94> // b.any a60: a94363f7 ldp x23, x24, [sp, #48] mutex_unlock(&wq->mutex); a64: aa1603e0 mov x0, x22 a68: 94000000 bl 0 <_mutex_unlock> a6c: a94153f3 ldp x19, x20, [sp, #16] a70: a9425bf5 ldp x21, x22, [sp, #32] } a74: a8c47bfd ldp x29, x30, [sp], #64 a78: d65f03c0 ret if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) a7c: d4210000 brk #0x800 } a80: a8c47bfd ldp x29, x30, [sp], #64 a84: d65f03c0 ret 0000000000000a88 <wq_unbind_fn>: { a88: a9ba7bfd stp x29, x30, [sp, #-96]! a8c: 910003fd mov x29, sp a90: a90153f3 stp x19, x20, [sp, #16] a94: a90363f7 stp x23, x24, [sp, #48] for_each_cpu_worker_pool(pool, cpu) { a98: 90000017 adrp x23, 0 <__per_cpu_offset> a9c: 910002f7 add x23, x23, #0x0 { aa0: a9046bf9 stp x25, x26, [sp, #64] int cpu = smp_processor_id(); aa4: 94000000 bl 0 <debug_smp_processor_id> for_each_cpu_worker_pool(pool, cpu) { aa8: 93407c19 sxtw x25, w0 aac: 90000018 adrp x24, 0 <pwq_activate_delayed_work> ab0: 91000318 add x24, x24, #0x0 ab4: aa1803e0 mov x0, x24 ab8: f8797af4 ldr x20, [x23, x25, lsl #3] abc: 8b000294 add x20, x20, x0 ac0: 91300280 add x0, x20, #0xc00 ac4: eb00029f cmp x20, x0 ac8: 54000842 b.cs bd0 <wq_unbind_fn+0x148> // b.hs, b.nlast acc: 9113829a add x26, x20, #0x4e0 ad0: a9025bf5 stp x21, x22, [sp, #32] ad4: 9112c295 add x21, x20, #0x4b0 ad8: f9002bfb str x27, [sp, #80] adc: 9000001b adrp x27, 0 <debug_locks> ae0: 9100037b add x27, x27, #0x0 ae4: d503201f nop mutex_lock(&pool->attach_mutex); ae8: 910fe296 add x22, x20, #0x3f8 aec: aa1603e0 mov x0, x22 af0: 94000000 bl 0 <_mutex_lock> spin_lock_irq(&pool->lock); af4: aa1403e0 mov x0, x20 af8: 94000000 bl 0 <rt_spin_lock> for_each_pool_worker(worker, pool) afc: f9427293 ldr x19, [x20, #1248] b00: eb13035f cmp x26, x19 b04: d1014273 sub x19, x19, #0x50 b08: 54000121 b.ne b2c <wq_unbind_fn+0xa4> // b.any b0c: 14000017 b b68 <wq_unbind_fn+0xe0> worker->flags |= WORKER_UNBOUND; b10: b9406a60 ldr w0, [x19, #104] for_each_pool_worker(worker, pool) b14: f9402a61 ldr x1, [x19, #80] worker->flags |= WORKER_UNBOUND; b18: 32190000 orr w0, w0, #0x80 b1c: b9006a60 str w0, [x19, #104] for_each_pool_worker(worker, pool) b20: eb01035f cmp x26, x1 b24: d1014033 sub x19, x1, #0x50 b28: 54000200 b.eq b68 <wq_unbind_fn+0xe0> // b.none b2c: b9400360 ldr w0, [x27] b30: 34ffff00 cbz w0, b10 <wq_unbind_fn+0x88> b34: 12800001 mov w1, #0xffffffff // #-1 b38: aa1503e0 mov x0, x21 b3c: 94000000 bl 0 <lock_is_held_type> b40: 35fffe80 cbnz w0, b10 <wq_unbind_fn+0x88> b44: d4210000 brk #0x800 worker->flags |= WORKER_UNBOUND; b48: b9406a60 ldr w0, [x19, #104] for_each_pool_worker(worker, pool) b4c: f9402a61 ldr x1, [x19, #80] worker->flags |= WORKER_UNBOUND; b50: 32190000 orr w0, w0, 
#0x80 b54: b9006a60 str w0, [x19, #104] for_each_pool_worker(worker, pool) b58: eb01035f cmp x26, x1 b5c: d1014033 sub x19, x1, #0x50 b60: 54fffe61 b.ne b2c <wq_unbind_fn+0xa4> // b.any b64: d503201f nop pool->flags |= POOL_DISASSOCIATED; b68: b940fe81 ldr w1, [x20, #252] spin_unlock_irq(&pool->lock); b6c: aa1403e0 mov x0, x20 b70: 9118035a add x26, x26, #0x600 b74: 911802b5 add x21, x21, #0x600 pool->flags |= POOL_DISASSOCIATED; b78: 321e0021 orr w1, w1, #0x4 b7c: b900fe81 str w1, [x20, #252] spin_unlock_irq(&pool->lock); b80: 94000000 bl 0 <rt_spin_unlock> mutex_unlock(&pool->attach_mutex); b84: aa1603e0 mov x0, x22 b88: 94000000 bl 0 <_mutex_unlock> schedule(); b8c: 94000000 bl 0 <schedule> case 4: *(volatile __u32 *)p = *(__u32 *)res; break; b90: b905829f str wzr, [x20, #1408] spin_lock_irq(&pool->lock); b94: aa1403e0 mov x0, x20 b98: 94000000 bl 0 <rt_spin_lock> wake_up_worker(pool); b9c: aa1403e0 mov x0, x20 ba0: 97fffe22 bl 428 <wake_up_worker> spin_unlock_irq(&pool->lock); ba4: aa1403e0 mov x0, x20 ba8: 94000000 bl 0 <rt_spin_unlock> for_each_cpu_worker_pool(pool, cpu) { bac: f8797ae1 ldr x1, [x23, x25, lsl #3] bb0: aa1803e0 mov x0, x24 bb4: 8b010000 add x0, x0, x1 bb8: 91180294 add x20, x20, #0x600 bbc: 91300000 add x0, x0, #0xc00 bc0: eb00029f cmp x20, x0 bc4: 54fff923 b.cc ae8 <wq_unbind_fn+0x60> // b.lo, b.ul, b.last bc8: a9425bf5 ldp x21, x22, [sp, #32] bcc: f9402bfb ldr x27, [sp, #80] } bd0: a94153f3 ldp x19, x20, [sp, #16] bd4: a94363f7 ldp x23, x24, [sp, #48] bd8: a9446bf9 ldp x25, x26, [sp, #64] bdc: a8c67bfd ldp x29, x30, [sp], #96 be0: d65f03c0 ret be4: d503201f nop 0000000000000be8 <wq_device_release>: return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr); } core_initcall(wq_sysfs_init); static void wq_device_release(struct device *dev) { be8: a9bf7bfd stp x29, x30, [sp, #-16]! struct wq_device *wq_dev = container_of(dev, struct wq_device, dev); kfree(wq_dev); bec: d1002000 sub x0, x0, #0x8 { bf0: 910003fd mov x29, sp kfree(wq_dev); bf4: 94000000 bl 0 <kfree> } bf8: a8c17bfd ldp x29, x30, [sp], #16 bfc: d65f03c0 ret 0000000000000c00 <rcu_free_pool>: { c00: a9be7bfd stp x29, x30, [sp, #-32]! c04: 910003fd mov x29, sp c08: a90153f3 stp x19, x20, [sp, #16] c0c: aa0003f4 mov x20, x0 struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu); c10: d1162013 sub x19, x0, #0x588 ida_destroy(&pool->worker_ida); c14: 9113e260 add x0, x19, #0x4f8 c18: 94000000 bl 0 <ida_destroy> free_workqueue_attrs(pool->attrs); c1c: f8580280 ldur x0, [x20, #-128] if (attrs) { c20: b4000040 cbz x0, c28 <rcu_free_pool+0x28> kfree(attrs); c24: 94000000 bl 0 <kfree> kfree(pool); c28: aa1303e0 mov x0, x19 c2c: 94000000 bl 0 <kfree> } c30: a94153f3 ldp x19, x20, [sp, #16] c34: a8c27bfd ldp x29, x30, [sp], #32 c38: d65f03c0 ret c3c: d503201f nop 0000000000000c40 <put_unbound_pool>: { c40: d10303ff sub sp, sp, #0xc0 * structure. 
*/ static inline void __init_completion(struct completion *x) { x->done = 0; init_swait_queue_head(&x->wait); c44: 90000002 adrp x2, 0 <pwq_activate_delayed_work> c48: 91000042 add x2, x2, #0x0 c4c: 90000001 adrp x1, 0 <pwq_activate_delayed_work> c50: 9100e042 add x2, x2, #0x38 c54: 91000021 add x1, x1, #0x0 c58: a9097bfd stp x29, x30, [sp, #144] c5c: 910243fd add x29, sp, #0x90 c60: a90a53f3 stp x19, x20, [sp, #160] c64: 90000014 adrp x20, 0 <__stack_chk_guard> c68: 91000294 add x20, x20, #0x0 c6c: aa0003f3 mov x19, x0 c70: f9400280 ldr x0, [x20] c74: f90047e0 str x0, [sp, #136] c78: d2800000 mov x0, #0x0 // #0 c7c: 9100c3e0 add x0, sp, #0x30 x->done = 0; c80: b9002bff str wzr, [sp, #40] init_swait_queue_head(&x->wait); c84: 94000000 bl 0 <__init_swait_queue_head> lockdep_assert_held(&wq_pool_mutex); c88: 90000000 adrp x0, 0 <debug_locks> c8c: b9400000 ldr w0, [x0] c90: 35000a60 cbnz w0, ddc <put_unbound_pool+0x19c> if (--pool->refcnt) c94: b9452260 ldr w0, [x19, #1312] c98: 51000400 sub w0, w0, #0x1 c9c: b9052260 str w0, [x19, #1312] ca0: 350007a0 cbnz w0, d94 <put_unbound_pool+0x154> if (WARN_ON(!(pool->cpu < 0)) || ca4: b940f260 ldr w0, [x19, #240] ca8: 36f80e80 tbz w0, #31, e78 <put_unbound_pool+0x238> __READ_ONCE_SIZE; cac: f9408660 ldr x0, [x19, #264] WARN_ON(!list_empty(&pool->worklist))) cb0: 91042261 add x1, x19, #0x108 cb4: eb00003f cmp x1, x0 cb8: 54000e01 b.ne e78 <put_unbound_pool+0x238> // b.any if (pool->id >= 0) cbc: f9005bf5 str x21, [sp, #176] cc0: b940fa61 ldr w1, [x19, #248] cc4: 36f80781 tbz w1, #31, db4 <put_unbound_pool+0x174> } static inline int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; cc8: 91180260 add x0, x19, #0x600 ccc: f8518001 ldur x1, [x0, #-232] n->pprev = LIST_POISON2; } static inline void hlist_del_init(struct hlist_node *n) { if (!hlist_unhashed(n)) { cd0: b40000c1 cbz x1, ce8 <put_unbound_pool+0xa8> struct hlist_node *next = n->next; cd4: f8510002 ldur x2, [x0, #-240] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; cd8: f9000022 str x2, [x1] if (next) cdc: b4000042 cbz x2, ce4 <put_unbound_pool+0xa4> next->pprev = pprev; ce0: f9000441 str x1, [x2, #8] h->pprev = NULL; ce4: a9317c1f stp xzr, xzr, [x0, #-240] spin_lock_irq(&pool->lock); ce8: aa1303e0 mov x0, x19 cec: 94000000 bl 0 <rt_spin_lock> wait_event_lock_irq(wq_manager_wait, cf0: b940fe60 ldr w0, [x19, #252] cf4: 37000840 tbnz w0, #0, dfc <put_unbound_pool+0x1bc> __READ_ONCE_SIZE; cf8: f9409261 ldr x1, [x19, #288] pool->flags |= POOL_MANAGER_ACTIVE; cfc: 32000000 orr w0, w0, #0x1 d00: b900fe60 str w0, [x19, #252] if (unlikely(list_empty(&pool->idle_list))) d04: 91048275 add x21, x19, #0x120 d08: eb0102bf cmp x21, x1 d0c: 540000e0 b.eq d28 <put_unbound_pool+0xe8> // b.none return list_first_entry(&pool->idle_list, struct worker, entry); d10: f9409260 ldr x0, [x19, #288] while ((worker = first_idle_worker(pool))) d14: b40000a0 cbz x0, d28 <put_unbound_pool+0xe8> destroy_worker(worker); d18: 97fffd4a bl 240 <destroy_worker> d1c: f9409260 ldr x0, [x19, #288] if (unlikely(list_empty(&pool->idle_list))) d20: eb0002bf cmp x21, x0 d24: 54ffff61 b.ne d10 <put_unbound_pool+0xd0> // b.any WARN_ON(pool->nr_workers || pool->nr_idle); d28: f9408e60 ldr x0, [x19, #280] d2c: b5000a20 cbnz x0, e70 <put_unbound_pool+0x230> spin_unlock_irq(&pool->lock); d30: aa1303e0 mov x0, x19 mutex_lock(&pool->attach_mutex); d34: 910fe275 add x21, x19, #0x3f8 spin_unlock_irq(&pool->lock); d38: 94000000 bl 0 <rt_spin_unlock> mutex_lock(&pool->attach_mutex); d3c: aa1503e0 mov x0, x21 d40: 94000000 bl 0 
<_mutex_lock> d44: f9427260 ldr x0, [x19, #1248] if (!list_empty(&pool->workers)) d48: 91138261 add x1, x19, #0x4e0 d4c: eb00003f cmp x1, x0 d50: 54000060 b.eq d5c <put_unbound_pool+0x11c> // b.none pool->detach_completion = &detach_completion; d54: 9100a3e0 add x0, sp, #0x28 d58: f9027a60 str x0, [x19, #1264] mutex_unlock(&pool->attach_mutex); d5c: aa1503e0 mov x0, x21 d60: 94000000 bl 0 <_mutex_unlock> if (pool->detach_completion) d64: f9427a60 ldr x0, [x19, #1264] d68: b4000040 cbz x0, d70 <put_unbound_pool+0x130> wait_for_completion(pool->detach_completion); d6c: 94000000 bl 0 <wait_for_completion> del_timer_sync(&pool->idle_timer); d70: 9104c260 add x0, x19, #0x130 d74: 94000000 bl 0 <del_timer_sync> del_timer_sync(&pool->mayday_timer); d78: 91064260 add x0, x19, #0x190 d7c: 94000000 bl 0 <del_timer_sync> call_rcu(&pool->rcu, rcu_free_pool); d80: 91162260 add x0, x19, #0x588 d84: 90000001 adrp x1, 0 <pwq_activate_delayed_work> d88: 91000021 add x1, x1, #0x0 d8c: 94000000 bl 0 <call_rcu> d90: f9405bf5 ldr x21, [sp, #176] } d94: f94047e1 ldr x1, [sp, #136] d98: f9400280 ldr x0, [x20] d9c: ca000020 eor x0, x1, x0 da0: b5000700 cbnz x0, e80 <put_unbound_pool+0x240> da4: a9497bfd ldp x29, x30, [sp, #144] da8: a94a53f3 ldp x19, x20, [sp, #160] dac: 910303ff add sp, sp, #0xc0 db0: d65f03c0 ret return radix_tree_delete_item(&idr->idr_rt, id, NULL); db4: 90000000 adrp x0, 0 <pwq_activate_delayed_work> db8: 91000000 add x0, x0, #0x0 dbc: 93407c21 sxtw x1, w1 dc0: 9103a000 add x0, x0, #0xe8 dc4: d2800002 mov x2, #0x0 // #0 dc8: 94000000 bl 0 <radix_tree_delete_item> return !h->pprev; dcc: 91180260 add x0, x19, #0x600 dd0: f8518001 ldur x1, [x0, #-232] if (!hlist_unhashed(n)) { dd4: b5fff801 cbnz x1, cd4 <put_unbound_pool+0x94> dd8: 17ffffc4 b ce8 <put_unbound_pool+0xa8> ddc: 90000000 adrp x0, 0 <pwq_activate_delayed_work> de0: 91000000 add x0, x0, #0x0 de4: 9102e000 add x0, x0, #0xb8 de8: 12800001 mov w1, #0xffffffff // #-1 dec: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq_pool_mutex); df0: 35fff520 cbnz w0, c94 <put_unbound_pool+0x54> df4: d4210000 brk #0x800 df8: 17ffffa7 b c94 <put_unbound_pool+0x54> wait_event_lock_irq(wq_manager_wait, dfc: 910003e0 mov x0, sp e00: 52800001 mov w1, #0x0 // #0 e04: 94000000 bl 0 <init_wait_entry> e08: 90000000 adrp x0, 0 <pwq_activate_delayed_work> e0c: 91000000 add x0, x0, #0x0 e10: 91040015 add x21, x0, #0x100 e14: 14000006 b e2c <put_unbound_pool+0x1ec> e18: aa1303e0 mov x0, x19 e1c: 94000000 bl 0 <rt_spin_unlock> e20: 94000000 bl 0 <schedule> e24: aa1303e0 mov x0, x19 e28: 94000000 bl 0 <rt_spin_lock> e2c: 52800042 mov w2, #0x2 // #2 e30: 910003e1 mov x1, sp e34: aa1503e0 mov x0, x21 e38: 94000000 bl 0 <prepare_to_wait_event> e3c: b940fe60 ldr w0, [x19, #252] e40: 3707fec0 tbnz w0, #0, e18 <put_unbound_pool+0x1d8> e44: aa1503e0 mov x0, x21 e48: 910003e1 mov x1, sp e4c: 94000000 bl 0 <finish_wait> if (unlikely(list_empty(&pool->idle_list))) e50: 91048275 add x21, x19, #0x120 e54: b940fe60 ldr w0, [x19, #252] e58: f9409261 ldr x1, [x19, #288] pool->flags |= POOL_MANAGER_ACTIVE; e5c: 32000000 orr w0, w0, #0x1 e60: b900fe60 str w0, [x19, #252] if (unlikely(list_empty(&pool->idle_list))) e64: eb0102bf cmp x21, x1 e68: 54fff541 b.ne d10 <put_unbound_pool+0xd0> // b.any e6c: 17ffffaf b d28 <put_unbound_pool+0xe8> WARN_ON(pool->nr_workers || pool->nr_idle); e70: d4210000 brk #0x800 e74: 17ffffaf b d30 <put_unbound_pool+0xf0> WARN_ON(!list_empty(&pool->worklist))) e78: d4210000 brk #0x800 e7c: 17ffffc6 b d94 <put_unbound_pool+0x154> e80: f9005bf5 str 
x21, [sp, #176] } e84: 94000000 bl 0 <__stack_chk_fail> 0000000000000e88 <pwq_unbound_release_workfn>: { e88: a9bc7bfd stp x29, x30, [sp, #-64]! e8c: 910003fd mov x29, sp e90: a90153f3 stp x19, x20, [sp, #16] e94: a9025bf5 stp x21, x22, [sp, #32] struct workqueue_struct *wq = pwq->wq; e98: a9775016 ldp x22, x20, [x0, #-144] if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) e9c: b9420281 ldr w1, [x20, #512] ea0: 36080561 tbz w1, #1, f4c <pwq_unbound_release_workfn+0xc4> mutex_lock(&wq->mutex); ea4: aa0003f3 mov x19, x0 ea8: 91008295 add x21, x20, #0x20 eac: aa1503e0 mov x0, x21 eb0: f9001bf7 str x23, [sp, #48] eb4: 94000000 bl 0 <_mutex_lock> __list_del(entry->prev, entry->next); eb8: a97e0662 ldp x2, x1, [x19, #-32] next->prev = prev; ebc: f9000441 str x1, [x2, #8] * grace period has elapsed. */ static inline void list_del_rcu(struct list_head *entry) { __list_del_entry(entry); entry->prev = LIST_POISON2; ec0: d2804003 mov x3, #0x200 // #512 mutex_unlock(&wq->mutex); ec4: aa1503e0 mov x0, x21 ec8: f2fbd5a3 movk x3, #0xdead, lsl #48 mutex_lock(&wq_pool_mutex); ecc: 90000015 adrp x21, 0 <pwq_activate_delayed_work> ed0: 910002b5 add x21, x21, #0x0 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; ed4: f9000022 str x2, [x1] ed8: f81e8263 stur x3, [x19, #-24] __READ_ONCE_SIZE; edc: f9400297 ldr x23, [x20] mutex_unlock(&wq->mutex); ee0: 94000000 bl 0 <_mutex_unlock> mutex_lock(&wq_pool_mutex); ee4: aa1503e0 mov x0, x21 ee8: 94000000 bl 0 <_mutex_lock> put_unbound_pool(pool); eec: aa1603e0 mov x0, x22 ef0: 97ffff54 bl c40 <put_unbound_pool> mutex_unlock(&wq_pool_mutex); ef4: aa1503e0 mov x0, x21 ef8: 94000000 bl 0 <_mutex_unlock> call_rcu(&pwq->rcu, rcu_free_pwq); efc: 91014260 add x0, x19, #0x50 f00: 90000001 adrp x1, 0 <pwq_activate_delayed_work> f04: 91000021 add x1, x1, #0x0 f08: 94000000 bl 0 <call_rcu> if (is_last) f0c: eb17029f cmp x20, x23 f10: 540000c0 b.eq f28 <pwq_unbound_release_workfn+0xa0> // b.none f14: f9401bf7 ldr x23, [sp, #48] } f18: a94153f3 ldp x19, x20, [sp, #16] f1c: a9425bf5 ldp x21, x22, [sp, #32] f20: a8c47bfd ldp x29, x30, [sp], #64 f24: d65f03c0 ret call_rcu(&wq->rcu, rcu_free_wq); f28: 91070280 add x0, x20, #0x1c0 f2c: 90000001 adrp x1, 0 <pwq_activate_delayed_work> f30: 91000021 add x1, x1, #0x0 f34: 94000000 bl 0 <call_rcu> } f38: a94153f3 ldp x19, x20, [sp, #16] f3c: a9425bf5 ldp x21, x22, [sp, #32] f40: f9401bf7 ldr x23, [sp, #48] f44: a8c47bfd ldp x29, x30, [sp], #64 f48: d65f03c0 ret if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) f4c: d4210000 brk #0x800 f50: 17fffff2 b f18 <pwq_unbound_release_workfn+0x90> f54: d503201f nop 0000000000000f58 <rcu_free_wq>: { f58: a9be7bfd stp x29, x30, [sp, #-32]! 
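
(Note on the teardown path listed above, put_unbound_pool() and
pwq_unbound_release_workfn(); rcu_free_wq and rcu_free_pwq follow just below.
The sketch is condensed from the interleaved source in this listing, not a
verbatim copy of the tree; the field names and call ordering are the ones
visible in the disassembly.)

    static void pwq_unbound_release_workfn(struct work_struct *work)
    {
            struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
                                                      unbound_release_work);
            struct workqueue_struct *wq = pwq->wq;
            struct worker_pool *pool = pwq->pool;
            bool is_last;

            if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
                    return;

            mutex_lock(&wq->mutex);
            list_del_rcu(&pwq->pwqs_node);          /* unlink from wq->pwqs */
            is_last = list_empty(&wq->pwqs);
            mutex_unlock(&wq->mutex);

            mutex_lock(&wq_pool_mutex);
            put_unbound_pool(pool);                 /* reaps idle workers, waits for
                                                     * detach_completion, then
                                                     * call_rcu(&pool->rcu, rcu_free_pool) */
            mutex_unlock(&wq_pool_mutex);

            call_rcu(&pwq->rcu, rcu_free_pwq);      /* as in this listing; mainline 4.14
                                                     * uses call_rcu_sched here */
            if (is_last)                            /* last pwq also frees the wq */
                    call_rcu(&wq->rcu, rcu_free_wq);
    }

Everything on this path is freed behind an RCU grace period, so a stale
pool/pwq pointer would normally only blow up some time after the teardown.
That is one possibility to keep in mind while matching the crash against this
listing, not a conclusion.
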
f5c: 910003fd mov x29, sp f60: a90153f3 stp x19, x20, [sp, #16] container_of(rcu, struct workqueue_struct, rcu); f64: d1070014 sub x20, x0, #0x1c0 { f68: aa0003f3 mov x19, x0 if (!(wq->flags & WQ_UNBOUND)) f6c: b9420280 ldr w0, [x20, #512] f70: 36080160 tbz w0, #1, f9c <rcu_free_wq+0x44> free_workqueue_attrs(wq->unbound_attrs); f74: f85a0260 ldur x0, [x19, #-96] if (attrs) { f78: b4000040 cbz x0, f80 <rcu_free_wq+0x28> kfree(attrs); f7c: 94000000 bl 0 <kfree> kfree(wq->rescuer); f80: f8590260 ldur x0, [x19, #-112] f84: 94000000 bl 0 <kfree> kfree(wq); f88: aa1403e0 mov x0, x20 f8c: 94000000 bl 0 <kfree> } f90: a94153f3 ldp x19, x20, [sp, #16] f94: a8c27bfd ldp x29, x30, [sp], #32 f98: d65f03c0 ret free_percpu(wq->cpu_pwqs); f9c: f9410680 ldr x0, [x20, #520] fa0: 94000000 bl 0 <free_percpu> kfree(wq->rescuer); fa4: f8590260 ldur x0, [x19, #-112] fa8: 94000000 bl 0 <kfree> kfree(wq); fac: aa1403e0 mov x0, x20 fb0: 94000000 bl 0 <kfree> } fb4: a94153f3 ldp x19, x20, [sp, #16] fb8: a8c27bfd ldp x29, x30, [sp], #32 fbc: d65f03c0 ret 0000000000000fc0 <rcu_free_pwq>: { fc0: a9bf7bfd stp x29, x30, [sp, #-16]! kmem_cache_free(pwq_cache, fc4: 90000002 adrp x2, 0 <pwq_activate_delayed_work> fc8: d1038001 sub x1, x0, #0xe0 { fcc: 910003fd mov x29, sp kmem_cache_free(pwq_cache, fd0: f9400040 ldr x0, [x2] fd4: 94000000 bl 0 <kmem_cache_free> } fd8: a8c17bfd ldp x29, x30, [sp], #16 fdc: d65f03c0 ret 0000000000000fe0 <worker_detach_from_pool>: { fe0: a9bd7bfd stp x29, x30, [sp, #-48]! fe4: 910003fd mov x29, sp fe8: a90153f3 stp x19, x20, [sp, #16] fec: aa0003f3 mov x19, x0 ff0: aa0103f4 mov x20, x1 ff4: f90013f5 str x21, [sp, #32] mutex_lock(&pool->attach_mutex); ff8: 910fe035 add x21, x1, #0x3f8 ffc: aa1503e0 mov x0, x21 1000: 94000000 bl 0 <_mutex_lock> __list_del(entry->prev, entry->next); 1004: a9450261 ldp x1, x0, [x19, #80] next->prev = prev; 1008: f9000420 str x0, [x1, #8] entry->next = LIST_POISON1; 100c: d2802004 mov x4, #0x100 // #256 entry->prev = LIST_POISON2; 1010: d2804003 mov x3, #0x200 // #512 entry->next = LIST_POISON1; 1014: f2fbd5a4 movk x4, #0xdead, lsl #48 entry->prev = LIST_POISON2; 1018: f2fbd5a3 movk x3, #0xdead, lsl #48 if (list_empty(&pool->workers)) 101c: 91138282 add x2, x20, #0x4e0 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 1020: f9000001 str x1, [x0] 1024: a9050e64 stp x4, x3, [x19, #80] __READ_ONCE_SIZE; 1028: f9427280 ldr x0, [x20, #1248] 102c: eb00005f cmp x2, x0 mutex_unlock(&pool->attach_mutex); 1030: aa1503e0 mov x0, x21 if (list_empty(&pool->workers)) 1034: 54000120 b.eq 1058 <worker_detach_from_pool+0x78> // b.none mutex_unlock(&pool->attach_mutex); 1038: 94000000 bl 0 <_mutex_unlock> worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); 103c: b9406a60 ldr w0, [x19, #104] 1040: 12177400 and w0, w0, #0xfffffe7f 1044: b9006a60 str w0, [x19, #104] } 1048: a94153f3 ldp x19, x20, [sp, #16] 104c: f94013f5 ldr x21, [sp, #32] 1050: a8c37bfd ldp x29, x30, [sp], #48 1054: d65f03c0 ret detach_completion = pool->detach_completion; 1058: f9427a94 ldr x20, [x20, #1264] mutex_unlock(&pool->attach_mutex); 105c: 94000000 bl 0 <_mutex_unlock> worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND); 1060: b9406a60 ldr w0, [x19, #104] 1064: 12177400 and w0, w0, #0xfffffe7f 1068: b9006a60 str w0, [x19, #104] if (detach_completion) 106c: b4fffef4 cbz x20, 1048 <worker_detach_from_pool+0x68> complete(detach_completion); 1070: aa1403e0 mov x0, x20 1074: 94000000 bl 0 <complete> } 1078: a94153f3 ldp x19, x20, [sp, #16] 107c: f94013f5 ldr x21, [sp, #32] 1080: a8c37bfd ldp x29, x30, [sp], 
#48 1084: d65f03c0 ret 0000000000001088 <flush_workqueue_prep_pwqs>: { 1088: a9ba7bfd stp x29, x30, [sp, #-96]! 108c: 910003fd mov x29, sp 1090: a90153f3 stp x19, x20, [sp, #16] 1094: aa0003f4 mov x20, x0 1098: a9025bf5 stp x21, x22, [sp, #32] 109c: 2a0103f6 mov w22, w1 10a0: 2a0203f5 mov w21, w2 10a4: a9046bf9 stp x25, x26, [sp, #64] if (flush_color >= 0) { 10a8: 37f80be1 tbnz w1, #31, 1224 <flush_workqueue_prep_pwqs+0x19c> 10ac: b9411000 ldr w0, [x0, #272] WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 10b0: 35000dc0 cbnz w0, 1268 <flush_workqueue_prep_pwqs+0x1e0> case 4: *(volatile __u32 *)p = *(__u32 *)res; break; 10b4: 52800020 mov w0, #0x1 // #1 10b8: b9011280 str w0, [x20, #272] __READ_ONCE_SIZE; 10bc: f9400293 ldr x19, [x20] for_each_pwq(pwq, wq) { 10c0: eb13029f cmp x20, x19 10c4: d101c273 sub x19, x19, #0x70 10c8: 54000cc0 b.eq 1260 <flush_workqueue_prep_pwqs+0x1d8> // b.none 10cc: 9000001a adrp x26, 0 <pwq_activate_delayed_work> 10d0: 9100035a add x26, x26, #0x0 10d4: a90363f7 stp x23, x24, [sp, #48] 10d8: 90000017 adrp x23, 0 <pwq_activate_delayed_work> return (color + 1) % WORK_NR_COLORS; 10dc: 52911138 mov w24, #0x8889 // #34953 for_each_pwq(pwq, wq) { 10e0: 910002f7 add x23, x23, #0x0 bool wait = false; 10e4: 52800019 mov w25, #0x0 // #0 return (color + 1) % WORK_NR_COLORS; 10e8: 72b11118 movk w24, #0x8888, lsl #16 10ec: f9002bfb str x27, [sp, #80] for_each_pwq(pwq, wq) { 10f0: 94000000 bl 0 <debug_lockdep_rcu_enabled> 10f4: 34000060 cbz w0, 1100 <flush_workqueue_prep_pwqs+0x78> 10f8: 394006e0 ldrb w0, [x23, #1] 10fc: 34000700 cbz w0, 11dc <flush_workqueue_prep_pwqs+0x154> spin_lock_irq(&pool->lock); 1100: f940027b ldr x27, [x19] 1104: aa1b03e0 mov x0, x27 1108: 94000000 bl 0 <rt_spin_lock> if (flush_color >= 0) { 110c: 37f801f6 tbnz w22, #31, 1148 <flush_workqueue_prep_pwqs+0xc0> WARN_ON_ONCE(pwq->flush_color != -1); 1110: b9401660 ldr w0, [x19, #20] 1114: 3100041f cmn w0, #0x1 1118: 54000821 b.ne 121c <flush_workqueue_prep_pwqs+0x194> // b.any if (pwq->nr_in_flight[flush_color]) { 111c: 8b36ca60 add x0, x19, w22, sxtw #2 1120: b9401c00 ldr w0, [x0, #28] 1124: 34000120 cbz w0, 1148 <flush_workqueue_prep_pwqs+0xc0> ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\ ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\ ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) ATOMIC_OPS(add, add) 1128: 91044282 add x2, x20, #0x110 pwq->flush_color = flush_color; 112c: b9001676 str w22, [x19, #20] 1130: f9800051 prfm pstl1strm, [x2] 1134: 885f7c40 ldxr w0, [x2] 1138: 11000400 add w0, w0, #0x1 113c: 88017c40 stxr w1, w0, [x2] 1140: 35ffffa1 cbnz w1, 1134 <flush_workqueue_prep_pwqs+0xac> wait = true; 1144: 52800039 mov w25, #0x1 // #1 if (work_color >= 0) { 1148: 37f801d5 tbnz w21, #31, 1180 <flush_workqueue_prep_pwqs+0xf8> return (color + 1) % WORK_NR_COLORS; 114c: b9401261 ldr w1, [x19, #16] 1150: 11000421 add w1, w1, #0x1 1154: 9b387c20 smull x0, w1, w24 1158: d360fc00 lsr x0, x0, #32 115c: 0b000020 add w0, w1, w0 1160: 13037c00 asr w0, w0, #3 1164: 4b817c00 sub w0, w0, w1, asr #31 1168: 531c6c02 lsl w2, w0, #4 116c: 4b000040 sub w0, w2, w0 1170: 4b000021 sub w1, w1, w0 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 1174: 6b0102bf cmp w21, w1 1178: 540004e1 b.ne 1214 <flush_workqueue_prep_pwqs+0x18c> // b.any pwq->work_color = work_color; 117c: b9001275 str w21, [x19, #16] spin_unlock_irq(&pool->lock); 1180: aa1b03e0 mov x0, x27 1184: 94000000 bl 0 <rt_spin_unlock> 1188: f9403a73 ldr x19, [x19, #112] 
for_each_pwq(pwq, wq) { 118c: eb13029f cmp x20, x19 1190: d101c273 sub x19, x19, #0x70 1194: 54fffae1 b.ne 10f0 <flush_workqueue_prep_pwqs+0x68> // b.any if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 1198: a94363f7 ldp x23, x24, [sp, #48] 119c: f9402bfb ldr x27, [sp, #80] 11a0: 37f80136 tbnz w22, #31, 11c4 <flush_workqueue_prep_pwqs+0x13c> ATOMIC_OPS(sub, sub) 11a4: 91044282 add x2, x20, #0x110 11a8: f9800051 prfm pstl1strm, [x2] 11ac: 885f7c40 ldxr w0, [x2] 11b0: 51000400 sub w0, w0, #0x1 11b4: 8801fc40 stlxr w1, w0, [x2] 11b8: 35ffffa1 cbnz w1, 11ac <flush_workqueue_prep_pwqs+0x124> 11bc: d5033bbf dmb ish 11c0: 340003e0 cbz w0, 123c <flush_workqueue_prep_pwqs+0x1b4> } 11c4: 2a1903e0 mov w0, w25 11c8: a94153f3 ldp x19, x20, [sp, #16] 11cc: a9425bf5 ldp x21, x22, [sp, #32] 11d0: a9446bf9 ldp x25, x26, [sp, #64] 11d4: a8c67bfd ldp x29, x30, [sp], #96 11d8: d65f03c0 ret for_each_pwq(pwq, wq) { 11dc: 94000000 bl 0 <rcu_read_lock_held> 11e0: 35fff900 cbnz w0, 1100 <flush_workqueue_prep_pwqs+0x78> 11e4: 12800001 mov w1, #0xffffffff // #-1 11e8: 91036280 add x0, x20, #0xd8 11ec: 94000000 bl 0 <lock_is_held_type> 11f0: 35fff880 cbnz w0, 1100 <flush_workqueue_prep_pwqs+0x78> 11f4: 52800023 mov w3, #0x1 // #1 11f8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 11fc: aa1a03e2 mov x2, x26 1200: 91000000 add x0, x0, #0x0 1204: 52814a41 mov w1, #0xa52 // #2642 1208: 390006e3 strb w3, [x23, #1] 120c: 94000000 bl 0 <lockdep_rcu_suspicious> 1210: 17ffffbc b 1100 <flush_workqueue_prep_pwqs+0x78> WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); 1214: d4210000 brk #0x800 1218: 17ffffd9 b 117c <flush_workqueue_prep_pwqs+0xf4> WARN_ON_ONCE(pwq->flush_color != -1); 121c: d4210000 brk #0x800 1220: 17ffffbf b 111c <flush_workqueue_prep_pwqs+0x94> 1224: f9400013 ldr x19, [x0] for_each_pwq(pwq, wq) { 1228: eb13001f cmp x0, x19 122c: d101c273 sub x19, x19, #0x70 1230: 54fff4e1 b.ne 10cc <flush_workqueue_prep_pwqs+0x44> // b.any bool wait = false; 1234: 52800019 mov w25, #0x0 // #0 return wait; 1238: 17ffffe3 b 11c4 <flush_workqueue_prep_pwqs+0x13c> complete(&wq->first_flusher->done); 123c: f9408e80 ldr x0, [x20, #280] 1240: 91006000 add x0, x0, #0x18 1244: 94000000 bl 0 <complete> } 1248: 2a1903e0 mov w0, w25 124c: a94153f3 ldp x19, x20, [sp, #16] 1250: a9425bf5 ldp x21, x22, [sp, #32] 1254: a9446bf9 ldp x25, x26, [sp, #64] 1258: a8c67bfd ldp x29, x30, [sp], #96 125c: d65f03c0 ret bool wait = false; 1260: 52800019 mov w25, #0x0 // #0 1264: 17ffffd0 b 11a4 <flush_workqueue_prep_pwqs+0x11c> WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); 1268: d4210000 brk #0x800 126c: 17ffff92 b 10b4 <flush_workqueue_prep_pwqs+0x2c> 0000000000001270 <wq_barrier_func>: { 1270: a9bf7bfd stp x29, x30, [sp, #-16]! complete(&barr->done); 1274: 91014000 add x0, x0, #0x50 { 1278: 910003fd mov x29, sp complete(&barr->done); 127c: 94000000 bl 0 <complete> } 1280: a8c17bfd ldp x29, x30, [sp], #16 1284: d65f03c0 ret 0000000000001288 <worker_attach_to_pool>: { 1288: a9bd7bfd stp x29, x30, [sp, #-48]! 
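
(Note on flush_workqueue_prep_pwqs() above: the recurring "mov #0x8889 /
movk #0x8888 / smull / asr" sequences are just the compiler's strength-reduced
form of "% WORK_NR_COLORS" from work_next_color(), with WORK_NR_COLORS == 15
on this kernel. Below is a minimal host-side check of that arithmetic; only
work_next_color() itself is taken from the interleaved source, the rest is
test scaffolding.)

    #include <assert.h>

    #define WORK_NR_COLORS  15      /* (1 << WORK_STRUCT_COLOR_BITS) - 1 */

    /* as in the interleaved source above */
    static int work_next_color(int color)
    {
            return (color + 1) % WORK_NR_COLORS;
    }

    int main(void)
    {
            /* colours cycle through 0..14; 15 is reserved as WORK_NO_COLOR */
            for (int c = 0; c < WORK_NR_COLORS; c++)
                    assert(work_next_color(c) >= 0 &&
                           work_next_color(c) < WORK_NR_COLORS);
            return 0;
    }

Each pwq keeps an nr_in_flight[] counter per colour; flush_workqueue_prep_pwqs()
tags every pwq that still has work of the flush colour in flight and bumps
wq->nr_pwqs_to_flush for it, which is the count the flusher later waits on.
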
128c: 910003fd mov x29, sp 1290: a90153f3 stp x19, x20, [sp, #16] 1294: aa0003f4 mov x20, x0 1298: aa0103f3 mov x19, x1 129c: f90013f5 str x21, [sp, #32] mutex_lock(&pool->attach_mutex); 12a0: 910fe035 add x21, x1, #0x3f8 12a4: aa1503e0 mov x0, x21 12a8: 94000000 bl 0 <_mutex_lock> set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask); 12ac: f9402280 ldr x0, [x20, #64] 12b0: f9428661 ldr x1, [x19, #1288] 12b4: 91002021 add x1, x1, #0x8 12b8: 94000000 bl 0 <set_cpus_allowed_ptr> if (pool->flags & POOL_DISASSOCIATED) 12bc: b940fe60 ldr w0, [x19, #252] 12c0: 36100080 tbz w0, #2, 12d0 <worker_attach_to_pool+0x48> worker->flags |= WORKER_UNBOUND; 12c4: b9406a80 ldr w0, [x20, #104] 12c8: 32190000 orr w0, w0, #0x80 12cc: b9006a80 str w0, [x20, #104] __list_add(new, head->prev, head); 12d0: 91100263 add x3, x19, #0x400 list_add_tail(&worker->node, &pool->workers); 12d4: 91014282 add x2, x20, #0x50 12d8: f9407461 ldr x1, [x3, #232] 12dc: 91138273 add x19, x19, #0x4e0 next->prev = new; 12e0: f9007462 str x2, [x3, #232] mutex_unlock(&pool->attach_mutex); 12e4: aa1503e0 mov x0, x21 new->prev = prev; 12e8: a9050693 stp x19, x1, [x20, #80] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 12ec: f9000022 str x2, [x1] 12f0: 94000000 bl 0 <_mutex_unlock> } 12f4: a94153f3 ldp x19, x20, [sp, #16] 12f8: f94013f5 ldr x21, [sp, #32] 12fc: a8c37bfd ldp x29, x30, [sp], #48 1300: d65f03c0 ret 1304: d503201f nop 0000000000001308 <check_flush_dependency>: { 1308: a9bd7bfd stp x29, x30, [sp, #-48]! 130c: 910003fd mov x29, sp 1310: a9025bf5 stp x21, x22, [sp, #32] 1314: d2800015 mov x21, #0x0 // #0 work_func_t target_func = target_work ? target_work->func : NULL; 1318: b4000041 cbz x1, 1320 <check_flush_dependency+0x18> 131c: f9400c35 ldr x21, [x1, #24] if (target_wq->flags & WQ_MEM_RECLAIM) 1320: b9420001 ldr w1, [x0, #512] 1324: 37180281 tbnz w1, #3, 1374 <check_flush_dependency+0x6c> 1328: a90153f3 stp x19, x20, [sp, #16] 132c: aa0003f3 mov x19, x0 1330: d5384114 mrs x20, sp_el0 __READ_ONCE_SIZE; 1334: b9401281 ldr w1, [x20, #16] /** * current_wq_worker - return struct worker if %current is a workqueue worker */ static inline struct worker *current_wq_worker(void) { if (in_task() && (current->flags & PF_WQ_WORKER)) 1338: 12183021 and w1, w1, #0x1fff00 133c: 12106021 and w1, w1, #0xffff01ff 1340: 35000061 cbnz w1, 134c <check_flush_dependency+0x44> 1344: b9403680 ldr w0, [x20, #52] 1348: 372801c0 tbnz w0, #5, 1380 <check_flush_dependency+0x78> 134c: d5384100 mrs x0, sp_el0 WARN_ONCE(current->flags & PF_MEMALLOC, 1350: b9403400 ldr w0, [x0, #52] 1354: 365800e0 tbz w0, #11, 1370 <check_flush_dependency+0x68> 1358: 90000005 adrp x5, 0 <pwq_activate_delayed_work> 135c: 910000a5 add x5, x5, #0x0 1360: d2800016 mov x22, #0x0 // #0 1364: 394008a0 ldrb w0, [x5, #2] 1368: 340004e0 cbz w0, 1404 <check_flush_dependency+0xfc> 136c: d503201f nop 1370: a94153f3 ldp x19, x20, [sp, #16] } 1374: a9425bf5 ldp x21, x22, [sp, #32] 1378: a8c37bfd ldp x29, x30, [sp], #48 137c: d65f03c0 ret return kthread_data(current); 1380: aa1403e0 mov x0, x20 1384: 94000000 bl 0 <kthread_data> 1388: aa0003f6 mov x22, x0 WARN_ONCE(current->flags & PF_MEMALLOC, 138c: b9403680 ldr w0, [x20, #52] 1390: 37580320 tbnz w0, #11, 13f4 <check_flush_dependency+0xec> WARN_ONCE(worker && ((worker->current_pwq->wq->flags & 1394: b4fffef6 cbz x22, 1370 <check_flush_dependency+0x68> 1398: f94012c0 ldr x0, [x22, #32] 139c: f9400401 ldr x1, [x0, #8] 13a0: b9420022 ldr w2, [x1, #512] 13a4: 121d3c42 and w2, w2, #0x7fff8 13a8: 120e4442 and w2, w2, #0xfffc000f 13ac: 
7100205f cmp w2, #0x8 13b0: 54fffe01 b.ne 1370 <check_flush_dependency+0x68> // b.any 13b4: 90000005 adrp x5, 0 <pwq_activate_delayed_work> 13b8: 910000a5 add x5, x5, #0x0 13bc: 39400ca0 ldrb w0, [x5, #3] 13c0: 35fffd80 cbnz w0, 1370 <check_flush_dependency+0x68> 13c4: f9400ec2 ldr x2, [x22, #24] 13c8: 52800026 mov w6, #0x1 // #1 13cc: aa1503e4 mov x4, x21 13d0: 9106a263 add x3, x19, #0x1a8 13d4: 9106a021 add x1, x1, #0x1a8 13d8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 13dc: 91000000 add x0, x0, #0x0 13e0: 39000ca6 strb w6, [x5, #3] 13e4: 94000000 bl 0 <printk> 13e8: d4210000 brk #0x800 13ec: a94153f3 ldp x19, x20, [sp, #16] 13f0: 17ffffe1 b 1374 <check_flush_dependency+0x6c> WARN_ONCE(current->flags & PF_MEMALLOC, 13f4: 90000005 adrp x5, 0 <pwq_activate_delayed_work> 13f8: 910000a5 add x5, x5, #0x0 13fc: 394008a0 ldrb w0, [x5, #2] 1400: 35fffca0 cbnz w0, 1394 <check_flush_dependency+0x8c> 1404: d5384102 mrs x2, sp_el0 1408: b945a841 ldr w1, [x2, #1448] 140c: 52800026 mov w6, #0x1 // #1 1410: aa1503e4 mov x4, x21 1414: 9106a263 add x3, x19, #0x1a8 1418: 911e6042 add x2, x2, #0x798 141c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 1420: 91000000 add x0, x0, #0x0 1424: 390008a6 strb w6, [x5, #2] 1428: 94000000 bl 0 <printk> 142c: d4210000 brk #0x800 1430: 17ffffd9 b 1394 <check_flush_dependency+0x8c> 1434: d503201f nop 0000000000001438 <flush_workqueue>: { 1438: d10343ff sub sp, sp, #0xd0 struct wq_flusher this_flusher = { 143c: 12800003 mov w3, #0xffffffff // #-1 1440: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 1444: 91000021 add x1, x1, #0x0 { 1448: a9087bfd stp x29, x30, [sp, #128] 144c: 910203fd add x29, sp, #0x80 1450: a90953f3 stp x19, x20, [sp, #144] 1454: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 1458: 91000273 add x19, x19, #0x0 145c: a90b63f7 stp x23, x24, [sp, #176] 1460: 90000017 adrp x23, 0 <__stack_chk_guard> 1464: 910002f7 add x23, x23, #0x0 1468: aa0003f4 mov x20, x0 146c: f94002e0 ldr x0, [x23] 1470: f9003fe0 str x0, [sp, #120] 1474: d2800000 mov x0, #0x0 // #0 1478: a90a5bf5 stp x21, x22, [sp, #160] struct wq_flusher this_flusher = { 147c: 910003f5 mov x21, sp 1480: 910082a0 add x0, x21, #0x20 1484: 9100e262 add x2, x19, #0x38 1488: a90057f5 stp x21, x21, [sp] 148c: b90013e3 str w3, [sp, #16] x->done = 0; 1490: b9001bff str wzr, [sp, #24] init_swait_queue_head(&x->wait); 1494: 94000000 bl 0 <__init_swait_queue_head> if (WARN_ON(!wq_online)) 1498: 39412260 ldrb w0, [x19, #72] 149c: 34001d00 cbz w0, 183c <flush_workqueue+0x404> lock_map_acquire(&wq->lockdep_map); 14a0: 9105e293 add x19, x20, #0x178 14a4: 52800003 mov w3, #0x0 // #0 14a8: aa1303e0 mov x0, x19 14ac: d2800005 mov x5, #0x0 // #0 14b0: 52800024 mov w4, #0x1 // #1 14b4: 52800002 mov w2, #0x0 // #0 14b8: 52800001 mov w1, #0x0 // #0 14bc: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 14c0: 910000c6 add x6, x6, #0x0 14c4: 94000000 bl 0 <lock_acquire> lock_map_release(&wq->lockdep_map); 14c8: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 14cc: 91000042 add x2, x2, #0x0 14d0: 52800021 mov w1, #0x1 // #1 14d4: aa1303e0 mov x0, x19 mutex_lock(&wq->mutex); 14d8: 91008298 add x24, x20, #0x20 lock_map_release(&wq->lockdep_map); 14dc: 94000000 bl 0 <lock_release> mutex_lock(&wq->mutex); 14e0: aa1803e0 mov x0, x24 14e4: 94000000 bl 0 <_mutex_lock> next_color = work_next_color(wq->work_color); 14e8: b9410a82 ldr w2, [x20, #264] return (color + 1) % WORK_NR_COLORS; 14ec: 52911133 mov w19, #0x8889 // #34953 14f0: 72b11113 movk w19, #0x8888, lsl #16 if (next_color != wq->flush_color) { 14f4: b9410e81 
ldr w1, [x20, #268] return (color + 1) % WORK_NR_COLORS; 14f8: 11000440 add w0, w2, #0x1 14fc: 9104c296 add x22, x20, #0x130 1500: 9b337c13 smull x19, w0, w19 1504: d360fe73 lsr x19, x19, #32 1508: 0b130013 add w19, w0, w19 150c: 13037e73 asr w19, w19, #3 1510: 4b807e73 sub w19, w19, w0, asr #31 1514: 531c6e63 lsl w3, w19, #4 1518: 4b130073 sub w19, w3, w19 151c: 4b130013 sub w19, w0, w19 if (next_color != wq->flush_color) { 1520: 6b13003f cmp w1, w19 1524: 540004e0 b.eq 15c0 <flush_workqueue+0x188> // b.none 1528: f9409a80 ldr x0, [x20, #304] WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); 152c: eb0002df cmp x22, x0 1530: 54001941 b.ne 1858 <flush_workqueue+0x420> // b.any if (!wq->first_flusher) { 1534: f9408e80 ldr x0, [x20, #280] this_flusher.flush_color = wq->work_color; 1538: b90013e2 str w2, [sp, #16] wq->work_color = next_color; 153c: b9010a93 str w19, [x20, #264] WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 1540: 6b01005f cmp w2, w1 if (!wq->first_flusher) { 1544: b4000580 cbz x0, 15f4 <flush_workqueue+0x1bc> WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); 1548: 54001a00 b.eq 1888 <flush_workqueue+0x450> // b.none __list_add(new, head->prev, head); 154c: f9409682 ldr x2, [x20, #296] list_add_tail(&this_flusher.list, &wq->flusher_queue); 1550: 91048280 add x0, x20, #0x120 new->prev = prev; 1554: a9000be0 stp x0, x2, [sp] flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 1558: 12800001 mov w1, #0xffffffff // #-1 155c: aa1403e0 mov x0, x20 next->prev = new; 1560: f9009695 str x21, [x20, #296] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 1564: f9000055 str x21, [x2] 1568: b9410a82 ldr w2, [x20, #264] 156c: 97fffec7 bl 1088 <flush_workqueue_prep_pwqs> check_flush_dependency(wq, NULL); 1570: d2800001 mov x1, #0x0 // #0 1574: aa1403e0 mov x0, x20 1578: 97ffff64 bl 1308 <check_flush_dependency> mutex_unlock(&wq->mutex); 157c: aa1803e0 mov x0, x24 1580: 94000000 bl 0 <_mutex_unlock> wait_for_completion(&this_flusher.done); 1584: 910062a0 add x0, x21, #0x18 1588: 94000000 bl 0 <wait_for_completion> if (wq->first_flusher != &this_flusher) 158c: f9408e80 ldr x0, [x20, #280] 1590: eb15001f cmp x0, x21 1594: 54000200 b.eq 15d4 <flush_workqueue+0x19c> // b.none } 1598: f9403fe1 ldr x1, [sp, #120] 159c: f94002e0 ldr x0, [x23] 15a0: ca000020 eor x0, x1, x0 15a4: b5001880 cbnz x0, 18b4 <flush_workqueue+0x47c> 15a8: a9487bfd ldp x29, x30, [sp, #128] 15ac: a94953f3 ldp x19, x20, [sp, #144] 15b0: a94a5bf5 ldp x21, x22, [sp, #160] 15b4: a94b63f7 ldp x23, x24, [sp, #176] 15b8: 910343ff add sp, sp, #0xd0 15bc: d65f03c0 ret __list_add(new, head->prev, head); 15c0: f9409e80 ldr x0, [x20, #312] new->prev = prev; 15c4: a90003f6 stp x22, x0, [sp] next->prev = new; 15c8: f9009e95 str x21, [x20, #312] 15cc: f9000015 str x21, [x0] WRITE_ONCE(prev->next, new); 15d0: 17ffffe8 b 1570 <flush_workqueue+0x138> mutex_lock(&wq->mutex); 15d4: aa1803e0 mov x0, x24 15d8: 94000000 bl 0 <_mutex_lock> if (wq->first_flusher != &this_flusher) 15dc: f9408e80 ldr x0, [x20, #280] 15e0: eb15001f cmp x0, x21 15e4: 540001c0 b.eq 161c <flush_workqueue+0x1e4> // b.none mutex_unlock(&wq->mutex); 15e8: aa1803e0 mov x0, x24 15ec: 94000000 bl 0 <_mutex_unlock> 15f0: 17ffffea b 1598 <flush_workqueue+0x160> WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 15f4: 2a1303e2 mov w2, w19 15f8: 540014c1 b.ne 1890 <flush_workqueue+0x458> // b.any wq->first_flusher = &this_flusher; 15fc: f9008e95 str x21, [x20, #280] if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, 1600: aa1403e0 mov x0, x20 1604: 
97fffea1 bl 1088 <flush_workqueue_prep_pwqs> 1608: 72001c1f tst w0, #0xff 160c: 54fffb21 b.ne 1570 <flush_workqueue+0x138> // b.any wq->flush_color = next_color; 1610: b9010e93 str w19, [x20, #268] wq->first_flusher = NULL; 1614: f9008e9f str xzr, [x20, #280] goto out_unlock; 1618: 17fffff4 b 15e8 <flush_workqueue+0x1b0> __READ_ONCE_SIZE; 161c: f94003e0 ldr x0, [sp] 1620: a90c6bf9 stp x25, x26, [sp, #192] wq->first_flusher = NULL; 1624: f9008e9f str xzr, [x20, #280] WARN_ON_ONCE(!list_empty(&this_flusher.list)); 1628: eb0002bf cmp x21, x0 162c: 540013a1 b.ne 18a0 <flush_workqueue+0x468> // b.any WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); 1630: b9410e81 ldr w1, [x20, #268] 1634: b94013e0 ldr w0, [sp, #16] 1638: 6b00003f cmp w1, w0 163c: 54001361 b.ne 18a8 <flush_workqueue+0x470> // b.any return (color + 1) % WORK_NR_COLORS; 1640: 52911139 mov w25, #0x8889 // #34953 1644: 91048295 add x21, x20, #0x120 1648: 72b11119 movk w25, #0x8888, lsl #16 164c: d503201f nop list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 1650: f9409282 ldr x2, [x20, #288] 1654: eb15005f cmp x2, x21 1658: aa0203fa mov x26, x2 165c: f9400041 ldr x1, [x2] 1660: 54001040 b.eq 1868 <flush_workqueue+0x430> // b.none if (next->flush_color != wq->flush_color) 1664: b9401043 ldr w3, [x2, #16] 1668: 6b00007f cmp w3, w0 166c: 540000e0 b.eq 1688 <flush_workqueue+0x250> // b.none 1670: 14000081 b 1874 <flush_workqueue+0x43c> 1674: b9401263 ldr w3, [x19, #16] 1678: aa1303e2 mov x2, x19 167c: b9410e80 ldr w0, [x20, #268] 1680: 6b00007f cmp w3, w0 1684: 540001c1 b.ne 16bc <flush_workqueue+0x284> // b.any __list_del(entry->prev, entry->next); 1688: f9400443 ldr x3, [x2, #8] next->prev = prev; 168c: f9000423 str x3, [x1, #8] list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 1690: aa0103f3 mov x19, x1 complete(&next->done); 1694: 91006040 add x0, x2, #0x18 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 1698: f9000061 str x1, [x3] 169c: f900005a str x26, [x2] list->prev = list; 16a0: f900045a str x26, [x2, #8] list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 16a4: aa0103fa mov x26, x1 complete(&next->done); 16a8: 94000000 bl 0 <complete> list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { 16ac: eb15027f cmp x19, x21 16b0: f9400261 ldr x1, [x19] 16b4: 54fffe01 b.ne 1674 <flush_workqueue+0x23c> // b.any 16b8: b9410e80 ldr w0, [x20, #268] __READ_ONCE_SIZE; 16bc: f9409a81 ldr x1, [x20, #304] return READ_ONCE(head->next) == head; 16c0: b9410a82 ldr w2, [x20, #264] WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 16c4: eb0102df cmp x22, x1 16c8: 540001e0 b.eq 1704 <flush_workqueue+0x2cc> // b.none return (color + 1) % WORK_NR_COLORS; 16cc: 11000443 add w3, w2, #0x1 16d0: 9b397c61 smull x1, w3, w25 16d4: d360fc21 lsr x1, x1, #32 16d8: 0b010061 add w1, w3, w1 16dc: 13037c21 asr w1, w1, #3 16e0: 4b837c21 sub w1, w1, w3, asr #31 16e4: 531c6c24 lsl w4, w1, #4 16e8: 4b010081 sub w1, w4, w1 16ec: 4b010063 sub w3, w3, w1 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && 16f0: 6b00007f cmp w3, w0 16f4: 54000080 b.eq 1704 <flush_workqueue+0x2cc> // b.none 16f8: d4210000 brk #0x800 16fc: b9410a82 ldr w2, [x20, #264] 1700: b9410e80 ldr w0, [x20, #268] return (color + 1) % WORK_NR_COLORS; 1704: 11000400 add w0, w0, #0x1 1708: f9409a83 ldr x3, [x20, #304] 170c: 9b397c01 smull x1, w0, w25 if (!list_empty(&wq->flusher_overflow)) { 1710: eb0302df cmp x22, x3 return (color + 1) % WORK_NR_COLORS; 1714: d360fc21 lsr x1, x1, #32 1718: 0b010001 add w1, w0, w1 171c: 13037c21 asr w1, w1, 
#3 1720: 4b807c21 sub w1, w1, w0, asr #31 1724: 531c6c23 lsl w3, w1, #4 1728: 4b010061 sub w1, w3, w1 172c: 4b010001 sub w1, w0, w1 wq->flush_color = work_next_color(wq->flush_color); 1730: b9010e81 str w1, [x20, #268] if (!list_empty(&wq->flusher_overflow)) { 1734: 54000301 b.ne 1794 <flush_workqueue+0x35c> // b.any 1738: f9409280 ldr x0, [x20, #288] if (list_empty(&wq->flusher_queue)) { 173c: eb0002bf cmp x21, x0 1740: 54000720 b.eq 1824 <flush_workqueue+0x3ec> // b.none WARN_ON_ONCE(wq->flush_color == wq->work_color); 1744: 6b01005f cmp w2, w1 1748: 54000820 b.eq 184c <flush_workqueue+0x414> // b.none WARN_ON_ONCE(wq->flush_color != next->flush_color); 174c: b9401260 ldr w0, [x19, #16] 1750: 6b00003f cmp w1, w0 1754: 54000781 b.ne 1844 <flush_workqueue+0x40c> // b.any __list_del(entry->prev, entry->next); 1758: a9400663 ldp x3, x1, [x19] next->prev = prev; 175c: f9000461 str x1, [x3, #8] if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 1760: 12800002 mov w2, #0xffffffff // #-1 1764: aa1403e0 mov x0, x20 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 1768: f9000023 str x3, [x1] 176c: f900027a str x26, [x19] list->prev = list; 1770: f900067a str x26, [x19, #8] wq->first_flusher = next; 1774: f9008e93 str x19, [x20, #280] if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) 1778: b9410e81 ldr w1, [x20, #268] 177c: 97fffe43 bl 1088 <flush_workqueue_prep_pwqs> 1780: 72001c1f tst w0, #0xff 1784: 54000541 b.ne 182c <flush_workqueue+0x3f4> // b.any wq->first_flusher = NULL; 1788: b9410e80 ldr w0, [x20, #268] 178c: f9008e9f str xzr, [x20, #280] while (true) { 1790: 17ffffb0 b 1650 <flush_workqueue+0x218> list_for_each_entry(tmp, &wq->flusher_overflow, list) 1794: f9409a80 ldr x0, [x20, #304] 1798: eb16001f cmp x0, x22 179c: 540000c0 b.eq 17b4 <flush_workqueue+0x37c> // b.none tmp->flush_color = wq->work_color; 17a0: b9001002 str w2, [x0, #16] list_for_each_entry(tmp, &wq->flusher_overflow, list) 17a4: f9400000 ldr x0, [x0] 17a8: b9410a82 ldr w2, [x20, #264] 17ac: eb16001f cmp x0, x22 17b0: 54ffff81 b.ne 17a0 <flush_workqueue+0x368> // b.any return (color + 1) % WORK_NR_COLORS; 17b4: 11000440 add w0, w2, #0x1 __READ_ONCE_SIZE; 17b8: f9409a81 ldr x1, [x20, #304] 17bc: 9b397c02 smull x2, w0, w25 if (!list_empty(list)) { 17c0: eb0102df cmp x22, x1 17c4: d360fc42 lsr x2, x2, #32 17c8: 0b020002 add w2, w0, w2 17cc: 13037c42 asr w2, w2, #3 17d0: 4b807c42 sub w2, w2, w0, asr #31 17d4: 531c6c41 lsl w1, w2, #4 17d8: 4b020022 sub w2, w1, w2 17dc: 4b020002 sub w2, w0, w2 wq->work_color = work_next_color(wq->work_color); 17e0: b9010a82 str w2, [x20, #264] 17e4: 54000140 b.eq 180c <flush_workqueue+0x3d4> // b.none struct list_head *first = list->next; 17e8: a9528a81 ldp x1, x2, [x20, #296] struct list_head *last = list->prev; 17ec: f9409e80 ldr x0, [x20, #312] first->prev = prev; 17f0: f9000441 str x1, [x2, #8] prev->next = first; 17f4: f9000022 str x2, [x1] last->next = next; 17f8: f9000015 str x21, [x0] next->prev = last; 17fc: f9009680 str x0, [x20, #296] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 1800: b9410a82 ldr w2, [x20, #264] 1804: f9009a96 str x22, [x20, #304] list->prev = list; 1808: f9009e96 str x22, [x20, #312] flush_workqueue_prep_pwqs(wq, -1, wq->work_color); 180c: 12800001 mov w1, #0xffffffff // #-1 1810: aa1403e0 mov x0, x20 1814: 97fffe1d bl 1088 <flush_workqueue_prep_pwqs> 1818: b9410a82 ldr w2, [x20, #264] 181c: b9410e81 ldr w1, [x20, #268] 1820: 17ffffc6 b 1738 <flush_workqueue+0x300> WARN_ON_ONCE(wq->flush_color != wq->work_color); 1824: 6b01005f cmp w2, w1 
    1828:  540002a1  b.ne  187c <flush_workqueue+0x444>  // b.any
 mutex_unlock(&wq->mutex);
    182c:  aa1803e0  mov  x0, x24
    1830:  a94c6bf9  ldp  x25, x26, [sp, #192]
    1834:  94000000  bl  0 <_mutex_unlock>
    1838:  17ffff58  b  1598 <flush_workqueue+0x160>
 if (WARN_ON(!wq_online))
    183c:  d4210000  brk  #0x800
    1840:  17ffff56  b  1598 <flush_workqueue+0x160>
 WARN_ON_ONCE(wq->flush_color != next->flush_color);
    1844:  d4210000  brk  #0x800
    1848:  17ffffc4  b  1758 <flush_workqueue+0x320>
 WARN_ON_ONCE(wq->flush_color == wq->work_color);
    184c:  d4210000  brk  #0x800
    1850:  b9410e81  ldr  w1, [x20, #268]
    1854:  17ffffbe  b  174c <flush_workqueue+0x314>
 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
    1858:  d4210000  brk  #0x800
    185c:  b9410a82  ldr  w2, [x20, #264]
    1860:  b9410e81  ldr  w1, [x20, #268]
    1864:  17ffff34  b  1534 <flush_workqueue+0xfc>
 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
    1868:  aa1503f3  mov  x19, x21
    186c:  aa1503fa  mov  x26, x21
    1870:  17ffff93  b  16bc <flush_workqueue+0x284>
    1874:  aa0203f3  mov  x19, x2
    1878:  17ffff91  b  16bc <flush_workqueue+0x284>
 WARN_ON_ONCE(wq->flush_color != wq->work_color);
    187c:  d4210000  brk  #0x800
    1880:  a94c6bf9  ldp  x25, x26, [sp, #192]
    1884:  17ffff59  b  15e8 <flush_workqueue+0x1b0>
 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
    1888:  d4210000  brk  #0x800
    188c:  17ffff30  b  154c <flush_workqueue+0x114>
 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
    1890:  d4210000  brk  #0x800
    1894:  b9410a82  ldr  w2, [x20, #264]
    1898:  b9410e81  ldr  w1, [x20, #268]
    189c:  17ffff58  b  15fc <flush_workqueue+0x1c4>
 WARN_ON_ONCE(!list_empty(&this_flusher.list));
    18a0:  d4210000  brk  #0x800
    18a4:  17ffff63  b  1630 <flush_workqueue+0x1f8>
 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
    18a8:  d4210000  brk  #0x800
    18ac:  b9410e80  ldr  w0, [x20, #268]
    18b0:  17ffff64  b  1640 <flush_workqueue+0x208>
    18b4:  a90c6bf9  stp  x25, x26, [sp, #192]
 }
    18b8:  94000000  bl  0 <__stack_chk_fail>
    18bc:  d503201f  nop

00000000000018c0 <drain_workqueue>:
 {
    18c0:  a9bb7bfd  stp  x29, x30, [sp, #-80]!
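
(Note on flush_workqueue() above: the large stack frame and the
__stack_chk_fail check exist because the flusher object lives on the caller's
stack. Condensed from the interleaved source; field names as in the listing,
exact layout simplified.)

    struct wq_flusher {
            struct list_head        list;           /* on flusher_queue / flusher_overflow */
            int                     flush_color;    /* colour this flusher waits for */
            struct completion       done;           /* target of the complete() calls,
                                                     * the +0x18 offset seen above */
    };

flush_workqueue() builds one of these on its own stack, publishes it under
wq->mutex (as first_flusher, on flusher_queue, or on flusher_overflow when the
colour space is exhausted), drops the mutex and sleeps in
wait_for_completion(&this_flusher.done). The matching
complete(&wq->first_flusher->done) / complete(&next->done) calls are visible
earlier in this listing, so the waiter's stack object stays reachable from the
workqueue lists for as long as it sleeps.
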
18c4: 910003fd mov x29, sp 18c8: a90153f3 stp x19, x20, [sp, #16] 18cc: aa0003f4 mov x20, x0 18d0: a9025bf5 stp x21, x22, [sp, #32] mutex_lock(&wq->mutex); 18d4: 91008016 add x22, x0, #0x20 18d8: aa1603e0 mov x0, x22 { 18dc: a90363f7 stp x23, x24, [sp, #48] 18e0: f90023f9 str x25, [sp, #64] mutex_lock(&wq->mutex); 18e4: 94000000 bl 0 <_mutex_lock> if (!wq->nr_drainers++) 18e8: b9415a80 ldr w0, [x20, #344] 18ec: 11000401 add w1, w0, #0x1 18f0: b9015a81 str w1, [x20, #344] 18f4: 35000080 cbnz w0, 1904 <drain_workqueue+0x44> wq->flags |= __WQ_DRAINING; 18f8: b9420280 ldr w0, [x20, #512] 18fc: 32100000 orr w0, w0, #0x10000 1900: b9020280 str w0, [x20, #512] mutex_unlock(&wq->mutex); 1904: aa1603e0 mov x0, x22 for_each_pwq(pwq, wq) { 1908: 90000017 adrp x23, 0 <pwq_activate_delayed_work> 190c: 90000018 adrp x24, 0 <pwq_activate_delayed_work> mutex_unlock(&wq->mutex); 1910: 94000000 bl 0 <_mutex_unlock> for_each_pwq(pwq, wq) { 1914: 910002f7 add x23, x23, #0x0 1918: 91000318 add x24, x24, #0x0 unsigned int flush_cnt = 0; 191c: 52800015 mov w21, #0x0 // #0 1920: 14000015 b 1974 <drain_workqueue+0xb4> __READ_ONCE_SIZE; 1924: f9403262 ldr x2, [x19, #96] return READ_ONCE(head->next) == head; 1928: f8460420 ldr x0, [x1], #96 drained = !pwq->nr_active && list_empty(&pwq->delayed_works); 192c: eb02003f cmp x1, x2 1930: 540005c0 b.eq 19e8 <drain_workqueue+0x128> // b.none if (++flush_cnt == 10 || 1934: 110006b5 add w21, w21, #0x1 spin_unlock_irq(&pwq->pool->lock); 1938: 94000000 bl 0 <rt_spin_unlock> if (++flush_cnt == 10 || 193c: 71002abf cmp w21, #0xa 1940: 54000480 b.eq 19d0 <drain_workqueue+0x110> // b.none 1944: d503201f nop (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 1948: 5290a3e0 mov w0, #0x851f // #34079 194c: 52800c81 mov w1, #0x64 // #100 1950: 72aa3d60 movk w0, #0x51eb, lsl #16 1954: 710fa2bf cmp w21, #0x3e8 1958: 9ba07ea0 umull x0, w21, w0 195c: d365fc00 lsr x0, x0, #37 1960: 1b01d400 msub w0, w0, w1, w21 1964: 7a409800 ccmp w0, #0x0, #0x0, ls // ls = plast 1968: 54000340 b.eq 19d0 <drain_workqueue+0x110> // b.none mutex_unlock(&wq->mutex); 196c: aa1603e0 mov x0, x22 1970: 94000000 bl 0 <_mutex_unlock> flush_workqueue(wq); 1974: aa1403e0 mov x0, x20 1978: 94000000 bl 1438 <flush_workqueue> mutex_lock(&wq->mutex); 197c: aa1603e0 mov x0, x22 1980: 94000000 bl 0 <_mutex_lock> 1984: f9400293 ldr x19, [x20] for_each_pwq(pwq, wq) { 1988: eb13029f cmp x20, x19 198c: d101c273 sub x19, x19, #0x70 1990: 54000360 b.eq 19fc <drain_workqueue+0x13c> // b.none 1994: 91036299 add x25, x20, #0xd8 1998: 94000000 bl 0 <debug_lockdep_rcu_enabled> 199c: 34000060 cbz w0, 19a8 <drain_workqueue+0xe8> 19a0: 394012e0 ldrb w0, [x23, #4] 19a4: 340004a0 cbz w0, 1a38 <drain_workqueue+0x178> spin_lock_irq(&pwq->pool->lock); 19a8: f9400260 ldr x0, [x19] 19ac: 94000000 bl 0 <rt_spin_lock> drained = !pwq->nr_active && list_empty(&pwq->delayed_works); 19b0: b9405a60 ldr w0, [x19, #88] 19b4: aa1303e1 mov x1, x19 19b8: 34fffb60 cbz w0, 1924 <drain_workqueue+0x64> 19bc: f9400260 ldr x0, [x19] if (++flush_cnt == 10 || 19c0: 110006b5 add w21, w21, #0x1 spin_unlock_irq(&pwq->pool->lock); 19c4: 94000000 bl 0 <rt_spin_unlock> if (++flush_cnt == 10 || 19c8: 71002abf cmp w21, #0xa 19cc: 54fffbe1 b.ne 1948 <drain_workqueue+0x88> // b.any pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n", 19d0: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 19d4: 2a1503e2 mov w2, w21 19d8: 9106a281 add x1, x20, #0x1a8 19dc: 91000000 add x0, x0, #0x0 19e0: 94000000 bl 0 <printk> 19e4: 17ffffe2 b 196c <drain_workqueue+0xac> 
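
(Note on drain_workqueue(), whose body starts above and continues below: a
condensed reconstruction from the interleaved source. The rt_spin_lock /
rt_spin_unlock calls in the listing are the RT-patched form of the
spin_lock_irq() on pool->lock.)

    void drain_workqueue(struct workqueue_struct *wq)
    {
            unsigned int flush_cnt = 0;
            struct pool_workqueue *pwq;

            mutex_lock(&wq->mutex);
            if (!wq->nr_drainers++)
                    wq->flags |= __WQ_DRAINING;     /* tells __queue_work() the wq is draining */
            mutex_unlock(&wq->mutex);
    reflush:
            flush_workqueue(wq);

            mutex_lock(&wq->mutex);
            for_each_pwq(pwq, wq) {
                    bool drained;

                    spin_lock_irq(&pwq->pool->lock);
                    drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
                    spin_unlock_irq(&pwq->pool->lock);

                    if (drained)
                            continue;

                    if (++flush_cnt == 10 ||
                        (flush_cnt % 100 == 0 && flush_cnt <= 1000))
                            pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
                                    wq->name, flush_cnt);

                    mutex_unlock(&wq->mutex);
                    goto reflush;
            }

            if (!--wq->nr_drainers)
                    wq->flags &= ~__WQ_DRAINING;
            mutex_unlock(&wq->mutex);
    }
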
spin_unlock_irq(&pwq->pool->lock); 19e8: 94000000 bl 0 <rt_spin_unlock> 19ec: f9403a73 ldr x19, [x19, #112] for_each_pwq(pwq, wq) { 19f0: eb13029f cmp x20, x19 19f4: d101c273 sub x19, x19, #0x70 19f8: 54fffd01 b.ne 1998 <drain_workqueue+0xd8> // b.any if (!--wq->nr_drainers) 19fc: b9415a80 ldr w0, [x20, #344] 1a00: 51000400 sub w0, w0, #0x1 1a04: b9015a80 str w0, [x20, #344] 1a08: 35000080 cbnz w0, 1a18 <drain_workqueue+0x158> wq->flags &= ~__WQ_DRAINING; 1a0c: b9420280 ldr w0, [x20, #512] 1a10: 120f7800 and w0, w0, #0xfffeffff 1a14: b9020280 str w0, [x20, #512] mutex_unlock(&wq->mutex); 1a18: aa1603e0 mov x0, x22 1a1c: 94000000 bl 0 <_mutex_unlock> } 1a20: a94153f3 ldp x19, x20, [sp, #16] 1a24: a9425bf5 ldp x21, x22, [sp, #32] 1a28: a94363f7 ldp x23, x24, [sp, #48] 1a2c: f94023f9 ldr x25, [sp, #64] 1a30: a8c57bfd ldp x29, x30, [sp], #80 1a34: d65f03c0 ret for_each_pwq(pwq, wq) { 1a38: 94000000 bl 0 <rcu_read_lock_held> 1a3c: 35fffb60 cbnz w0, 19a8 <drain_workqueue+0xe8> 1a40: 12800001 mov w1, #0xffffffff // #-1 1a44: aa1903e0 mov x0, x25 1a48: 94000000 bl 0 <lock_is_held_type> 1a4c: 35fffae0 cbnz w0, 19a8 <drain_workqueue+0xe8> 1a50: 52800023 mov w3, #0x1 // #1 1a54: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 1a58: aa1803e2 mov x2, x24 1a5c: 91000000 add x0, x0, #0x0 1a60: 52816561 mov w1, #0xb2b // #2859 1a64: 390012e3 strb w3, [x23, #4] 1a68: 94000000 bl 0 <lockdep_rcu_suspicious> 1a6c: 17ffffcf b 19a8 <drain_workqueue+0xe8> 0000000000001a70 <get_work_pool>: { 1a70: a9be7bfd stp x29, x30, [sp, #-32]! 1a74: 910003fd mov x29, sp 1a78: a90153f3 stp x19, x20, [sp, #16] 1a7c: f9400013 ldr x19, [x0] assert_rcu_or_pool_mutex(); 1a80: 94000000 bl 0 <debug_lockdep_rcu_enabled> 1a84: 340000a0 cbz w0, 1a98 <get_work_pool+0x28> 1a88: 90000014 adrp x20, 0 <pwq_activate_delayed_work> 1a8c: 91000294 add x20, x20, #0x0 1a90: 39401680 ldrb w0, [x20, #5] 1a94: 34000260 cbz w0, 1ae0 <get_work_pool+0x70> if (data & WORK_STRUCT_PWQ) 1a98: 371001b3 tbnz w19, #2, 1acc <get_work_pool+0x5c> pool_id = data >> WORK_OFFQ_POOL_SHIFT; 1a9c: d345fe73 lsr x19, x19, #5 if (pool_id == WORK_OFFQ_POOL_NONE) 1aa0: 12b00000 mov w0, #0x7fffffff // #2147483647 1aa4: 6b00027f cmp w19, w0 1aa8: 540003e0 b.eq 1b24 <get_work_pool+0xb4> // b.none return radix_tree_lookup(&idr->idr_rt, id); 1aac: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 1ab0: 91000000 add x0, x0, #0x0 1ab4: 93407e61 sxtw x1, w19 1ab8: 9103a000 add x0, x0, #0xe8 1abc: 94000000 bl 0 <radix_tree_lookup> } 1ac0: a94153f3 ldp x19, x20, [sp, #16] 1ac4: a8c27bfd ldp x29, x30, [sp], #32 1ac8: d65f03c0 ret (data & WORK_STRUCT_WQ_DATA_MASK))->pool; 1acc: 9278de73 and x19, x19, #0xffffffffffffff00 1ad0: f9400260 ldr x0, [x19] } 1ad4: a94153f3 ldp x19, x20, [sp, #16] 1ad8: a8c27bfd ldp x29, x30, [sp], #32 1adc: d65f03c0 ret assert_rcu_or_pool_mutex(); 1ae0: 94000000 bl 0 <rcu_read_lock_held> 1ae4: 35fffda0 cbnz w0, 1a98 <get_work_pool+0x28> 1ae8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 1aec: 91000000 add x0, x0, #0x0 1af0: 9102e000 add x0, x0, #0xb8 1af4: 12800001 mov w1, #0xffffffff // #-1 1af8: 94000000 bl 0 <lock_is_held_type> 1afc: 35fffce0 cbnz w0, 1a98 <get_work_pool+0x28> 1b00: 52800023 mov w3, #0x1 // #1 1b04: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 1b08: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 1b0c: 91000042 add x2, x2, #0x0 1b10: 91000000 add x0, x0, #0x0 1b14: 52805d81 mov w1, #0x2ec // #748 1b18: 39001683 strb w3, [x20, #5] 1b1c: 94000000 bl 0 <lockdep_rcu_suspicious> 1b20: 17ffffde b 1a98 <get_work_pool+0x28> return NULL; 1b24: 
d2800000 mov x0, #0x0 // #0 } 1b28: a94153f3 ldp x19, x20, [sp, #16] 1b2c: a8c27bfd ldp x29, x30, [sp], #32 1b30: d65f03c0 ret 1b34: d503201f nop 0000000000001b38 <work_busy>: { 1b38: a9bd7bfd stp x29, x30, [sp, #-48]! 1b3c: 910003fd mov x29, sp 1b40: a90153f3 stp x19, x20, [sp, #16] 1b44: aa0003f3 mov x19, x0 #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void rcu_lock_acquire(struct lockdep_map *map) { lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 1b48: 90000014 adrp x20, 0 <rcu_lock_map> 1b4c: a9025bf5 stp x21, x22, [sp, #32] 1b50: 91000294 add x20, x20, #0x0 1b54: f9400015 ldr x21, [x0] * read-side critical sections may be preempted and they may also block, but * only when acquiring spinlocks that are subject to priority inheritance. */ static __always_inline void rcu_read_lock(void) { __rcu_read_lock(); 1b58: 94000000 bl 0 <__rcu_read_lock> lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 1b5c: aa1403e0 mov x0, x20 1b60: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 1b64: 910000c6 add x6, x6, #0x0 1b68: d2800005 mov x5, #0x0 // #0 1b6c: 52800004 mov w4, #0x0 // #0 1b70: 52800043 mov w3, #0x2 // #2 1b74: 52800002 mov w2, #0x0 // #0 1b78: 52800001 mov w1, #0x0 // #0 unsigned int ret = 0; 1b7c: 120002b5 and w21, w21, #0x1 1b80: 94000000 bl 0 <lock_acquire> __acquire(RCU); rcu_lock_acquire(&rcu_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), 1b84: 94000000 bl 0 <debug_lockdep_rcu_enabled> 1b88: 340000a0 cbz w0, 1b9c <work_busy+0x64> 1b8c: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 1b90: 910002d6 add x22, x22, #0x0 1b94: 39401ac0 ldrb w0, [x22, #6] 1b98: 34000740 cbz w0, 1c80 <work_busy+0x148> pool = get_work_pool(work); 1b9c: aa1303e0 mov x0, x19 1ba0: 97ffffb4 bl 1a70 <get_work_pool> 1ba4: aa0003f6 mov x22, x0 if (pool) { 1ba8: b40001a0 cbz x0, 1bdc <work_busy+0xa4> spin_lock_irqsave(&pool->lock, flags); 1bac: 94000000 bl 0 <rt_spin_lock> #endif static __always_inline u32 hash_64_generic(u64 val, unsigned int bits) { #if BITS_PER_LONG == 64 /* 64x64-bit multiply is efficient on all 64-bit processors */ return val * GOLDEN_RATIO_64 >> (64 - bits); 1bb0: d2907d60 mov x0, #0x83eb // #33771 1bb4: f2b016a0 movk x0, #0x80b5, lsl #16 1bb8: f2d0c8c0 movk x0, #0x8646, lsl #32 1bbc: f2ec3900 movk x0, #0x61c8, lsl #48 1bc0: 9b007e60 mul x0, x19, x0 hash_for_each_possible(pool->busy_hash, worker, hentry, 1bc4: d37afc00 lsr x0, x0, #58 1bc8: 8b000ec0 add x0, x22, x0, lsl #3 1bcc: f940f800 ldr x0, [x0, #496] 1bd0: b5000340 cbnz x0, 1c38 <work_busy+0x100> spin_unlock_irqrestore(&pool->lock, flags); 1bd4: aa1603e0 mov x0, x22 1bd8: 94000000 bl 0 <rt_spin_unlock> * * See rcu_read_lock() for more information. 
*/ static inline void rcu_read_unlock(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), 1bdc: 94000000 bl 0 <debug_lockdep_rcu_enabled> 1be0: 340000a0 cbz w0, 1bf4 <work_busy+0xbc> 1be4: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 1be8: 91000273 add x19, x19, #0x0 1bec: 39401e60 ldrb w0, [x19, #7] 1bf0: 34000300 cbz w0, 1c50 <work_busy+0x118> "rcu_read_unlock() used illegally while idle"); __release(RCU); __rcu_read_unlock(); 1bf4: 94000000 bl 0 <__rcu_read_unlock> lock_release(map, 1, _THIS_IP_); 1bf8: aa1403e0 mov x0, x20 1bfc: 52800021 mov w1, #0x1 // #1 1c00: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 1c04: 91000042 add x2, x2, #0x0 1c08: 94000000 bl 0 <lock_release> } 1c0c: 2a1503e0 mov w0, w21 1c10: a94153f3 ldp x19, x20, [sp, #16] 1c14: a9425bf5 ldp x21, x22, [sp, #32] 1c18: a8c37bfd ldp x29, x30, [sp], #48 1c1c: d65f03c0 ret if (worker->current_work == work && 1c20: f9400c02 ldr x2, [x0, #24] 1c24: f9400e61 ldr x1, [x19, #24] 1c28: eb01005f cmp x2, x1 1c2c: 540000e0 b.eq 1c48 <work_busy+0x110> // b.none hash_for_each_possible(pool->busy_hash, worker, hentry, 1c30: f9400000 ldr x0, [x0] 1c34: b4fffd00 cbz x0, 1bd4 <work_busy+0x9c> if (worker->current_work == work && 1c38: f9400801 ldr x1, [x0, #16] 1c3c: eb01027f cmp x19, x1 1c40: 54ffff81 b.ne 1c30 <work_busy+0xf8> // b.any 1c44: 17fffff7 b 1c20 <work_busy+0xe8> ret |= WORK_BUSY_RUNNING; 1c48: 321f02b5 orr w21, w21, #0x2 1c4c: 17ffffe2 b 1bd4 <work_busy+0x9c> RCU_LOCKDEP_WARN(!rcu_is_watching(), 1c50: 94000000 bl 0 <rcu_is_watching> 1c54: 72001c1f tst w0, #0xff 1c58: 54fffce1 b.ne 1bf4 <work_busy+0xbc> // b.any 1c5c: 52800023 mov w3, #0x1 // #1 1c60: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 1c64: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 1c68: 91000042 add x2, x2, #0x0 1c6c: 91000000 add x0, x0, #0x0 1c70: 528051a1 mov w1, #0x28d // #653 1c74: 39001e63 strb w3, [x19, #7] 1c78: 94000000 bl 0 <lockdep_rcu_suspicious> 1c7c: 17ffffde b 1bf4 <work_busy+0xbc> RCU_LOCKDEP_WARN(!rcu_is_watching(), 1c80: 94000000 bl 0 <rcu_is_watching> 1c84: 72001c1f tst w0, #0xff 1c88: 54fff8a1 b.ne 1b9c <work_busy+0x64> // b.any 1c8c: 52800023 mov w3, #0x1 // #1 1c90: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 1c94: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 1c98: 91000042 add x2, x2, #0x0 1c9c: 91000000 add x0, x0, #0x0 1ca0: 52804b41 mov w1, #0x25a // #602 1ca4: 39001ac3 strb w3, [x22, #6] 1ca8: 94000000 bl 0 <lockdep_rcu_suspicious> 1cac: 17ffffbc b 1b9c <work_busy+0x64> 0000000000001cb0 <cwt_wakefn>: if (cwait->work != key) 1cb0: f9401404 ldr x4, [x0, #40] 1cb4: eb03009f cmp x4, x3 1cb8: 54000060 b.eq 1cc4 <cwt_wakefn+0x14> // b.none return 0; 1cbc: 52800000 mov w0, #0x0 // #0 } 1cc0: d65f03c0 ret { 1cc4: a9bf7bfd stp x29, x30, [sp, #-16]! 1cc8: 910003fd mov x29, sp return autoremove_wake_function(wait, mode, sync, key); 1ccc: 94000000 bl 0 <autoremove_wake_function> } 1cd0: a8c17bfd ldp x29, x30, [sp], #16 1cd4: d65f03c0 ret 0000000000001cd8 <wq_unbound_cpumask_show>: { 1cd8: a9be7bfd stp x29, x30, [sp, #-32]! 
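
(Note on get_work_pool() / work_busy() above: get_work_pool() decodes the
atomic data word inside the work item, which is why work_busy() only needs
rcu_read_lock() plus pool->lock. Condensed from the interleaved source; the
idr lookup shows up as radix_tree_lookup in the listing.)

    static struct worker_pool *get_work_pool(struct work_struct *work)
    {
            unsigned long data = atomic_long_read(&work->data);
            int pool_id;

            assert_rcu_or_pool_mutex();     /* caller holds RCU or wq_pool_mutex */

            if (data & WORK_STRUCT_PWQ)     /* data carries a pool_workqueue pointer */
                    return ((struct pool_workqueue *)
                            (data & WORK_STRUCT_WQ_DATA_MASK))->pool;

            pool_id = data >> WORK_OFFQ_POOL_SHIFT;
            if (pool_id == WORK_OFFQ_POOL_NONE)
                    return NULL;

            return idr_find(&worker_pool_idr, pool_id);
    }

work_busy() is then just rcu_read_lock(), get_work_pool(), and a lookup in
pool->busy_hash to see whether some worker is currently running the item. A
work item whose data word is stale or corrupted would send callers like these
to a bogus pool, which is one more thing worth ruling out when correlating the
oops with this listing.
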
1cdc: 910003fd mov x29, sp 1ce0: a90153f3 stp x19, x20, [sp, #16] mutex_lock(&wq_pool_mutex); 1ce4: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 1ce8: 91000273 add x19, x19, #0x0 { 1cec: aa0203f4 mov x20, x2 mutex_lock(&wq_pool_mutex); 1cf0: aa1303e0 mov x0, x19 1cf4: 94000000 bl 0 <_mutex_lock> written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 1cf8: 90000000 adrp x0, 0 <nr_cpu_ids> 1cfc: 90000004 adrp x4, 0 <pwq_activate_delayed_work> 1d00: 91000084 add x4, x4, #0x0 1d04: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 1d08: b9400003 ldr w3, [x0] 1d0c: 91014084 add x4, x4, #0x50 1d10: 91000042 add x2, x2, #0x0 1d14: d2820001 mov x1, #0x1000 // #4096 1d18: aa1403e0 mov x0, x20 1d1c: 94000000 bl 0 <scnprintf> 1d20: 2a0003f4 mov w20, w0 mutex_unlock(&wq_pool_mutex); 1d24: aa1303e0 mov x0, x19 1d28: 94000000 bl 0 <_mutex_unlock> } 1d2c: 93407e80 sxtw x0, w20 1d30: a94153f3 ldp x19, x20, [sp, #16] 1d34: a8c27bfd ldp x29, x30, [sp], #32 1d38: d65f03c0 ret 1d3c: d503201f nop 0000000000001d40 <max_active_show>: { 1d40: a9bf7bfd stp x29, x30, [sp, #-16]! return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 1d44: d2820001 mov x1, #0x1000 // #4096 { 1d48: 910003fd mov x29, sp return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 1d4c: f85f8003 ldur x3, [x0, #-8] { 1d50: aa0203e0 mov x0, x2 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); 1d54: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 1d58: 91000042 add x2, x2, #0x0 1d5c: b9415c63 ldr w3, [x3, #348] 1d60: 94000000 bl 0 <scnprintf> } 1d64: 93407c00 sxtw x0, w0 1d68: a8c17bfd ldp x29, x30, [sp], #16 1d6c: d65f03c0 ret 0000000000001d70 <per_cpu_show>: { 1d70: a9bf7bfd stp x29, x30, [sp, #-16]! return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 1d74: d2820001 mov x1, #0x1000 // #4096 { 1d78: 910003fd mov x29, sp return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 1d7c: f85f8003 ldur x3, [x0, #-8] { 1d80: aa0203e0 mov x0, x2 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); 1d84: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 1d88: 91000042 add x2, x2, #0x0 1d8c: b9420063 ldr w3, [x3, #512] 1d90: d27f0063 eor x3, x3, #0x2 1d94: 53010463 ubfx w3, w3, #1, #1 1d98: 94000000 bl 0 <scnprintf> } 1d9c: 93407c00 sxtw x0, w0 1da0: a8c17bfd ldp x29, x30, [sp], #16 1da4: d65f03c0 ret 0000000000001da8 <wq_numa_show>: { 1da8: a9bd7bfd stp x29, x30, [sp, #-48]! 1dac: 910003fd mov x29, sp 1db0: a90153f3 stp x19, x20, [sp, #16] 1db4: f90013f5 str x21, [sp, #32] 1db8: aa0203f5 mov x21, x2 return wq_dev->wq; 1dbc: f85f8014 ldur x20, [x0, #-8] mutex_lock(&wq->mutex); 1dc0: 91008293 add x19, x20, #0x20 1dc4: aa1303e0 mov x0, x19 1dc8: 94000000 bl 0 <_mutex_lock> !wq->unbound_attrs->no_numa); 1dcc: f940b283 ldr x3, [x20, #352] written = scnprintf(buf, PAGE_SIZE, "%d\n", 1dd0: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 1dd4: 91000042 add x2, x2, #0x0 1dd8: d2820001 mov x1, #0x1000 // #4096 1ddc: aa1503e0 mov x0, x21 !wq->unbound_attrs->no_numa); 1de0: 39404063 ldrb w3, [x3, #16] written = scnprintf(buf, PAGE_SIZE, "%d\n", 1de4: 52000063 eor w3, w3, #0x1 1de8: 94000000 bl 0 <scnprintf> 1dec: 2a0003f4 mov w20, w0 mutex_unlock(&wq->mutex); 1df0: aa1303e0 mov x0, x19 1df4: 94000000 bl 0 <_mutex_unlock> } 1df8: 93407e80 sxtw x0, w20 1dfc: a94153f3 ldp x19, x20, [sp, #16] 1e00: f94013f5 ldr x21, [sp, #32] 1e04: a8c37bfd ldp x29, x30, [sp], #48 1e08: d65f03c0 ret 1e0c: d503201f nop 0000000000001e10 <wq_cpumask_show>: { 1e10: a9bd7bfd stp x29, x30, [sp, #-48]! 
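
(Note on the sysfs handlers above and below: max_active_show, per_cpu_show,
wq_numa_show, wq_cpumask_show and wq_nice_show all follow the same pattern,
while wq_unbound_cpumask_show is the odd one out, printing the global unbound
cpumask under wq_pool_mutex. The "ldur x.., [x0, #-8]" loads are container_of()
from the struct device back to its wq_device, i.e. a dev_to_wq() helper; the
handler then takes wq->mutex where needed and scnprintf()s into the PAGE_SIZE
sysfs buffer. A representative pair, condensed from the interleaved source,
not a verbatim copy of the tree:)

    static struct workqueue_struct *dev_to_wq(struct device *dev)
    {
            struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);

            return wq_dev->wq;
    }

    static ssize_t max_active_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
    {
            struct workqueue_struct *wq = dev_to_wq(dev);

            return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
    }

max_active_store further down is the matching write side: it sscanf()s the
value and hands it to workqueue_set_max_active() at 970 earlier in this
listing.
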
1e14: 910003fd mov x29, sp 1e18: a90153f3 stp x19, x20, [sp, #16] 1e1c: f90013f5 str x21, [sp, #32] 1e20: aa0203f5 mov x21, x2 return wq_dev->wq; 1e24: f85f8014 ldur x20, [x0, #-8] mutex_lock(&wq->mutex); 1e28: 91008293 add x19, x20, #0x20 1e2c: aa1303e0 mov x0, x19 1e30: 94000000 bl 0 <_mutex_lock> written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 1e34: 90000000 adrp x0, 0 <nr_cpu_ids> 1e38: 90000002 adrp x2, 0 <pwq_activate_delayed_work> cpumask_pr_args(wq->unbound_attrs->cpumask)); 1e3c: f940b284 ldr x4, [x20, #352] written = scnprintf(buf, PAGE_SIZE, "%*pb\n", 1e40: 91000042 add x2, x2, #0x0 1e44: b9400003 ldr w3, [x0] 1e48: d2820001 mov x1, #0x1000 // #4096 1e4c: 91002084 add x4, x4, #0x8 1e50: aa1503e0 mov x0, x21 1e54: 94000000 bl 0 <scnprintf> 1e58: 2a0003f4 mov w20, w0 mutex_unlock(&wq->mutex); 1e5c: aa1303e0 mov x0, x19 1e60: 94000000 bl 0 <_mutex_unlock> } 1e64: 93407e80 sxtw x0, w20 1e68: a94153f3 ldp x19, x20, [sp, #16] 1e6c: f94013f5 ldr x21, [sp, #32] 1e70: a8c37bfd ldp x29, x30, [sp], #48 1e74: d65f03c0 ret 0000000000001e78 <wq_nice_show>: { 1e78: a9bd7bfd stp x29, x30, [sp, #-48]! 1e7c: 910003fd mov x29, sp 1e80: a90153f3 stp x19, x20, [sp, #16] 1e84: f90013f5 str x21, [sp, #32] 1e88: aa0203f5 mov x21, x2 return wq_dev->wq; 1e8c: f85f8014 ldur x20, [x0, #-8] mutex_lock(&wq->mutex); 1e90: 91008293 add x19, x20, #0x20 1e94: aa1303e0 mov x0, x19 1e98: 94000000 bl 0 <_mutex_lock> written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); 1e9c: f940b283 ldr x3, [x20, #352] 1ea0: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 1ea4: 91000042 add x2, x2, #0x0 1ea8: d2820001 mov x1, #0x1000 // #4096 1eac: aa1503e0 mov x0, x21 1eb0: b9400063 ldr w3, [x3] 1eb4: 94000000 bl 0 <scnprintf> 1eb8: 2a0003f4 mov w20, w0 mutex_unlock(&wq->mutex); 1ebc: aa1303e0 mov x0, x19 1ec0: 94000000 bl 0 <_mutex_unlock> } 1ec4: 93407e80 sxtw x0, w20 1ec8: a94153f3 ldp x19, x20, [sp, #16] 1ecc: f94013f5 ldr x21, [sp, #32] 1ed0: a8c37bfd ldp x29, x30, [sp], #48 1ed4: d65f03c0 ret 0000000000001ed8 <max_active_store>: { 1ed8: d10103ff sub sp, sp, #0x40 1edc: aa0203e4 mov x4, x2 if (sscanf(buf, "%d", &val) != 1 || val <= 0) 1ee0: 910013e2 add x2, sp, #0x4 { 1ee4: a9017bfd stp x29, x30, [sp, #16] 1ee8: 910043fd add x29, sp, #0x10 1eec: a90253f3 stp x19, x20, [sp, #32] 1ef0: 90000013 adrp x19, 0 <__stack_chk_guard> 1ef4: 91000273 add x19, x19, #0x0 1ef8: f9001bf5 str x21, [sp, #48] 1efc: f9400261 ldr x1, [x19] 1f00: f90007e1 str x1, [sp, #8] 1f04: d2800001 mov x1, #0x0 // #0 1f08: aa0303f4 mov x20, x3 if (sscanf(buf, "%d", &val) != 1 || val <= 0) 1f0c: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 1f10: 91000021 add x1, x1, #0x0 return wq_dev->wq; 1f14: f85f8015 ldur x21, [x0, #-8] if (sscanf(buf, "%d", &val) != 1 || val <= 0) 1f18: aa0403e0 mov x0, x4 1f1c: 94000000 bl 0 <sscanf> 1f20: 7100041f cmp w0, #0x1 1f24: 54000201 b.ne 1f64 <max_active_store+0x8c> // b.any 1f28: b94007e1 ldr w1, [sp, #4] 1f2c: 7100003f cmp w1, #0x0 1f30: 540001ad b.le 1f64 <max_active_store+0x8c> workqueue_set_max_active(wq, val); 1f34: aa1503e0 mov x0, x21 1f38: 94000000 bl 970 <workqueue_set_max_active> return count; 1f3c: aa1403e0 mov x0, x20 } 1f40: f94007e2 ldr x2, [sp, #8] 1f44: f9400261 ldr x1, [x19] 1f48: ca010041 eor x1, x2, x1 1f4c: b5000101 cbnz x1, 1f6c <max_active_store+0x94> 1f50: a9417bfd ldp x29, x30, [sp, #16] 1f54: a94253f3 ldp x19, x20, [sp, #32] 1f58: f9401bf5 ldr x21, [sp, #48] 1f5c: 910103ff add sp, sp, #0x40 1f60: d65f03c0 ret return -EINVAL; 1f64: 928002a0 mov x0, #0xffffffffffffffea // #-22 1f68: 
17fffff6 b 1f40 <max_active_store+0x68> } 1f6c: 94000000 bl 0 <__stack_chk_fail> 0000000000001f70 <alloc_worker.isra.1>: static struct worker *alloc_worker(int node) 1f70: a9bf7bfd stp x29, x30, [sp, #-16]! int i = kmalloc_index(size); if (!i) return ZERO_SIZE_PTR; return kmem_cache_alloc_node_trace( 1f74: 90000000 adrp x0, 0 <kmalloc_caches> return kmem_cache_alloc(s, flags); 1f78: 52901801 mov w1, #0x80c0 // #32960 1f7c: 910003fd mov x29, sp 1f80: f9400000 ldr x0, [x0] 1f84: 72a02801 movk w1, #0x140, lsl #16 1f88: 94000000 bl 0 <kmem_cache_alloc> if (worker) { 1f8c: b4000160 cbz x0, 1fb8 <alloc_worker.isra.1+0x48> case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 1f90: f9000000 str x0, [x0] INIT_LIST_HEAD(&worker->scheduled); 1f94: 9100c002 add x2, x0, #0x30 1f98: f9001802 str x2, [x0, #48] INIT_LIST_HEAD(&worker->node); 1f9c: 91014001 add x1, x0, #0x50 worker->flags = WORKER_PREP; 1fa0: 52800103 mov w3, #0x8 // #8 list->prev = list; 1fa4: f9000400 str x0, [x0, #8] 1fa8: f9001c02 str x2, [x0, #56] 1fac: f9002801 str x1, [x0, #80] 1fb0: f9002c01 str x1, [x0, #88] 1fb4: b9006803 str w3, [x0, #104] } 1fb8: a8c17bfd ldp x29, x30, [sp], #16 1fbc: d65f03c0 ret 0000000000001fc0 <create_worker>: { 1fc0: d10183ff sub sp, sp, #0x60 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL); 1fc4: 52801803 mov w3, #0xc0 // #192 1fc8: 72a02803 movk w3, #0x140, lsl #16 1fcc: 52800002 mov w2, #0x0 // #0 { 1fd0: a9027bfd stp x29, x30, [sp, #32] 1fd4: 910083fd add x29, sp, #0x20 1fd8: a9045bf5 stp x21, x22, [sp, #64] 1fdc: 90000015 adrp x21, 0 <__stack_chk_guard> 1fe0: 910002b5 add x21, x21, #0x0 1fe4: a90563f7 stp x23, x24, [sp, #80] id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL); 1fe8: 9113e017 add x23, x0, #0x4f8 { 1fec: f94002a1 ldr x1, [x21] 1ff0: f9000fe1 str x1, [sp, #24] 1ff4: d2800001 mov x1, #0x0 // #0 1ff8: a90353f3 stp x19, x20, [sp, #48] 1ffc: aa0003f4 mov x20, x0 id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL); 2000: aa1703e0 mov x0, x23 2004: 94000000 bl 0 <ida_simple_get> if (id < 0) 2008: 37f80960 tbnz w0, #31, 2134 <create_worker+0x174> worker = alloc_worker(pool->node); 200c: 2a0003f6 mov w22, w0 2010: 97ffffd8 bl 1f70 <alloc_worker.isra.1> 2014: aa0003f3 mov x19, x0 if (!worker) 2018: b4000800 cbz x0, 2118 <create_worker+0x158> worker->pool = pool; 201c: f9002414 str x20, [x0, #72] worker->id = id; 2020: b9006c16 str w22, [x0, #108] if (pool->cpu >= 0) 2024: b940f283 ldr w3, [x20, #240] 2028: 37f808a3 tbnz w3, #31, 213c <create_worker+0x17c> pool->attrs->nice < 0 ? 
"H" : ""); 202c: f9428680 ldr x0, [x20, #1288] snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id, 2030: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 2034: 90000005 adrp x5, 0 <pwq_activate_delayed_work> 2038: 91000021 add x1, x1, #0x0 203c: 910000a5 add x5, x5, #0x0 2040: 910023f8 add x24, sp, #0x8 2044: b9400004 ldr w4, [x0] 2048: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 204c: aa1803e0 mov x0, x24 2050: 91000042 add x2, x2, #0x0 2054: 7100009f cmp w4, #0x0 2058: 2a1603e4 mov w4, w22 205c: 9a81b0a5 csel x5, x5, x1, lt // lt = tstop 2060: d2800201 mov x1, #0x10 // #16 2064: 94000000 bl 0 <snprintf> worker->task = kthread_create_on_node(worker_thread, worker, pool->node, 2068: b940f682 ldr w2, [x20, #244] 206c: aa1803e4 mov x4, x24 2070: 90000003 adrp x3, 0 <pwq_activate_delayed_work> 2074: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 2078: 91000063 add x3, x3, #0x0 207c: 91000000 add x0, x0, #0x0 2080: aa1303e1 mov x1, x19 2084: 94000000 bl 0 <kthread_create_on_node> 2088: f9002260 str x0, [x19, #64] if (IS_ERR(worker->task)) 208c: b140041f cmn x0, #0x1, lsl #12 2090: 54000448 b.hi 2118 <create_worker+0x158> // b.pmore set_user_nice(worker->task, pool->attrs->nice); 2094: f9428681 ldr x1, [x20, #1288] 2098: b9800021 ldrsw x1, [x1] 209c: 94000000 bl 0 <set_user_nice> kthread_bind_mask(worker->task, pool->attrs->cpumask); 20a0: f9402260 ldr x0, [x19, #64] 20a4: f9428681 ldr x1, [x20, #1288] 20a8: 91002021 add x1, x1, #0x8 20ac: 94000000 bl 0 <kthread_bind_mask> worker_attach_to_pool(worker, pool); 20b0: aa1403e1 mov x1, x20 20b4: aa1303e0 mov x0, x19 20b8: 97fffc74 bl 1288 <worker_attach_to_pool> spin_lock_irq(&pool->lock); 20bc: aa1403e0 mov x0, x20 20c0: 94000000 bl 0 <rt_spin_lock> worker->pool->nr_workers++; 20c4: f9402662 ldr x2, [x19, #72] worker_enter_idle(worker); 20c8: aa1303e0 mov x0, x19 worker->pool->nr_workers++; 20cc: b9411841 ldr w1, [x2, #280] 20d0: 11000421 add w1, w1, #0x1 20d4: b9011841 str w1, [x2, #280] worker_enter_idle(worker); 20d8: 97fff808 bl f8 <worker_enter_idle> wake_up_process(worker->task); 20dc: f9402260 ldr x0, [x19, #64] 20e0: 94000000 bl 0 <wake_up_process> spin_unlock_irq(&pool->lock); 20e4: aa1403e0 mov x0, x20 20e8: 94000000 bl 0 <rt_spin_unlock> } 20ec: aa1303e0 mov x0, x19 20f0: f9400fe2 ldr x2, [sp, #24] 20f4: f94002a1 ldr x1, [x21] 20f8: ca010041 eor x1, x2, x1 20fc: b5000321 cbnz x1, 2160 <create_worker+0x1a0> 2100: a9427bfd ldp x29, x30, [sp, #32] 2104: a94353f3 ldp x19, x20, [sp, #48] 2108: a9445bf5 ldp x21, x22, [sp, #64] 210c: a94563f7 ldp x23, x24, [sp, #80] 2110: 910183ff add sp, sp, #0x60 2114: d65f03c0 ret ida_simple_remove(&pool->worker_ida, id); 2118: 2a1603e1 mov w1, w22 211c: aa1703e0 mov x0, x23 2120: 94000000 bl 0 <ida_simple_remove> kfree(worker); 2124: aa1303e0 mov x0, x19 return NULL; 2128: d2800013 mov x19, #0x0 // #0 kfree(worker); 212c: 94000000 bl 0 <kfree> return NULL; 2130: 17ffffef b 20ec <create_worker+0x12c> struct worker *worker = NULL; 2134: d2800013 mov x19, #0x0 // #0 2138: 17fffffb b 2124 <create_worker+0x164> snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id); 213c: b940fa83 ldr w3, [x20, #248] 2140: 910023f8 add x24, sp, #0x8 2144: aa1803e0 mov x0, x24 2148: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 214c: 2a1603e4 mov w4, w22 2150: 91000042 add x2, x2, #0x0 2154: d2800201 mov x1, #0x10 // #16 2158: 94000000 bl 0 <snprintf> 215c: 17ffffc3 b 2068 <create_worker+0xa8> } 2160: 94000000 bl 0 <__stack_chk_fail> 2164: d503201f nop 0000000000002168 <current_work>: { 2168: a9bf7bfd stp x29, 
x30, [sp, #-16]! 216c: d5384100 mrs x0, sp_el0 2170: 910003fd mov x29, sp __READ_ONCE_SIZE; 2174: b9401001 ldr w1, [x0, #16] if (in_task() && (current->flags & PF_WQ_WORKER)) 2178: 12183021 and w1, w1, #0x1fff00 217c: 12106021 and w1, w1, #0xffff01ff 2180: 35000061 cbnz w1, 218c <current_work+0x24> 2184: b9403401 ldr w1, [x0, #52] 2188: 37280081 tbnz w1, #5, 2198 <current_work+0x30> return worker ? worker->current_work : NULL; 218c: d2800000 mov x0, #0x0 // #0 } 2190: a8c17bfd ldp x29, x30, [sp], #16 2194: d65f03c0 ret return kthread_data(current); 2198: 94000000 bl 0 <kthread_data> return worker ? worker->current_work : NULL; 219c: b4ffff80 cbz x0, 218c <current_work+0x24> } 21a0: a8c17bfd ldp x29, x30, [sp], #16 return worker ? worker->current_work : NULL; 21a4: f9400800 ldr x0, [x0, #16] } 21a8: d65f03c0 ret 21ac: d503201f nop 00000000000021b0 <get_pwq.isra.3>: static void get_pwq(struct pool_workqueue *pwq) 21b0: a9be7bfd stp x29, x30, [sp, #-32]! lockdep_assert_held(&pwq->pool->lock); 21b4: 90000002 adrp x2, 0 <debug_locks> static void get_pwq(struct pool_workqueue *pwq) 21b8: 910003fd mov x29, sp 21bc: f9000bf3 str x19, [sp, #16] 21c0: aa0103f3 mov x19, x1 lockdep_assert_held(&pwq->pool->lock); 21c4: b9400041 ldr w1, [x2] 21c8: 35000121 cbnz w1, 21ec <get_pwq.isra.3+0x3c> WARN_ON_ONCE(pwq->refcnt <= 0); 21cc: b9400260 ldr w0, [x19] 21d0: 7100001f cmp w0, #0x0 21d4: 540001ed b.le 2210 <get_pwq.isra.3+0x60> pwq->refcnt++; 21d8: 11000400 add w0, w0, #0x1 21dc: b9000260 str w0, [x19] } 21e0: f9400bf3 ldr x19, [sp, #16] 21e4: a8c27bfd ldp x29, x30, [sp], #32 21e8: d65f03c0 ret lockdep_assert_held(&pwq->pool->lock); 21ec: f9400000 ldr x0, [x0] 21f0: 12800001 mov w1, #0xffffffff // #-1 21f4: 91030000 add x0, x0, #0xc0 21f8: 94000000 bl 0 <lock_is_held_type> 21fc: 35fffe80 cbnz w0, 21cc <get_pwq.isra.3+0x1c> 2200: d4210000 brk #0x800 WARN_ON_ONCE(pwq->refcnt <= 0); 2204: b9400260 ldr w0, [x19] 2208: 7100001f cmp w0, #0x0 220c: 54fffe6c b.gt 21d8 <get_pwq.isra.3+0x28> 2210: d4210000 brk #0x800 2214: b9400260 ldr w0, [x19] 2218: 17fffff0 b 21d8 <get_pwq.isra.3+0x28> 221c: d503201f nop 0000000000002220 <pool_mayday_timeout>: { 2220: a9ba7bfd stp x29, x30, [sp, #-96]! 
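For reference, the create_worker() source interleaved a bit further up builds the worker name in two flavours before handing it to kthread_create_on_node(): "%d:%d%s" (cpu:id, plus "H" for high-priority pools with negative nice) for per-cpu pools, and "u%d:%d" (pool id : worker id) for unbound pools; the kthread layer then prefixes it with "kworker/". A standalone sketch of just that formatting step (the function name here is made up):

  #include <stdio.h>

  static void format_worker_id(char *id_buf, size_t len,
                               int pool_cpu, int pool_id, int worker_id, int nice)
  {
      if (pool_cpu >= 0)
          /* per-cpu pool: "CPU:ID", "H" marks a highpri (negative nice) pool */
          snprintf(id_buf, len, "%d:%d%s", pool_cpu, worker_id, nice < 0 ? "H" : "");
      else
          /* unbound pool: "uPOOLID:ID" */
          snprintf(id_buf, len, "u%d:%d", pool_id, worker_id);
  }

  int main(void)
  {
      char buf[16];
      format_worker_id(buf, sizeof(buf), 3, -1, 2, -20);
      printf("%s\n", buf);          /* "3:2H"  -> thread name kworker/3:2H */
      format_worker_id(buf, sizeof(buf), -1, 8, 0, 0);
      printf("%s\n", buf);          /* "u8:0"  -> thread name kworker/u8:0 */
      return 0;
  }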
2224: 910003fd mov x29, sp 2228: a9025bf5 stp x21, x22, [sp, #32] 222c: aa0003f6 mov x22, x0 2230: a90363f7 stp x23, x24, [sp, #48] spin_lock(&wq_mayday_lock); /* for wq->maydays */ 2234: 90000017 adrp x23, 0 <pwq_activate_delayed_work> 2238: 910002f7 add x23, x23, #0x0 { 223c: a9046bf9 stp x25, x26, [sp, #64] spin_lock_irq(&pool->lock); 2240: 94000000 bl 0 <rt_spin_lock> spin_lock(&wq_mayday_lock); /* for wq->maydays */ 2244: 910b42e0 add x0, x23, #0x2d0 2248: 94000000 bl 0 <rt_spin_lock> return !list_empty(&pool->worklist) && __need_more_worker(pool); 224c: 910422da add x26, x22, #0x108 2250: f94086c0 ldr x0, [x22, #264] 2254: eb00035f cmp x26, x0 2258: 54000200 b.eq 2298 <pool_mayday_timeout+0x78> // b.none 225c: b94582c0 ldr w0, [x22, #1408] 2260: 350001c0 cbnz w0, 2298 <pool_mayday_timeout+0x78> return need_more_worker(pool) && !may_start_working(pool); 2264: b9411ec0 ldr w0, [x22, #284] 2268: 35000180 cbnz w0, 2298 <pool_mayday_timeout+0x78> list_for_each_entry(work, &pool->worklist, entry) 226c: a90153f3 stp x19, x20, [sp, #16] 2270: 910e42f8 add x24, x23, #0x390 2274: f9002bfb str x27, [sp, #80] 2278: 9000001b adrp x27, 0 <debug_locks> 227c: 9100037b add x27, x27, #0x0 2280: f94086d3 ldr x19, [x22, #264] 2284: eb13035f cmp x26, x19 2288: d1002273 sub x19, x19, #0x8 228c: 54000421 b.ne 2310 <pool_mayday_timeout+0xf0> // b.any 2290: a94153f3 ldp x19, x20, [sp, #16] 2294: f9402bfb ldr x27, [sp, #80] spin_unlock(&wq_mayday_lock); 2298: 910b42e0 add x0, x23, #0x2d0 229c: 94000000 bl 0 <rt_spin_unlock> spin_unlock_irq(&pool->lock); 22a0: aa1603e0 mov x0, x22 22a4: 94000000 bl 0 <rt_spin_unlock> mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 22a8: 90000001 adrp x1, 0 <jiffies> 22ac: 910642c0 add x0, x22, #0x190 22b0: f9400021 ldr x1, [x1] 22b4: 91006421 add x1, x1, #0x19 22b8: 94000000 bl 0 <mod_timer> } 22bc: a9425bf5 ldp x21, x22, [sp, #32] 22c0: a94363f7 ldp x23, x24, [sp, #48] 22c4: a9446bf9 ldp x25, x26, [sp, #64] 22c8: a8c67bfd ldp x29, x30, [sp], #96 22cc: d65f03c0 ret 22d0: 12800001 mov w1, #0xffffffff // #-1 22d4: aa1803e0 mov x0, x24 22d8: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq_mayday_lock); 22dc: 35000060 cbnz w0, 22e8 <pool_mayday_timeout+0xc8> 22e0: d4210000 brk #0x800 22e4: d503201f nop if (!wq->rescuer) 22e8: f940aaa0 ldr x0, [x21, #336] if (list_empty(&pwq->mayday_node)) { 22ec: 91020299 add x25, x20, #0x80 if (!wq->rescuer) 22f0: b4000080 cbz x0, 2300 <pool_mayday_timeout+0xe0> 22f4: f9404280 ldr x0, [x20, #128] if (list_empty(&pwq->mayday_node)) { 22f8: eb00033f cmp x25, x0 22fc: 540001a0 b.eq 2330 <pool_mayday_timeout+0x110> // b.none list_for_each_entry(work, &pool->worklist, entry) 2300: f9400673 ldr x19, [x19, #8] 2304: eb13035f cmp x26, x19 2308: d1002273 sub x19, x19, #0x8 230c: 54fffc20 b.eq 2290 <pool_mayday_timeout+0x70> // b.none 2310: f9400260 ldr x0, [x19] lockdep_assert_held(&wq_mayday_lock); 2314: b9400361 ldr w1, [x27] return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); 2318: 9278dc14 and x20, x0, #0xffffffffffffff00 231c: f27e001f tst x0, #0x4 2320: 9a9f1294 csel x20, x20, xzr, ne // ne = any struct workqueue_struct *wq = pwq->wq; 2324: f9400695 ldr x21, [x20, #8] lockdep_assert_held(&wq_mayday_lock); 2328: 34fffe01 cbz w1, 22e8 <pool_mayday_timeout+0xc8> 232c: 17ffffe9 b 22d0 <pool_mayday_timeout+0xb0> get_pwq(pwq); 2330: 91006281 add x1, x20, #0x18 2334: aa1403e0 mov x0, x20 2338: 97ffff9e bl 21b0 <get_pwq.isra.3> __list_add(new, head->prev, head); 233c: f940a6a0 ldr x0, [x21, #328] next->prev = new; 2340: f900a6b9 str x25, 
[x21, #328] list_add_tail(&pwq->mayday_node, &wq->maydays); 2344: 910502a1 add x1, x21, #0x140 new->prev = prev; 2348: a9080281 stp x1, x0, [x20, #128] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 234c: f9000019 str x25, [x0] wake_up_process(wq->rescuer->task); 2350: f940aaa0 ldr x0, [x21, #336] 2354: f9402000 ldr x0, [x0, #64] 2358: 94000000 bl 0 <wake_up_process> 235c: 17ffffe9 b 2300 <pool_mayday_timeout+0xe0> 0000000000002360 <insert_work>: { 2360: a9be7bfd stp x29, x30, [sp, #-32]! 2364: aa0103e5 mov x5, x1 set_work_data(work, (unsigned long)pwq, 2368: 528000a1 mov w1, #0x5 // #5 { 236c: 910003fd mov x29, sp 2370: f9000bf3 str x19, [sp, #16] set_work_data(work, (unsigned long)pwq, 2374: 2a010063 orr w3, w3, w1 2378: f94000a1 ldr x1, [x5] struct worker_pool *pool = pwq->pool; 237c: f9400013 ldr x19, [x0] WARN_ON_ONCE(!work_pending(work)); 2380: 360002a1 tbz w1, #0, 23d4 <insert_work+0x74> 2384: aa0503e4 mov x4, x5 atomic_long_set(&work->data, data | flags | work_static(work)); 2388: aa030003 orr x3, x0, x3 get_pwq(pwq); 238c: 91006001 add x1, x0, #0x18 2390: f8008483 str x3, [x4], #8 __list_add(new, head->prev, head); 2394: f9400443 ldr x3, [x2, #8] next->prev = new; 2398: f9000444 str x4, [x2, #8] new->prev = prev; 239c: a9008ca2 stp x2, x3, [x5, #8] 23a0: f9000064 str x4, [x3] 23a4: 97ffff83 bl 21b0 <get_pwq.isra.3> smp_mb(); 23a8: d5033bbf dmb ish __READ_ONCE_SIZE; 23ac: b9458260 ldr w0, [x19, #1408] if (__need_more_worker(pool)) 23b0: 34000080 cbz w0, 23c0 <insert_work+0x60> } 23b4: f9400bf3 ldr x19, [sp, #16] 23b8: a8c27bfd ldp x29, x30, [sp], #32 23bc: d65f03c0 ret wake_up_worker(pool); 23c0: aa1303e0 mov x0, x19 23c4: 97fff819 bl 428 <wake_up_worker> } 23c8: f9400bf3 ldr x19, [sp, #16] 23cc: a8c27bfd ldp x29, x30, [sp], #32 23d0: d65f03c0 ret WARN_ON_ONCE(!work_pending(work)); 23d4: d4210000 brk #0x800 23d8: 17ffffeb b 2384 <insert_work+0x24> 23dc: d503201f nop 00000000000023e0 <flush_work>: { 23e0: d10443ff sub sp, sp, #0x110 23e4: a90c7bfd stp x29, x30, [sp, #192] 23e8: 910303fd add x29, sp, #0xc0 23ec: a90e5bf5 stp x21, x22, [sp, #224] if (WARN_ON(!wq_online)) 23f0: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 23f4: 910002d6 add x22, x22, #0x0 { 23f8: a90f63f7 stp x23, x24, [sp, #240] 23fc: 90000017 adrp x23, 0 <__stack_chk_guard> 2400: 910002f7 add x23, x23, #0x0 if (WARN_ON(!wq_online)) 2404: 394122d8 ldrb w24, [x22, #72] { 2408: f94002e1 ldr x1, [x23] 240c: f9005fe1 str x1, [sp, #184] 2410: d2800001 mov x1, #0x0 // #0 if (WARN_ON(!wq_online)) 2414: 34001bd8 cbz w24, 278c <flush_work+0x3ac> lock_map_acquire(&work->lockdep_map); 2418: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 241c: 910000c6 add x6, x6, #0x0 2420: d2800005 mov x5, #0x0 // #0 2424: 52800024 mov w4, #0x1 // #1 2428: 52800003 mov w3, #0x0 // #0 242c: a90d53f3 stp x19, x20, [sp, #208] 2430: 91008013 add x19, x0, #0x20 2434: 52800002 mov w2, #0x0 // #0 2438: 52800001 mov w1, #0x0 // #0 243c: aa0003f4 mov x20, x0 2440: aa1303e0 mov x0, x19 2444: a9106bf9 stp x25, x26, [sp, #256] 2448: 94000000 bl 0 <lock_acquire> lock_map_release(&work->lockdep_map); 244c: aa1303e0 mov x0, x19 2450: 52800021 mov w1, #0x1 // #1 2454: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 2458: 91000042 add x2, x2, #0x0 245c: 94000000 bl 0 <lock_release> might_sleep(); 2460: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 2464: 91000000 add x0, x0, #0x0 2468: 52800002 mov w2, #0x0 // #0 246c: 52816941 mov w1, #0xb4a // #2890 lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 2470: 90000019 adrp x25, 0 <rcu_lock_map> 2474: 
94000000 bl 0 <__might_sleep> 2478: 91000339 add x25, x25, #0x0 __rcu_read_lock(); 247c: 94000000 bl 0 <__rcu_read_lock> lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 2480: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 2484: aa1903e0 mov x0, x25 2488: 910000c6 add x6, x6, #0x0 248c: d2800005 mov x5, #0x0 // #0 2490: 52800004 mov w4, #0x0 // #0 2494: 52800043 mov w3, #0x2 // #2 2498: 52800002 mov w2, #0x0 // #0 249c: 52800001 mov w1, #0x0 // #0 24a0: 94000000 bl 0 <lock_acquire> RCU_LOCKDEP_WARN(!rcu_is_watching(), 24a4: 94000000 bl 0 <debug_lockdep_rcu_enabled> 24a8: 35000c00 cbnz w0, 2628 <flush_work+0x248> pool = get_work_pool(work); 24ac: aa1403e0 mov x0, x20 24b0: 97fffd70 bl 1a70 <get_work_pool> 24b4: aa0003fa mov x26, x0 if (!pool) { 24b8: b40010e0 cbz x0, 26d4 <flush_work+0x2f4> spin_lock_irq(&pool->lock); 24bc: 94000000 bl 0 <rt_spin_lock> 24c0: f9400295 ldr x21, [x20] if (data & WORK_STRUCT_PWQ) 24c4: 36100d35 tbz w21, #2, 2668 <flush_work+0x288> if (pwq) { 24c8: f278deb5 ands x21, x21, #0xffffffffffffff00 24cc: 54000ce0 b.eq 2668 <flush_work+0x288> // b.none if (unlikely(pwq->pool != pool)) 24d0: f94002a0 ldr x0, [x21] 24d4: eb00035f cmp x26, x0 24d8: 54001361 b.ne 2744 <flush_work+0x364> // b.any struct worker *worker = NULL; 24dc: d2800013 mov x19, #0x0 // #0 check_flush_dependency(pwq->wq, work); 24e0: f94006a0 ldr x0, [x21, #8] 24e4: aa1403e1 mov x1, x20 24e8: 97fffb88 bl 1308 <check_flush_dependency> INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); 24ec: 910083e0 add x0, sp, #0x20 24f0: b27b7be4 mov x4, #0xfffffffe0 // #68719476704 24f4: 52800003 mov w3, #0x0 // #0 24f8: 910162c2 add x2, x22, #0x58 24fc: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 2500: 91000021 add x1, x1, #0x0 2504: f90003e4 str x4, [sp] 2508: 94000000 bl 0 <lockdep_init_map> WRITE_ONCE(list->next, list); 250c: 910023e5 add x5, sp, #0x8 *p |= mask; 2510: f94003e4 ldr x4, [sp] 2514: 90000003 adrp x3, 0 <pwq_activate_delayed_work> 2518: 91000063 add x3, x3, #0x0 251c: 910163e0 add x0, sp, #0x58 2520: b2400084 orr x4, x4, #0x1 2524: 9100e2c2 add x2, x22, #0x38 2528: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 252c: 91000021 add x1, x1, #0x0 2530: f90003e4 str x4, [sp] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 2534: f90007e5 str x5, [sp, #8] 2538: a9010fe5 stp x5, x3, [sp, #16] x->done = 0; 253c: b90053ff str wzr, [sp, #80] init_swait_queue_head(&x->wait); 2540: 94000000 bl 0 <__init_swait_queue_head> 2544: d5384100 mrs x0, sp_el0 barr->task = current; 2548: f9005be0 str x0, [sp, #176] if (worker) 254c: b4000b93 cbz x19, 26bc <flush_work+0x2dc> head = worker->scheduled.next; 2550: f9401a62 ldr x2, [x19, #48] 2554: 52801e03 mov w3, #0xf0 // #240 insert_work(pwq, &barr->work, head, 2558: 910003e1 mov x1, sp 255c: aa1503e0 mov x0, x21 2560: 97ffff80 bl 2360 <insert_work> spin_unlock_irq(&pool->lock); 2564: aa1a03e0 mov x0, x26 2568: 94000000 bl 0 <rt_spin_unlock> if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) { 256c: f94006a0 ldr x0, [x21, #8] 2570: b9415c01 ldr w1, [x0, #348] 2574: 7100043f cmp w1, #0x1 2578: 54000060 b.eq 2584 <flush_work+0x1a4> // b.none 257c: f940a801 ldr x1, [x0, #336] 2580: b4000201 cbz x1, 25c0 <flush_work+0x1e0> lock_map_acquire(&pwq->wq->lockdep_map); 2584: 9105e000 add x0, x0, #0x178 2588: 52800002 mov w2, #0x0 // #0 258c: 52800001 mov w1, #0x0 // #0 2590: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 2594: d2800005 mov x5, #0x0 // #0 2598: 910000c6 add x6, x6, #0x0 259c: 52800024 mov w4, #0x1 // #1 25a0: 52800003 mov w3, #0x0 // #0 25a4: 94000000 bl 0 
<lock_acquire> lock_map_release(&pwq->wq->lockdep_map); 25a8: f94006a0 ldr x0, [x21, #8] 25ac: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 25b0: 52800021 mov w1, #0x1 // #1 25b4: 91000042 add x2, x2, #0x0 25b8: 9105e000 add x0, x0, #0x178 25bc: 94000000 bl 0 <lock_release> RCU_LOCKDEP_WARN(!rcu_is_watching(), 25c0: 94000000 bl 0 <debug_lockdep_rcu_enabled> 25c4: 340000a0 cbz w0, 25d8 <flush_work+0x1f8> 25c8: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 25cc: 91000273 add x19, x19, #0x0 25d0: 39401e60 ldrb w0, [x19, #7] 25d4: 34000a00 cbz w0, 2714 <flush_work+0x334> __rcu_read_unlock(); 25d8: 94000000 bl 0 <__rcu_read_unlock> lock_release(map, 1, _THIS_IP_); 25dc: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 25e0: 91000042 add x2, x2, #0x0 25e4: 52800021 mov w1, #0x1 // #1 25e8: aa1903e0 mov x0, x25 25ec: 94000000 bl 0 <lock_release> wait_for_completion(&barr.done); 25f0: 910143e0 add x0, sp, #0x50 25f4: 94000000 bl 0 <wait_for_completion> return true; 25f8: a94d53f3 ldp x19, x20, [sp, #208] 25fc: a9506bf9 ldp x25, x26, [sp, #256] } 2600: 2a1803e0 mov w0, w24 2604: f9405fe2 ldr x2, [sp, #184] 2608: f94002e1 ldr x1, [x23] 260c: ca010041 eor x1, x2, x1 2610: b5000f21 cbnz x1, 27f4 <flush_work+0x414> 2614: a94c7bfd ldp x29, x30, [sp, #192] 2618: a94e5bf5 ldp x21, x22, [sp, #224] 261c: a94f63f7 ldp x23, x24, [sp, #240] 2620: 910443ff add sp, sp, #0x110 2624: d65f03c0 ret RCU_LOCKDEP_WARN(!rcu_is_watching(), 2628: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 262c: 91000273 add x19, x19, #0x0 2630: 39401a60 ldrb w0, [x19, #6] 2634: 35fff3c0 cbnz w0, 24ac <flush_work+0xcc> 2638: 94000000 bl 0 <rcu_is_watching> 263c: 72001c1f tst w0, #0xff 2640: 54fff361 b.ne 24ac <flush_work+0xcc> // b.any 2644: 52800023 mov w3, #0x1 // #1 2648: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 264c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 2650: 91000042 add x2, x2, #0x0 2654: 91000000 add x0, x0, #0x0 2658: 52804b41 mov w1, #0x25a // #602 265c: 39001a63 strb w3, [x19, #6] 2660: 94000000 bl 0 <lockdep_rcu_suspicious> 2664: 17ffff92 b 24ac <flush_work+0xcc> 2668: d2907d60 mov x0, #0x83eb // #33771 266c: f2b016a0 movk x0, #0x80b5, lsl #16 2670: f2d0c8c0 movk x0, #0x8646, lsl #32 2674: f2ec3900 movk x0, #0x61c8, lsl #48 2678: 9b007e80 mul x0, x20, x0 hash_for_each_possible(pool->busy_hash, worker, hentry, 267c: d37afc00 lsr x0, x0, #58 2680: 8b000f40 add x0, x26, x0, lsl #3 2684: f940f813 ldr x19, [x0, #496] 2688: b5000093 cbnz x19, 2698 <flush_work+0x2b8> 268c: 1400002e b 2744 <flush_work+0x364> 2690: f9400273 ldr x19, [x19] 2694: b4000593 cbz x19, 2744 <flush_work+0x364> if (worker->current_work == work && 2698: f9400a60 ldr x0, [x19, #16] 269c: eb00029f cmp x20, x0 26a0: 54ffff81 b.ne 2690 <flush_work+0x2b0> // b.any 26a4: f9400e61 ldr x1, [x19, #24] 26a8: f9400e80 ldr x0, [x20, #24] 26ac: eb00003f cmp x1, x0 26b0: 54ffff01 b.ne 2690 <flush_work+0x2b0> // b.any pwq = worker->current_pwq; 26b4: f9401275 ldr x21, [x19, #32] 26b8: 17ffff8a b 24e0 <flush_work+0x100> head = target->entry.next; 26bc: a9400a80 ldp x0, x2, [x20] linked = *bits & WORK_STRUCT_LINKED; 26c0: 121d0003 and w3, w0, #0x8 26c4: b27d0000 orr x0, x0, #0x8 26c8: 321c0c63 orr w3, w3, #0xf0 26cc: f9000280 str x0, [x20] 26d0: 17ffffa2 b 2558 <flush_work+0x178> RCU_LOCKDEP_WARN(!rcu_is_watching(), 26d4: 94000000 bl 0 <debug_lockdep_rcu_enabled> 26d8: 340000a0 cbz w0, 26ec <flush_work+0x30c> 26dc: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 26e0: 91000273 add x19, x19, #0x0 26e4: 39401e60 ldrb w0, [x19, #7] 26e8: 340006e0 cbz w0, 
27c4 <flush_work+0x3e4> __rcu_read_unlock(); 26ec: 94000000 bl 0 <__rcu_read_unlock> return false; 26f0: 52800018 mov w24, #0x0 // #0 lock_release(map, 1, _THIS_IP_); 26f4: aa1903e0 mov x0, x25 26f8: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 26fc: 52800021 mov w1, #0x1 // #1 2700: 91000042 add x2, x2, #0x0 2704: 94000000 bl 0 <lock_release> return false; 2708: a94d53f3 ldp x19, x20, [sp, #208] 270c: a9506bf9 ldp x25, x26, [sp, #256] 2710: 17ffffbc b 2600 <flush_work+0x220> RCU_LOCKDEP_WARN(!rcu_is_watching(), 2714: 94000000 bl 0 <rcu_is_watching> 2718: 72001c1f tst w0, #0xff 271c: 54fff5e1 b.ne 25d8 <flush_work+0x1f8> // b.any 2720: 52800023 mov w3, #0x1 // #1 2724: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 2728: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 272c: 91000042 add x2, x2, #0x0 2730: 91000000 add x0, x0, #0x0 2734: 528051a1 mov w1, #0x28d // #653 2738: 39001e63 strb w3, [x19, #7] 273c: 94000000 bl 0 <lockdep_rcu_suspicious> 2740: 17ffffa6 b 25d8 <flush_work+0x1f8> spin_unlock_irq(&pool->lock); 2744: aa1a03e0 mov x0, x26 2748: 94000000 bl 0 <rt_spin_unlock> 274c: 94000000 bl 0 <debug_lockdep_rcu_enabled> 2750: 340000a0 cbz w0, 2764 <flush_work+0x384> 2754: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 2758: 91000273 add x19, x19, #0x0 275c: 39401e60 ldrb w0, [x19, #7] 2760: 340001a0 cbz w0, 2794 <flush_work+0x3b4> __rcu_read_unlock(); 2764: 94000000 bl 0 <__rcu_read_unlock> return false; 2768: 52800018 mov w24, #0x0 // #0 lock_release(map, 1, _THIS_IP_); 276c: aa1903e0 mov x0, x25 2770: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 2774: 52800021 mov w1, #0x1 // #1 2778: 91000042 add x2, x2, #0x0 277c: 94000000 bl 0 <lock_release> 2780: a94d53f3 ldp x19, x20, [sp, #208] 2784: a9506bf9 ldp x25, x26, [sp, #256] 2788: 17ffff9e b 2600 <flush_work+0x220> if (WARN_ON(!wq_online)) 278c: d4210000 brk #0x800 2790: 17ffff9c b 2600 <flush_work+0x220> RCU_LOCKDEP_WARN(!rcu_is_watching(), 2794: 94000000 bl 0 <rcu_is_watching> 2798: 72001c1f tst w0, #0xff 279c: 54fffe41 b.ne 2764 <flush_work+0x384> // b.any 27a0: 52800023 mov w3, #0x1 // #1 27a4: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 27a8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 27ac: 91000042 add x2, x2, #0x0 27b0: 91000000 add x0, x0, #0x0 27b4: 528051a1 mov w1, #0x28d // #653 27b8: 39001e63 strb w3, [x19, #7] 27bc: 94000000 bl 0 <lockdep_rcu_suspicious> 27c0: 17ffffe9 b 2764 <flush_work+0x384> 27c4: 94000000 bl 0 <rcu_is_watching> 27c8: 72001c1f tst w0, #0xff 27cc: 54fff901 b.ne 26ec <flush_work+0x30c> // b.any 27d0: 52800023 mov w3, #0x1 // #1 27d4: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 27d8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 27dc: 91000042 add x2, x2, #0x0 27e0: 91000000 add x0, x0, #0x0 27e4: 528051a1 mov w1, #0x28d // #653 27e8: 39001e63 strb w3, [x19, #7] 27ec: 94000000 bl 0 <lockdep_rcu_suspicious> 27f0: 17ffffbf b 26ec <flush_work+0x30c> 27f4: a90d53f3 stp x19, x20, [sp, #208] 27f8: a9106bf9 stp x25, x26, [sp, #256] } 27fc: 94000000 bl 0 <__stack_chk_fail> 0000000000002800 <init_pwq>: { 2800: a9bd7bfd stp x29, x30, [sp, #-48]! 
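flush_work() above is the part most relevant to a flush-versus-teardown race: per the interleaved source it resolves the work's pwq/pool under RCU plus pool->lock, queues a wq_barrier (INIT_WORK_ONSTACK + barr->task = current) right behind the target work via insert_work(), and then sleeps in wait_for_completion(&barr.done). A rough userspace analog of that barrier idea, using one worker thread and a condition variable in place of a struct completion (everything below is illustrative, not kernel code):

  #include <pthread.h>
  #include <stdio.h>

  struct work {
      void (*fn)(struct work *);
      struct work *next;
  };

  /* single-threaded "pool": one list, one worker, one lock/condvar */
  static struct work *head;
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t more = PTHREAD_COND_INITIALIZER;

  static void queue_work(struct work *w)
  {
      struct work **p;
      pthread_mutex_lock(&lock);
      for (p = &head; *p; p = &(*p)->next)
          ;
      w->next = NULL;
      *p = w;
      pthread_cond_signal(&more);
      pthread_mutex_unlock(&lock);
  }

  static void *worker(void *arg)
  {
      (void)arg;
      for (;;) {
          struct work *w;
          pthread_mutex_lock(&lock);
          while (!head)
              pthread_cond_wait(&more, &lock);
          w = head;
          head = w->next;
          pthread_mutex_unlock(&lock);
          w->fn(w);                 /* run outside the lock, like process_one_work() */
      }
      return NULL;
  }

  /* the barrier: a work item whose callback signals a "completion" */
  struct wq_barrier {
      struct work work;
      pthread_mutex_t mtx;
      pthread_cond_t cv;
      int done;
  };

  static void barrier_fn(struct work *w)
  {
      struct wq_barrier *b = (struct wq_barrier *)w;
      pthread_mutex_lock(&b->mtx);
      b->done = 1;
      pthread_cond_signal(&b->cv);
      pthread_mutex_unlock(&b->mtx);
  }

  static void flush_queue(void)
  {
      struct wq_barrier b = {
          .work = { .fn = barrier_fn },
          .mtx  = PTHREAD_MUTEX_INITIALIZER,
          .cv   = PTHREAD_COND_INITIALIZER,
          .done = 0,
      };
      queue_work(&b.work);              /* queued behind everything already pending */
      pthread_mutex_lock(&b.mtx);
      while (!b.done)
          pthread_cond_wait(&b.cv, &b.mtx);   /* wait_for_completion(&barr.done) analog */
      pthread_mutex_unlock(&b.mtx);
  }

  static void hello(struct work *w) { (void)w; puts("work ran"); }

  int main(void)
  {
      pthread_t tid;
      struct work w = { .fn = hello };
      pthread_create(&tid, NULL, worker, NULL);
      queue_work(&w);
      flush_queue();                    /* returns only after "work ran" */
      puts("flushed");
      return 0;
  }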
BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 2804: 72001c1f tst w0, #0xff { 2808: 910003fd mov x29, sp 280c: a90153f3 stp x19, x20, [sp, #16] 2810: f90013f5 str x21, [sp, #32] BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 2814: 540004c1 b.ne 28ac <init_pwq+0xac> // b.any memset(pwq, 0, sizeof(*pwq)); 2818: aa0003f3 mov x19, x0 281c: aa0203f5 mov x21, x2 2820: aa0103f4 mov x20, x1 2824: d2801e02 mov x2, #0xf0 // #240 2828: 52800001 mov w1, #0x0 // #0 282c: 91004000 add x0, x0, #0x10 2830: 94000000 bl 0 <memset> pwq->wq = wq; 2834: a9005275 stp x21, x20, [x19] INIT_LIST_HEAD(&pwq->delayed_works); 2838: 91018263 add x3, x19, #0x60 283c: f9003263 str x3, [x19, #96] INIT_LIST_HEAD(&pwq->pwqs_node); 2840: 9101c261 add x1, x19, #0x70 2844: f9003a61 str x1, [x19, #112] INIT_LIST_HEAD(&pwq->mayday_node); 2848: 91020260 add x0, x19, #0x80 pwq->flush_color = -1; 284c: b24083e5 mov x5, #0x1ffffffff // #8589934591 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); 2850: b27b7be4 mov x4, #0xfffffffe0 // #68719476704 pwq->flush_color = -1; 2854: f8014265 stur x5, [x19, #20] INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); 2858: 90000002 adrp x2, 0 <pwq_activate_delayed_work> list->prev = list; 285c: f9003663 str x3, [x19, #104] 2860: 91000042 add x2, x2, #0x0 2864: f9003e61 str x1, [x19, #120] 2868: 91018042 add x2, x2, #0x60 286c: f9004260 str x0, [x19, #128] 2870: 52800003 mov w3, #0x0 // #0 2874: a9089260 stp x0, x4, [x19, #136] 2878: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 287c: 9102c260 add x0, x19, #0xb0 2880: 91000021 add x1, x1, #0x0 2884: 94000000 bl 0 <lockdep_init_map> 2888: 91026261 add x1, x19, #0x98 288c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 2890: 91000000 add x0, x0, #0x0 2894: f9004e61 str x1, [x19, #152] 2898: a90a0261 stp x1, x0, [x19, #160] } 289c: a94153f3 ldp x19, x20, [sp, #16] 28a0: f94013f5 ldr x21, [sp, #32] 28a4: a8c37bfd ldp x29, x30, [sp], #48 28a8: d65f03c0 ret BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); 28ac: d4210000 brk #0x800 00000000000028b0 <alloc_unbound_pwq>: { 28b0: a9bc7bfd stp x29, x30, [sp, #-64]! 
28b4: 910003fd mov x29, sp 28b8: a90153f3 stp x19, x20, [sp, #16] lockdep_assert_held(&wq_pool_mutex); 28bc: 90000014 adrp x20, 0 <debug_locks> { 28c0: aa0003f3 mov x19, x0 lockdep_assert_held(&wq_pool_mutex); 28c4: b9400285 ldr w5, [x20] { 28c8: a9025bf5 stp x21, x22, [sp, #32] 28cc: aa0103f5 mov x21, x1 28d0: a90363f7 stp x23, x24, [sp, #48] lockdep_assert_held(&wq_pool_mutex); 28d4: 35001345 cbnz w5, 2b3c <alloc_unbound_pwq+0x28c> /* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */ static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) { a += initval; 28d8: b94002a2 ldr w2, [x21] 28dc: 5297de66 mov w6, #0xbef3 // #48883 28e0: 72bbd5a6 movk w6, #0xdead, lsl #16 b += initval; c += initval; __jhash_final(a, b, c); 28e4: 52810aa4 mov w4, #0x855 // #2133 a += initval; 28e8: 0b060042 add w2, w2, w6 __jhash_final(a, b, c); 28ec: 72b20864 movk w4, #0x9043, lsl #16 28f0: 528a6fc1 mov w1, #0x537e // #21374 28f4: 4a040042 eor w2, w2, w4 28f8: 72bcf7a1 movk w1, #0xe7bd, lsl #16 28fc: 0b010042 add w2, w2, w1 2900: 4a060046 eor w6, w2, w6 case 7: b += (u32)k[6]<<16; /* fall through */ 2904: 39403aa8 ldrb w8, [x21, #14] case 5: b += k[4]; /* fall through */ 2908: 394032a1 ldrb w1, [x21, #12] * @word: value to rotate * @shift: bits to roll */ static inline __u32 rol32(__u32 word, unsigned int shift) { return (word << (shift & 31)) | (word >> ((-shift) & 31)); 290c: 13821c43 ror w3, w2, #7 __jhash_final(a, b, c); 2910: 4b0300c6 sub w6, w6, w3 case 8: b += (u32)k[7]<<24; /* fall through */ 2914: 39403ea0 ldrb w0, [x21, #15] __jhash_final(a, b, c); 2918: 4a0400c4 eor w4, w6, w4 case 6: b += (u32)k[5]<<8; /* fall through */ 291c: 394036a7 ldrb w7, [x21, #13] case 7: b += (u32)k[6]<<16; /* fall through */ 2920: 53103d08 lsl w8, w8, #16 2924: 138640c3 ror w3, w6, #16 __jhash_final(a, b, c); 2928: 4b030084 sub w4, w4, w3 a = b = c = JHASH_INITVAL + length + initval; 292c: 5297dee9 mov w9, #0xbef7 // #48887 __jhash_final(a, b, c); 2930: 4a040042 eor w2, w2, w4 case 5: b += k[4]; /* fall through */ 2934: 0b072027 add w7, w1, w7, lsl #8 a = b = c = JHASH_INITVAL + length + initval; 2938: 72bbd5a9 movk w9, #0xdead, lsl #16 293c: 13847083 ror w3, w4, #28 __jhash_final(a, b, c); 2940: 4b030042 sub w2, w2, w3 case 5: b += k[4]; /* fall through */ 2944: 0b006100 add w0, w8, w0, lsl #24 __jhash_final(a, b, c); 2948: 4a0200c6 eor w6, w6, w2 case 3: a += (u32)k[2]<<16; /* fall through */ 294c: 39402aa1 ldrb w1, [x21, #10] case 5: b += k[4]; /* fall through */ 2950: 0b070000 add w0, w0, w7 2954: 13824842 ror w2, w2, #18 __jhash_final(a, b, c); 2958: 4b0200c6 sub w6, w6, w2 case 1: a += k[0]; 295c: 394022a7 ldrb w7, [x21, #8] __jhash_final(a, b, c); 2960: 4a060083 eor w3, w4, w6 case 4: a += (u32)k[3]<<24; /* fall through */ 2964: 39402ea2 ldrb w2, [x21, #11] a = b = c = JHASH_INITVAL + length + initval; 2968: 0b090063 add w3, w3, w9 296c: 138620c6 ror w6, w6, #8 2970: 4b060063 sub w3, w3, w6 case 2: a += (u32)k[1]<<8; /* fall through */ 2974: 394026a4 ldrb w4, [x21, #9] case 5: b += k[4]; /* fall through */ 2978: 0b030000 add w0, w0, w3 case 3: a += (u32)k[2]<<16; /* fall through */ 297c: 53103c21 lsl w1, w1, #16 case 1: a += k[0]; 2980: 0b026021 add w1, w1, w2, lsl #24 __jhash_final(a, b, c); 2984: 4a000062 eor w2, w3, w0 case 1: a += k[0]; 2988: 0b0420e4 add w4, w7, w4, lsl #8 298c: 13804806 ror w6, w0, #18 __jhash_final(a, b, c); 2990: 4b060042 sub w2, w2, w6 case 1: a += k[0]; 2994: 0b040021 add w1, w1, w4 2998: 0b030021 add w1, w1, w3 __jhash_final(a, b, c); 299c: 4a020021 eor w1, w1, w2 29a0: 13825443 
ror w3, w2, #21 29a4: 4b030021 sub w1, w1, w3 29a8: 4a010000 eor w0, w0, w1 29ac: 13811c23 ror w3, w1, #7 29b0: 4b030000 sub w0, w0, w3 29b4: 4a000042 eor w2, w2, w0 29b8: 13804003 ror w3, w0, #16 29bc: 4b030042 sub w2, w2, w3 29c0: 4a020021 eor w1, w1, w2 29c4: 13827043 ror w3, w2, #28 29c8: 4b030021 sub w1, w1, w3 29cc: 4a010000 eor w0, w0, w1 29d0: 13814821 ror w1, w1, #18 29d4: 4b010000 sub w0, w0, w1 29d8: 4a000054 eor w20, w2, w0 29dc: 13802000 ror w0, w0, #8 29e0: 4b000294 sub w20, w20, w0 lockdep_assert_held(&wq_pool_mutex); 29e4: 35000be5 cbnz w5, 2b60 <alloc_unbound_pwq+0x2b0> return val * GOLDEN_RATIO_32; 29e8: 5290c8e0 mov w0, #0x8647 // #34375 hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 29ec: 90000017 adrp x23, 0 <pwq_activate_delayed_work> 29f0: 72ac3900 movk w0, #0x61c8, lsl #16 29f4: 910002f7 add x23, x23, #0x0 29f8: 1b007e94 mul w20, w20, w0 29fc: 9101a2e0 add x0, x23, #0x68 2a00: 531a7e94 lsr w20, w20, #26 2a04: f8747816 ldr x22, [x0, x20, lsl #3] 2a08: b4000456 cbz x22, 2a90 <alloc_unbound_pwq+0x1e0> 2a0c: f11442d6 subs x22, x22, #0x510 2a10: 54000400 b.eq 2a90 <alloc_unbound_pwq+0x1e0> // b.none if (a->nice != b->nice) 2a14: b94002a2 ldr w2, [x21] if (wqattrs_equal(pool->attrs, attrs)) { 2a18: f94286c0 ldr x0, [x22, #1288] if (a->nice != b->nice) 2a1c: b9400001 ldr w1, [x0] 2a20: 6b02003f cmp w1, w2 2a24: 540002e1 b.ne 2a80 <alloc_unbound_pwq+0x1d0> // b.any if (!cpumask_equal(a->cpumask, b->cpumask)) 2a28: f9400401 ldr x1, [x0, #8] 2a2c: f94006a0 ldr x0, [x21, #8] 2a30: eb00003f cmp x1, x0 2a34: 54000261 b.ne 2a80 <alloc_unbound_pwq+0x1d0> // b.any pool->refcnt++; 2a38: b94522c0 ldr w0, [x22, #1312] 2a3c: 11000400 add w0, w0, #0x1 2a40: b90522c0 str w0, [x22, #1312] 2a44: f94022e0 ldr x0, [x23, #64] 2a48: 52801801 mov w1, #0xc0 // #192 2a4c: 72a02801 movk w1, #0x140, lsl #16 2a50: 94000000 bl 0 <kmem_cache_alloc> 2a54: aa0003f8 mov x24, x0 if (!pwq) { 2a58: b4000a40 cbz x0, 2ba0 <alloc_unbound_pwq+0x2f0> init_pwq(pwq, wq, pool); 2a5c: aa1603e2 mov x2, x22 2a60: aa1303e1 mov x1, x19 2a64: 97ffff67 bl 2800 <init_pwq> } 2a68: aa1803e0 mov x0, x24 2a6c: a94153f3 ldp x19, x20, [sp, #16] 2a70: a9425bf5 ldp x21, x22, [sp, #32] 2a74: a94363f7 ldp x23, x24, [sp, #48] 2a78: a8c47bfd ldp x29, x30, [sp], #64 2a7c: d65f03c0 ret hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) { 2a80: f9428ac0 ldr x0, [x22, #1296] 2a84: f1144016 subs x22, x0, #0x510 2a88: b4000040 cbz x0, 2a90 <alloc_unbound_pwq+0x1e0> 2a8c: 54fffc61 b.ne 2a18 <alloc_unbound_pwq+0x168> // b.any return kmem_cache_alloc_node_trace( 2a90: 90000000 adrp x0, 0 <kmalloc_caches> return kmem_cache_alloc(s, flags); 2a94: 52901801 mov w1, #0x80c0 // #32960 2a98: 72a02801 movk w1, #0x140, lsl #16 2a9c: f9400000 ldr x0, [x0] 2aa0: 94000000 bl 0 <kmem_cache_alloc> 2aa4: aa0003f8 mov x24, x0 if (!pool || init_worker_pool(pool) < 0) 2aa8: b4fffe00 cbz x0, 2a68 <alloc_unbound_pwq+0x1b8> 2aac: 97fff731 bl 770 <init_worker_pool> 2ab0: 37f80700 tbnz w0, #31, 2b90 <alloc_unbound_pwq+0x2e0> lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ 2ab4: aa1803e0 mov x0, x24 2ab8: 52800023 mov w3, #0x1 // #1 2abc: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 2ac0: 91000021 add x1, x1, #0x0 2ac4: f84c0c02 ldr x2, [x0, #192]! 
2ac8: 94000000 bl 0 <lockdep_init_map> copy_workqueue_attrs(pool->attrs, attrs); 2acc: f9428701 ldr x1, [x24, #1288] pool->node = target_node; 2ad0: 12800002 mov w2, #0xffffffff // #-1 to->nice = from->nice; 2ad4: b94002a3 ldr w3, [x21] if (worker_pool_assign_id(pool) < 0) 2ad8: aa1803e0 mov x0, x24 to->nice = from->nice; 2adc: b9000023 str w3, [x1] 2ae0: f94006a3 ldr x3, [x21, #8] 2ae4: f9000423 str x3, [x1, #8] to->no_numa = from->no_numa; 2ae8: 394042a3 ldrb w3, [x21, #16] 2aec: 39004023 strb w3, [x1, #16] pool->node = target_node; 2af0: b900f702 str w2, [x24, #244] pool->attrs->no_numa = false; 2af4: f9428701 ldr x1, [x24, #1288] 2af8: 3900403f strb wzr, [x1, #16] if (worker_pool_assign_id(pool) < 0) 2afc: 97fff6cb bl 628 <worker_pool_assign_id> 2b00: 37f80480 tbnz w0, #31, 2b90 <alloc_unbound_pwq+0x2e0> if (wq_online && !create_worker(pool)) 2b04: 394122e0 ldrb w0, [x23, #72] 2b08: 350003c0 cbnz w0, 2b80 <alloc_unbound_pwq+0x2d0> hash_add(unbound_pool_hash, &pool->hash_node, hash); 2b0c: 9101a2e0 add x0, x23, #0x68 } static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; n->next = first; 2b10: 91180302 add x2, x24, #0x600 2b14: 8b140c04 add x4, x0, x20, lsl #3 2b18: 91144303 add x3, x24, #0x510 struct hlist_node *first = h->first; 2b1c: f8747801 ldr x1, [x0, x20, lsl #3] n->next = first; 2b20: f8110041 stur x1, [x2, #-240] if (first) 2b24: b4000041 cbz x1, 2b2c <alloc_unbound_pwq+0x27c> first->pprev = &n->next; 2b28: f9000423 str x3, [x1, #8] WRITE_ONCE(h->first, n); n->pprev = &h->first; 2b2c: aa1803f6 mov x22, x24 2b30: f8118044 stur x4, [x2, #-232] 2b34: f8347803 str x3, [x0, x20, lsl #3] return pool; 2b38: 17ffffc3 b 2a44 <alloc_unbound_pwq+0x194> 2b3c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 2b40: 91000000 add x0, x0, #0x0 2b44: 9102e000 add x0, x0, #0xb8 2b48: 12800001 mov w1, #0xffffffff // #-1 2b4c: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq_pool_mutex); 2b50: 35000040 cbnz w0, 2b58 <alloc_unbound_pwq+0x2a8> 2b54: d4210000 brk #0x800 2b58: b9400285 ldr w5, [x20] 2b5c: 17ffff5f b 28d8 <alloc_unbound_pwq+0x28> 2b60: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 2b64: 91000000 add x0, x0, #0x0 2b68: 9102e000 add x0, x0, #0xb8 2b6c: 12800001 mov w1, #0xffffffff // #-1 2b70: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq_pool_mutex); 2b74: 35fff3a0 cbnz w0, 29e8 <alloc_unbound_pwq+0x138> 2b78: d4210000 brk #0x800 2b7c: 17ffff9b b 29e8 <alloc_unbound_pwq+0x138> if (wq_online && !create_worker(pool)) 2b80: aa1803e0 mov x0, x24 2b84: 97fffd0f bl 1fc0 <create_worker> 2b88: b5fffc20 cbnz x0, 2b0c <alloc_unbound_pwq+0x25c> 2b8c: d503201f nop put_unbound_pool(pool); 2b90: aa1803e0 mov x0, x24 return NULL; 2b94: d2800018 mov x24, #0x0 // #0 put_unbound_pool(pool); 2b98: 97fff82a bl c40 <put_unbound_pool> if (!pool) 2b9c: 17ffffb3 b 2a68 <alloc_unbound_pwq+0x1b8> put_unbound_pool(pool); 2ba0: aa1603e0 mov x0, x22 2ba4: 97fff827 bl c40 <put_unbound_pool> return NULL; 2ba8: 17ffffb0 b 2a68 <alloc_unbound_pwq+0x1b8> 2bac: d503201f nop 0000000000002bb0 <wq_calc_node_cpumask.constprop.13>: 2bb0: f9400401 ldr x1, [x0, #8] } 2bb4: 52800000 mov w0, #0x0 // #0 2bb8: f9000041 str x1, [x2] 2bbc: d65f03c0 ret 0000000000002bc0 <numa_pwq_tbl_install.constprop.14>: static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, 2bc0: a9bd7bfd stp x29, x30, [sp, #-48]! 
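alloc_unbound_pwq() / get_unbound_pool() above hash the workqueue attrs (the jhash mixing is what produces the long ror/eor run), walk unbound_pool_hash looking for a pool whose nice and cpumask match, bump its refcnt if one is found, and otherwise allocate a pool, create its first worker and hash_add() it. A small userspace sketch of that find-or-create step, with a toy hash and a single word standing in for the cpumask (all names below are placeholders):

  #include <stdio.h>
  #include <stdlib.h>

  struct attrs { int nice; unsigned long cpumask; };

  struct pool {
      struct attrs attrs;
      int refcnt;
      struct pool *next;            /* hash-bucket chain */
  };

  #define NR_BUCKETS 64
  static struct pool *buckets[NR_BUCKETS];

  static unsigned int hash_attrs(const struct attrs *a)
  {
      /* placeholder mix; the kernel uses jhash() over the attrs */
      unsigned long h = (unsigned long)a->nice * 2654435761UL ^ a->cpumask;
      return (unsigned int)(h % NR_BUCKETS);
  }

  static int attrs_equal(const struct attrs *a, const struct attrs *b)
  {
      return a->nice == b->nice && a->cpumask == b->cpumask;
  }

  static struct pool *get_unbound_pool(const struct attrs *attrs)
  {
      unsigned int h = hash_attrs(attrs);
      struct pool *p;

      for (p = buckets[h]; p; p = p->next) {
          if (attrs_equal(&p->attrs, attrs)) {
              p->refcnt++;          /* reuse an existing pool */
              return p;
          }
      }

      p = calloc(1, sizeof(*p));    /* otherwise create a new one ... */
      if (!p)
          return NULL;
      p->attrs = *attrs;
      p->refcnt = 1;
      p->next = buckets[h];         /* ... and publish it in the hash */
      buckets[h] = p;
      return p;
  }

  int main(void)
  {
      struct attrs a = { .nice = 0, .cpumask = 0xf };
      struct pool *p1 = get_unbound_pool(&a);
      struct pool *p2 = get_unbound_pool(&a);
      printf("same pool: %d, refcnt: %d\n", p1 == p2, p1 ? p1->refcnt : 0);
      return 0;
  }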
2bc4: 910003fd mov x29, sp 2bc8: f90013f5 str x21, [sp, #32] lockdep_assert_held(&wq_pool_mutex); 2bcc: 90000015 adrp x21, 0 <debug_locks> static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, 2bd0: a90153f3 stp x19, x20, [sp, #16] 2bd4: aa0003f3 mov x19, x0 lockdep_assert_held(&wq_pool_mutex); 2bd8: b94002a0 ldr w0, [x21] static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, 2bdc: aa0103f4 mov x20, x1 lockdep_assert_held(&wq_pool_mutex); 2be0: 35000140 cbnz w0, 2c08 <numa_pwq_tbl_install.constprop.14+0x48> link_pwq(pwq); 2be4: aa1403e0 mov x0, x20 2be8: 97fff66a bl 590 <link_pwq> rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); 2bec: 91084261 add x1, x19, #0x210 __READ_ONCE_SIZE; 2bf0: f9410a60 ldr x0, [x19, #528] 2bf4: c89ffc34 stlr x20, [x1] } 2bf8: a94153f3 ldp x19, x20, [sp, #16] 2bfc: f94013f5 ldr x21, [sp, #32] 2c00: a8c37bfd ldp x29, x30, [sp], #48 2c04: d65f03c0 ret 2c08: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 2c0c: 91000000 add x0, x0, #0x0 2c10: 9102e000 add x0, x0, #0xb8 2c14: 12800001 mov w1, #0xffffffff // #-1 2c18: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq_pool_mutex); 2c1c: 34000120 cbz w0, 2c40 <numa_pwq_tbl_install.constprop.14+0x80> lockdep_assert_held(&wq->mutex); 2c20: b94002a0 ldr w0, [x21] 2c24: 34fffe00 cbz w0, 2be4 <numa_pwq_tbl_install.constprop.14+0x24> 2c28: 12800001 mov w1, #0xffffffff // #-1 2c2c: 91036260 add x0, x19, #0xd8 2c30: 94000000 bl 0 <lock_is_held_type> 2c34: 35fffd80 cbnz w0, 2be4 <numa_pwq_tbl_install.constprop.14+0x24> 2c38: d4210000 brk #0x800 2c3c: 17ffffea b 2be4 <numa_pwq_tbl_install.constprop.14+0x24> lockdep_assert_held(&wq_pool_mutex); 2c40: d4210000 brk #0x800 2c44: 17fffff7 b 2c20 <numa_pwq_tbl_install.constprop.14+0x60> 0000000000002c48 <apply_wqattrs_commit>: { 2c48: a9be7bfd stp x29, x30, [sp, #-32]! 2c4c: 910003fd mov x29, sp 2c50: f9000bf3 str x19, [sp, #16] 2c54: aa0003f3 mov x19, x0 mutex_lock(&ctx->wq->mutex); 2c58: f9400000 ldr x0, [x0] 2c5c: 91008000 add x0, x0, #0x20 2c60: 94000000 bl 0 <_mutex_lock> copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); 2c64: a9400660 ldp x0, x1, [x19] 2c68: f940b000 ldr x0, [x0, #352] to->nice = from->nice; 2c6c: b9400022 ldr w2, [x1] 2c70: b9000002 str w2, [x0] 2c74: f9400422 ldr x2, [x1, #8] 2c78: f9000402 str x2, [x0, #8] to->no_numa = from->no_numa; 2c7c: 39404021 ldrb w1, [x1, #16] 2c80: 39004001 strb w1, [x0, #16] ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node, 2c84: f9400260 ldr x0, [x19] 2c88: f9401661 ldr x1, [x19, #40] 2c8c: 97ffffcd bl 2bc0 <numa_pwq_tbl_install.constprop.14> 2c90: f9001660 str x0, [x19, #40] link_pwq(ctx->dfl_pwq); 2c94: f9401260 ldr x0, [x19, #32] 2c98: 97fff63e bl 590 <link_pwq> swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); 2c9c: f9400260 ldr x0, [x19] 2ca0: f9401262 ldr x2, [x19, #32] 2ca4: f940b401 ldr x1, [x0, #360] 2ca8: f900b402 str x2, [x0, #360] 2cac: f9001261 str x1, [x19, #32] mutex_unlock(&ctx->wq->mutex); 2cb0: f9400260 ldr x0, [x19] 2cb4: 91008000 add x0, x0, #0x20 2cb8: 94000000 bl 0 <_mutex_unlock> } 2cbc: f9400bf3 ldr x19, [sp, #16] 2cc0: a8c27bfd ldp x29, x30, [sp], #32 2cc4: d65f03c0 ret 0000000000002cc8 <unbound_pwq_by_node.constprop.15>: static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, 2cc8: a9be7bfd stp x29, x30, [sp, #-32]! 
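numa_pwq_tbl_install() and apply_wqattrs_commit() just above both come down to the same step: under wq_pool_mutex / wq->mutex, read the pwq currently published for the node, store the new one with a release store (the stlr instruction, i.e. rcu_assign_pointer()), and hand the old pwq back so its reference can be dropped later. A compressed userspace sketch of that swap (one fake node, GCC atomics in place of RCU; names invented):

  #include <pthread.h>
  #include <stdio.h>

  struct pwq { int id; };

  static pthread_mutex_t wq_mutex = PTHREAD_MUTEX_INITIALIZER;
  static struct pwq *numa_pwq_tbl[1];   /* one "node" is enough for the sketch */

  /* publish the new pwq for a node and hand back the old one for later release */
  static struct pwq *pwq_tbl_install(int node, struct pwq *new_pwq)
  {
      struct pwq *old;

      pthread_mutex_lock(&wq_mutex);    /* the kernel holds wq_pool_mutex + wq->mutex */
      old = numa_pwq_tbl[node];
      __atomic_store_n(&numa_pwq_tbl[node], new_pwq, __ATOMIC_RELEASE);
      pthread_mutex_unlock(&wq_mutex);
      return old;                       /* caller drops its reference once it is safe */
  }

  int main(void)
  {
      struct pwq a = { 1 }, b = { 2 };
      numa_pwq_tbl[0] = &a;
      struct pwq *old = pwq_tbl_install(0, &b);
      printf("old=%d new=%d\n", old->id, numa_pwq_tbl[0]->id);
      return 0;
  }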
2ccc: 910003fd mov x29, sp 2cd0: a90153f3 stp x19, x20, [sp, #16] 2cd4: aa0003f3 mov x19, x0 assert_rcu_or_wq_mutex_or_pool_mutex(wq); 2cd8: 94000000 bl 0 <debug_lockdep_rcu_enabled> 2cdc: 340000a0 cbz w0, 2cf0 <unbound_pwq_by_node.constprop.15+0x28> 2ce0: 90000014 adrp x20, 0 <pwq_activate_delayed_work> 2ce4: 91000294 add x20, x20, #0x0 2ce8: 39402280 ldrb w0, [x20, #8] 2cec: 340000a0 cbz w0, 2d00 <unbound_pwq_by_node.constprop.15+0x38> 2cf0: f9410a60 ldr x0, [x19, #528] } 2cf4: a94153f3 ldp x19, x20, [sp, #16] 2cf8: a8c27bfd ldp x29, x30, [sp], #32 2cfc: d65f03c0 ret assert_rcu_or_wq_mutex_or_pool_mutex(wq); 2d00: 94000000 bl 0 <rcu_read_lock_held> 2d04: 35ffff60 cbnz w0, 2cf0 <unbound_pwq_by_node.constprop.15+0x28> 2d08: 12800001 mov w1, #0xffffffff // #-1 2d0c: 91036260 add x0, x19, #0xd8 2d10: 94000000 bl 0 <lock_is_held_type> 2d14: 35fffee0 cbnz w0, 2cf0 <unbound_pwq_by_node.constprop.15+0x28> 2d18: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 2d1c: 91000000 add x0, x0, #0x0 2d20: 9102e000 add x0, x0, #0xb8 2d24: 12800001 mov w1, #0xffffffff // #-1 2d28: 94000000 bl 0 <lock_is_held_type> 2d2c: 35fffe20 cbnz w0, 2cf0 <unbound_pwq_by_node.constprop.15+0x28> 2d30: 52800023 mov w3, #0x1 // #1 2d34: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 2d38: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 2d3c: 91000042 add x2, x2, #0x0 2d40: 91000000 add x0, x0, #0x0 2d44: 52804ae1 mov w1, #0x257 // #599 2d48: 39002283 strb w3, [x20, #8] 2d4c: 94000000 bl 0 <lockdep_rcu_suspicious> 2d50: 17ffffe8 b 2cf0 <unbound_pwq_by_node.constprop.15+0x28> 2d54: d503201f nop 0000000000002d58 <wq_pool_ids_show>: { 2d58: a9bd7bfd stp x29, x30, [sp, #-48]! 2d5c: 910003fd mov x29, sp 2d60: a90153f3 stp x19, x20, [sp, #16] 2d64: aa0203f4 mov x20, x2 2d68: a9025bf5 stp x21, x22, [sp, #32] lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 2d6c: 90000015 adrp x21, 0 <rcu_lock_map> 2d70: 910002b5 add x21, x21, #0x0 return wq_dev->wq; 2d74: f85f8013 ldur x19, [x0, #-8] #endif /* !CONFIG_HOTPLUG_CPU */ /* Wrappers which go away once all code is converted */ static inline void cpu_hotplug_begin(void) { cpus_write_lock(); } static inline void cpu_hotplug_done(void) { cpus_write_unlock(); } static inline void get_online_cpus(void) { cpus_read_lock(); } 2d78: 94000000 bl 0 <cpus_read_lock> __rcu_read_lock(); 2d7c: 94000000 bl 0 <__rcu_read_lock> lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 2d80: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 2d84: aa1503e0 mov x0, x21 2d88: 910000c6 add x6, x6, #0x0 2d8c: d2800005 mov x5, #0x0 // #0 2d90: 52800004 mov w4, #0x0 // #0 2d94: 52800043 mov w3, #0x2 // #2 2d98: 52800002 mov w2, #0x0 // #0 2d9c: 52800001 mov w1, #0x0 // #0 2da0: 94000000 bl 0 <lock_acquire> RCU_LOCKDEP_WARN(!rcu_is_watching(), 2da4: 94000000 bl 0 <debug_lockdep_rcu_enabled> 2da8: 340000a0 cbz w0, 2dbc <wq_pool_ids_show+0x64> 2dac: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 2db0: 910002d6 add x22, x22, #0x0 2db4: 39401ac0 ldrb w0, [x22, #6] 2db8: 340006a0 cbz w0, 2e8c <wq_pool_ids_show+0x134> unbound_pwq_by_node(wq, node)->pool->id); 2dbc: aa1303e0 mov x0, x19 2dc0: 97ffffc2 bl 2cc8 <unbound_pwq_by_node.constprop.15> written += scnprintf(buf + written, PAGE_SIZE - written, 2dc4: f9400000 ldr x0, [x0] 2dc8: 52800004 mov w4, #0x0 // #0 2dcc: d2820001 mov x1, #0x1000 // #4096 2dd0: 90000003 adrp x3, 0 <pwq_activate_delayed_work> 2dd4: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 2dd8: 91000063 add x3, x3, #0x0 2ddc: b940f805 ldr w5, [x0, #248] 2de0: 91000042 add x2, x2, #0x0 2de4: aa1403e0 mov x0, x20 
2de8: 94000000 bl 0 <scnprintf> 2dec: 2a0003f3 mov w19, w0 written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); 2df0: 93407c00 sxtw x0, w0 2df4: d2820001 mov x1, #0x1000 // #4096 2df8: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 2dfc: cb000021 sub x1, x1, x0 2e00: 91000042 add x2, x2, #0x0 2e04: 8b000280 add x0, x20, x0 2e08: 94000000 bl 0 <scnprintf> 2e0c: 2a0003f4 mov w20, w0 RCU_LOCKDEP_WARN(!rcu_is_watching(), 2e10: 94000000 bl 0 <debug_lockdep_rcu_enabled> 2e14: 340000a0 cbz w0, 2e28 <wq_pool_ids_show+0xd0> 2e18: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 2e1c: 910002d6 add x22, x22, #0x0 2e20: 39401ec0 ldrb w0, [x22, #7] 2e24: 340001c0 cbz w0, 2e5c <wq_pool_ids_show+0x104> __rcu_read_unlock(); 2e28: 94000000 bl 0 <__rcu_read_unlock> 2e2c: 0b140273 add w19, w19, w20 lock_release(map, 1, _THIS_IP_); 2e30: aa1503e0 mov x0, x21 2e34: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 2e38: 91000042 add x2, x2, #0x0 2e3c: 52800021 mov w1, #0x1 // #1 2e40: 94000000 bl 0 <lock_release> static inline void put_online_cpus(void) { cpus_read_unlock(); } 2e44: 94000000 bl 0 <cpus_read_unlock> return written; 2e48: 93407e60 sxtw x0, w19 } 2e4c: a94153f3 ldp x19, x20, [sp, #16] 2e50: a9425bf5 ldp x21, x22, [sp, #32] 2e54: a8c37bfd ldp x29, x30, [sp], #48 2e58: d65f03c0 ret RCU_LOCKDEP_WARN(!rcu_is_watching(), 2e5c: 94000000 bl 0 <rcu_is_watching> 2e60: 72001c1f tst w0, #0xff 2e64: 54fffe21 b.ne 2e28 <wq_pool_ids_show+0xd0> // b.any 2e68: 52800023 mov w3, #0x1 // #1 2e6c: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 2e70: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 2e74: 91000042 add x2, x2, #0x0 2e78: 91000000 add x0, x0, #0x0 2e7c: 528051a1 mov w1, #0x28d // #653 2e80: 39001ec3 strb w3, [x22, #7] 2e84: 94000000 bl 0 <lockdep_rcu_suspicious> 2e88: 17ffffe8 b 2e28 <wq_pool_ids_show+0xd0> RCU_LOCKDEP_WARN(!rcu_is_watching(), 2e8c: 94000000 bl 0 <rcu_is_watching> 2e90: 72001c1f tst w0, #0xff 2e94: 54fff941 b.ne 2dbc <wq_pool_ids_show+0x64> // b.any 2e98: 52800023 mov w3, #0x1 // #1 2e9c: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 2ea0: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 2ea4: 91000042 add x2, x2, #0x0 2ea8: 91000000 add x0, x0, #0x0 2eac: 52804b41 mov w1, #0x25a // #602 2eb0: 39001ac3 strb w3, [x22, #6] 2eb4: 94000000 bl 0 <lockdep_rcu_suspicious> 2eb8: 17ffffc1 b 2dbc <wq_pool_ids_show+0x64> 2ebc: d503201f nop 0000000000002ec0 <__queue_work>: { 2ec0: a9b97bfd stp x29, x30, [sp, #-112]! 
2ec4: 910003fd mov x29, sp 2ec8: a9025bf5 stp x21, x22, [sp, #32] 2ecc: aa0103f6 mov x22, x1 2ed0: a9046bf9 stp x25, x26, [sp, #64] 2ed4: 2a0003f9 mov w25, w0 unsigned int req_cpu = cpu; 2ed8: 2a0003fa mov w26, w0 { 2edc: a90573fb stp x27, x28, [sp, #80] 2ee0: aa0203fc mov x28, x2 if (unlikely(wq->flags & __WQ_DRAINING) && 2ee4: b9420020 ldr w0, [x1, #512] 2ee8: 37801bc0 tbnz w0, #16, 3260 <__queue_work+0x3a0> lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 2eec: 90000015 adrp x21, 0 <rcu_lock_map> 2ef0: 910002b5 add x21, x21, #0x0 2ef4: a90153f3 stp x19, x20, [sp, #16] 2ef8: a90363f7 stp x23, x24, [sp, #48] __rcu_read_lock(); 2efc: 94000000 bl 0 <__rcu_read_lock> lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 2f00: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 2f04: aa1503e0 mov x0, x21 2f08: 910000c6 add x6, x6, #0x0 2f0c: d2800005 mov x5, #0x0 // #0 2f10: 52800004 mov w4, #0x0 // #0 2f14: 52800043 mov w3, #0x2 // #2 2f18: 52800002 mov w2, #0x0 // #0 2f1c: 52800001 mov w1, #0x0 // #0 2f20: 94000000 bl 0 <lock_acquire> RCU_LOCKDEP_WARN(!rcu_is_watching(), 2f24: 94000000 bl 0 <debug_lockdep_rcu_enabled> 2f28: 35000c20 cbnz w0, 30ac <__queue_work+0x1ec> return val * GOLDEN_RATIO_64 >> (64 - bits); 2f2c: d2907d74 mov x20, #0x83eb // #33771 if (likely(!wq_debug_force_rr_cpu)) { 2f30: 90000017 adrp x23, 0 <pwq_activate_delayed_work> 2f34: f2b016b4 movk x20, #0x80b5, lsl #16 2f38: 910002f7 add x23, x23, #0x0 2f3c: f2d0c8d4 movk x20, #0x8646, lsl #32 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 2f40: 90000018 adrp x24, 0 <__per_cpu_offset> 2f44: f2ec3914 movk x20, #0x61c8, lsl #48 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); 2f48: 910142fb add x27, x23, #0x50 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 2f4c: 91000318 add x24, x24, #0x0 2f50: 9b147f94 mul x20, x28, x20 2f54: d37afe94 lsr x20, x20, #58 if (wq->flags & WQ_UNBOUND) { 2f58: b94202c0 ldr w0, [x22, #512] 2f5c: 36080720 tbz w0, #1, 3040 <__queue_work+0x180> if (req_cpu == WORK_CPU_UNBOUND) 2f60: 7101035f cmp w26, #0x40 2f64: 54001220 b.eq 31a8 <__queue_work+0x2e8> // b.none pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); 2f68: aa1603e0 mov x0, x22 2f6c: 97ffff57 bl 2cc8 <unbound_pwq_by_node.constprop.15> 2f70: aa0003f3 mov x19, x0 last_pool = get_work_pool(work); 2f74: aa1c03e0 mov x0, x28 2f78: 97fffabe bl 1a70 <get_work_pool> 2f7c: aa0003e2 mov x2, x0 if (last_pool && last_pool != pwq->pool) { 2f80: b4000720 cbz x0, 3064 <__queue_work+0x1a4> 2f84: f9400261 ldr x1, [x19] 2f88: eb00003f cmp x1, x0 2f8c: 54000b01 b.ne 30ec <__queue_work+0x22c> // b.any spin_lock(&pwq->pool->lock); 2f90: aa0203e0 mov x0, x2 2f94: 94000000 bl 0 <rt_spin_lock> if (unlikely(!pwq->refcnt)) { 2f98: b9401a60 ldr w0, [x19, #24] 2f9c: 34000c20 cbz w0, 3120 <__queue_work+0x260> 2fa0: aa1c03e0 mov x0, x28 2fa4: f8408c01 ldr x1, [x0, #8]! 
if (WARN_ON(!list_empty(&work->entry))) 2fa8: eb01001f cmp x0, x1 2fac: 540016c1 b.ne 3284 <__queue_work+0x3c4> // b.any pwq->nr_in_flight[pwq->work_color]++; 2fb0: b9401263 ldr w3, [x19, #16] 2fb4: 8b23ca61 add x1, x19, w3, sxtw #2 return color << WORK_STRUCT_COLOR_SHIFT; 2fb8: 531c6c63 lsl w3, w3, #4 pwq->nr_in_flight[pwq->work_color]++; 2fbc: b9401c20 ldr w0, [x1, #28] 2fc0: 11000400 add w0, w0, #0x1 2fc4: b9001c20 str w0, [x1, #28] if (likely(pwq->nr_active < pwq->max_active)) { 2fc8: 294b0660 ldp w0, w1, [x19, #88] 2fcc: 6b01001f cmp w0, w1 2fd0: 540015ea b.ge 328c <__queue_work+0x3cc> // b.tcont worklist = &pwq->pool->worklist; 2fd4: f9400261 ldr x1, [x19] pwq->nr_active++; 2fd8: 11000400 add w0, w0, #0x1 2fdc: b9005a60 str w0, [x19, #88] worklist = &pwq->pool->worklist; 2fe0: 91042022 add x2, x1, #0x108 2fe4: f9408420 ldr x0, [x1, #264] if (list_empty(worklist)) 2fe8: eb00005f cmp x2, x0 2fec: 54000c60 b.eq 3178 <__queue_work+0x2b8> // b.none insert_work(pwq, work, worklist, work_flags); 2ff0: aa1c03e1 mov x1, x28 2ff4: aa1303e0 mov x0, x19 2ff8: 97fffcda bl 2360 <insert_work> spin_unlock(&pwq->pool->lock); 2ffc: f9400260 ldr x0, [x19] 3000: 94000000 bl 0 <rt_spin_unlock> RCU_LOCKDEP_WARN(!rcu_is_watching(), 3004: 94000000 bl 0 <debug_lockdep_rcu_enabled> 3008: 35000320 cbnz w0, 306c <__queue_work+0x1ac> __rcu_read_unlock(); 300c: 94000000 bl 0 <__rcu_read_unlock> lock_release(map, 1, _THIS_IP_); 3010: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 3014: aa1503e0 mov x0, x21 3018: 91000042 add x2, x2, #0x0 301c: 52800021 mov w1, #0x1 // #1 3020: 94000000 bl 0 <lock_release> 3024: a94153f3 ldp x19, x20, [sp, #16] 3028: a94363f7 ldp x23, x24, [sp, #48] } 302c: a9425bf5 ldp x21, x22, [sp, #32] 3030: a9446bf9 ldp x25, x26, [sp, #64] 3034: a94573fb ldp x27, x28, [sp, #80] 3038: a8c77bfd ldp x29, x30, [sp], #112 303c: d65f03c0 ret if (req_cpu == WORK_CPU_UNBOUND) 3040: 7101035f cmp w26, #0x40 3044: 54000a20 b.eq 3188 <__queue_work+0x2c8> // b.none pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 3048: f879db00 ldr x0, [x24, w25, sxtw #3] 304c: f94106d3 ldr x19, [x22, #520] 3050: 8b000273 add x19, x19, x0 last_pool = get_work_pool(work); 3054: aa1c03e0 mov x0, x28 3058: 97fffa86 bl 1a70 <get_work_pool> 305c: aa0003e2 mov x2, x0 if (last_pool && last_pool != pwq->pool) { 3060: b5fff920 cbnz x0, 2f84 <__queue_work+0xc4> 3064: f9400262 ldr x2, [x19] 3068: 17ffffca b 2f90 <__queue_work+0xd0> RCU_LOCKDEP_WARN(!rcu_is_watching(), 306c: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 3070: 91000273 add x19, x19, #0x0 3074: 39401e60 ldrb w0, [x19, #7] 3078: 35fffca0 cbnz w0, 300c <__queue_work+0x14c> 307c: 94000000 bl 0 <rcu_is_watching> 3080: 72001c1f tst w0, #0xff 3084: 54fffc41 b.ne 300c <__queue_work+0x14c> // b.any 3088: 52800023 mov w3, #0x1 // #1 308c: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 3090: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 3094: 91000042 add x2, x2, #0x0 3098: 91000000 add x0, x0, #0x0 309c: 528051a1 mov w1, #0x28d // #653 30a0: 39001e63 strb w3, [x19, #7] 30a4: 94000000 bl 0 <lockdep_rcu_suspicious> 30a8: 17ffffd9 b 300c <__queue_work+0x14c> RCU_LOCKDEP_WARN(!rcu_is_watching(), 30ac: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 30b0: 91000273 add x19, x19, #0x0 30b4: 39401a60 ldrb w0, [x19, #6] 30b8: 35fff3a0 cbnz w0, 2f2c <__queue_work+0x6c> 30bc: 94000000 bl 0 <rcu_is_watching> 30c0: 72001c1f tst w0, #0xff 30c4: 54fff341 b.ne 2f2c <__queue_work+0x6c> // b.any 30c8: 52800023 mov w3, #0x1 // #1 30cc: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 30d0: 90000000 
adrp x0, 0 <pwq_activate_delayed_work> 30d4: 91000042 add x2, x2, #0x0 30d8: 91000000 add x0, x0, #0x0 30dc: 52804b41 mov w1, #0x25a // #602 30e0: 39001a63 strb w3, [x19, #6] 30e4: 94000000 bl 0 <lockdep_rcu_suspicious> 30e8: 17ffff91 b 2f2c <__queue_work+0x6c> spin_lock(&last_pool->lock); 30ec: f90037e0 str x0, [sp, #104] 30f0: 94000000 bl 0 <rt_spin_lock> hash_for_each_possible(pool->busy_hash, worker, hentry, 30f4: f94037e2 ldr x2, [sp, #104] 30f8: d37d1680 ubfiz x0, x20, #3, #6 30fc: 8b000040 add x0, x2, x0 3100: f940f800 ldr x0, [x0, #496] 3104: b5000260 cbnz x0, 3150 <__queue_work+0x290> spin_unlock(&last_pool->lock); 3108: aa0203e0 mov x0, x2 310c: 94000000 bl 0 <rt_spin_unlock> spin_lock(&pwq->pool->lock); 3110: f9400260 ldr x0, [x19] 3114: 94000000 bl 0 <rt_spin_lock> if (unlikely(!pwq->refcnt)) { 3118: b9401a60 ldr w0, [x19, #24] 311c: 35fff420 cbnz w0, 2fa0 <__queue_work+0xe0> if (wq->flags & WQ_UNBOUND) { 3120: b94202c0 ldr w0, [x22, #512] 3124: 36080d80 tbz w0, #1, 32d4 <__queue_work+0x414> spin_unlock(&pwq->pool->lock); 3128: f9400260 ldr x0, [x19] 312c: 94000000 bl 0 <rt_spin_unlock> unsigned long get_wchan(struct task_struct *p); static inline void cpu_relax(void) { asm volatile("yield" ::: "memory"); 3130: d503203f yield goto retry; 3134: 17ffff89 b 2f58 <__queue_work+0x98> if (worker->current_work == work && 3138: f9400c03 ldr x3, [x0, #24] 313c: f9400f81 ldr x1, [x28, #24] 3140: eb01007f cmp x3, x1 3144: 540000e0 b.eq 3160 <__queue_work+0x2a0> // b.none hash_for_each_possible(pool->busy_hash, worker, hentry, 3148: f9400000 ldr x0, [x0] 314c: b4fffde0 cbz x0, 3108 <__queue_work+0x248> if (worker->current_work == work && 3150: f9400801 ldr x1, [x0, #16] 3154: eb01039f cmp x28, x1 3158: 54ffff81 b.ne 3148 <__queue_work+0x288> // b.any 315c: 17fffff7 b 3138 <__queue_work+0x278> if (worker && worker->current_pwq->wq == wq) { 3160: f9401000 ldr x0, [x0, #32] 3164: f9400401 ldr x1, [x0, #8] 3168: eb16003f cmp x1, x22 316c: 54fffce1 b.ne 3108 <__queue_work+0x248> // b.any pwq = worker->current_pwq; 3170: aa0003f3 mov x19, x0 3174: 17ffff89 b 2f98 <__queue_work+0xd8> pwq->pool->watchdog_ts = jiffies; 3178: 90000000 adrp x0, 0 <jiffies> 317c: f9400000 ldr x0, [x0] 3180: f9008020 str x0, [x1, #256] 3184: 17ffff9b b 2ff0 <__queue_work+0x130> cpu = raw_smp_processor_id(); 3188: 90000000 adrp x0, 0 <cpu_number> 318c: 91000000 add x0, x0, #0x0 /* * We want to allow caching the value, so avoid using volatile and * instead use a fake stack read to hazard against barrier(). 
*/ asm(ALTERNATIVE("mrs %0, tpidr_el1", 3190: d538d081 mrs x1, tpidr_el1 3194: b8616819 ldr w25, [x0, x1] pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 3198: f94106d3 ldr x19, [x22, #520] 319c: f879db00 ldr x0, [x24, w25, sxtw #3] 31a0: 8b000273 add x19, x19, x0 31a4: 17ffffac b 3054 <__queue_work+0x194> if (likely(!wq_debug_force_rr_cpu)) { 31a8: 3949a2e1 ldrb w1, [x23, #616] cpu = wq_select_unbound_cpu(raw_smp_processor_id()); 31ac: 90000000 adrp x0, 0 <cpu_number> 31b0: d538d082 mrs x2, tpidr_el1 31b4: 91000000 add x0, x0, #0x0 31b8: b8606859 ldr w25, [x2, x0] if (likely(!wq_debug_force_rr_cpu)) { 31bc: 350007c1 cbnz w1, 32b4 <__queue_work+0x3f4> return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); 31c0: 7100033f cmp w25, #0x0 31c4: 1100ff20 add w0, w25, #0x3f 31c8: 1a99b000 csel w0, w0, w25, lt // lt = tstop 31cc: 13067c00 asr w0, w0, #6 31d0: 93407c00 sxtw x0, w0 31d4: f8607b60 ldr x0, [x27, x0, lsl #3] 31d8: 9ad92400 lsr x0, x0, x25 if (cpumask_test_cpu(cpu, wq_unbound_cpumask)) 31dc: 3707ec60 tbnz w0, #0, 2f68 <__queue_work+0xa8> if (cpumask_empty(wq_unbound_cpumask)) 31e0: f9402ae0 ldr x0, [x23, #80] 31e4: b4ffec20 cbz x0, 2f68 <__queue_work+0xa8> new_cpu = __this_cpu_read(wq_rr_cpu_last); 31e8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 31ec: 91000000 add x0, x0, #0x0 31f0: 94000000 bl 0 <__this_cpu_preempt_check> 31f4: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 31f8: 91000013 add x19, x0, #0x0 31fc: d538d081 mrs x1, tpidr_el1 3200: aa1303e0 mov x0, x19 new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); 3204: 90000003 adrp x3, 0 <__cpu_online_mask> 3208: b8616800 ldr w0, [x0, x1] 320c: 91000063 add x3, x3, #0x0 3210: aa0303e2 mov x2, x3 3214: aa1b03e1 mov x1, x27 3218: 94000000 bl 0 <cpumask_next_and> 321c: 2a0003e1 mov w1, w0 if (unlikely(new_cpu >= nr_cpu_ids)) { 3220: 90000004 adrp x4, 0 <nr_cpu_ids> 3224: b9400080 ldr w0, [x4] 3228: 6b00003f cmp w1, w0 322c: 90000000 adrp x0, 0 <__cpu_online_mask> 3230: 91000003 add x3, x0, #0x0 3234: 540006a2 b.cs 3308 <__queue_work+0x448> // b.hs, b.nlast return new_cpu; 3238: 2a0103f9 mov w25, w1 __this_cpu_write(wq_rr_cpu_last, new_cpu); 323c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 3240: 91000000 add x0, x0, #0x0 return new_cpu; 3244: b9006be1 str w1, [sp, #104] __this_cpu_write(wq_rr_cpu_last, new_cpu); 3248: 94000000 bl 0 <__this_cpu_preempt_check> 324c: b9406be1 ldr w1, [sp, #104] 3250: aa1303e0 mov x0, x19 3254: d538d082 mrs x2, tpidr_el1 3258: b8226801 str w1, [x0, x2] return new_cpu; 325c: 17ffff43 b 2f68 <__queue_work+0xa8> 3260: d5384100 mrs x0, sp_el0 3264: b9401001 ldr w1, [x0, #16] if (in_task() && (current->flags & PF_WQ_WORKER)) 3268: 12183021 and w1, w1, #0x1fff00 326c: 12106021 and w1, w1, #0xffff01ff 3270: 35000061 cbnz w1, 327c <__queue_work+0x3bc> 3274: b9403401 ldr w1, [x0, #52] 3278: 37280101 tbnz w1, #5, 3298 <__queue_work+0x3d8> WARN_ON_ONCE(!is_chained_work(wq))) 327c: d4210000 brk #0x800 3280: 17ffff6b b 302c <__queue_work+0x16c> if (WARN_ON(!list_empty(&work->entry))) 3284: d4210000 brk #0x800 3288: 17ffff5d b 2ffc <__queue_work+0x13c> work_flags |= WORK_STRUCT_DELAYED; 328c: 321f0063 orr w3, w3, #0x2 worklist = &pwq->delayed_works; 3290: 91018262 add x2, x19, #0x60 3294: 17ffff57 b 2ff0 <__queue_work+0x130> return kthread_data(current); 3298: 94000000 bl 0 <kthread_data> return worker && worker->current_pwq->wq == wq; 329c: b4ffff00 cbz x0, 327c <__queue_work+0x3bc> 32a0: f9401000 ldr x0, [x0, #32] 32a4: f9400400 ldr x0, [x0, #8] 32a8: eb0002df cmp x22, x0 32ac: 54fffe81 
b.ne 327c <__queue_work+0x3bc> // b.any 32b0: 17ffff0f b 2eec <__queue_work+0x2c> } else if (!printed_dbg_warning) { 32b4: 3949a6e0 ldrb w0, [x23, #617] 32b8: 35fff940 cbnz w0, 31e0 <__queue_work+0x320> pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n"); 32bc: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 32c0: 91000000 add x0, x0, #0x0 32c4: 94000000 bl 0 <printk> printed_dbg_warning = true; 32c8: 52800020 mov w0, #0x1 // #1 32cc: 3909a6e0 strb w0, [x23, #617] 32d0: 17ffffc4 b 31e0 <__queue_work+0x320> WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt", 32d4: 90000003 adrp x3, 0 <pwq_activate_delayed_work> 32d8: 91000063 add x3, x3, #0x0 32dc: 39402460 ldrb w0, [x3, #9] 32e0: 35ffe600 cbnz w0, 2fa0 <__queue_work+0xe0> 32e4: 52800024 mov w4, #0x1 // #1 32e8: 2a1903e2 mov w2, w25 32ec: 9106a2c1 add x1, x22, #0x1a8 32f0: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 32f4: 91000000 add x0, x0, #0x0 32f8: 39002464 strb w4, [x3, #9] 32fc: 94000000 bl 0 <printk> 3300: d4210000 brk #0x800 3304: 17ffff27 b 2fa0 <__queue_work+0xe0> new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); 3308: aa1b03e1 mov x1, x27 330c: aa0303e2 mov x2, x3 3310: 12800000 mov w0, #0xffffffff // #-1 3314: 94000000 bl 0 <cpumask_next_and> if (unlikely(new_cpu >= nr_cpu_ids)) 3318: 90000004 adrp x4, 0 <nr_cpu_ids> new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); 331c: 2a0003e1 mov w1, w0 if (unlikely(new_cpu >= nr_cpu_ids)) 3320: b9400080 ldr w0, [x4] 3324: 6b00003f cmp w1, w0 3328: 54fff883 b.cc 3238 <__queue_work+0x378> // b.lo, b.ul, b.last 332c: 17ffff0f b 2f68 <__queue_work+0xa8> 0000000000003330 <queue_work_on>: { 3330: a9bc7bfd stp x29, x30, [sp, #-64]! 3334: 910003fd mov x29, sp 3338: a9025bf5 stp x21, x22, [sp, #32] local_lock_irqsave(pendingb_lock,flags); 333c: 90000015 adrp x21, 0 <__per_cpu_offset> 3340: 910002b5 add x21, x21, #0x0 { 3344: aa0203f6 mov x22, x2 3348: a90153f3 stp x19, x20, [sp, #16] local_lock_irqsave(pendingb_lock,flags); 334c: 90000013 adrp x19, 0 <pwq_activate_delayed_work> { 3350: a90363f7 stp x23, x24, [sp, #48] 3354: aa0103f8 mov x24, x1 3358: 2a0003f7 mov w23, w0 local_lock_irqsave(pendingb_lock,flags); 335c: 94000000 bl 0 <migrate_disable> 3360: 91000273 add x19, x19, #0x0 3364: 94000000 bl 0 <debug_smp_processor_id> 3368: 91002274 add x20, x19, #0x8 336c: f8605aa0 ldr x0, [x21, w0, uxtw #3] 3370: d5384101 mrs x1, sp_el0 3374: 8b000294 add x20, x20, x0 __local_unlock_irq(&per_cpu(lvar, cpu)); \ } while (0) static inline int __local_lock_irqsave(struct local_irq_lock *lv) { if (lv->owner != current) { 3378: f9407a80 ldr x0, [x20, #240] 337c: eb01001f cmp x0, x1 3380: 540005a0 b.eq 3434 <queue_work_on+0x104> // b.none spin_lock_irqsave(&lv->lock, lv->flags); 3384: f900829f str xzr, [x20, #256] 3388: aa1403e0 mov x0, x20 338c: 94000000 bl 0 <rt_spin_lock> LL_WARN(lv->owner); 3390: f9407a80 ldr x0, [x20, #240] 3394: b5000880 cbnz x0, 34a4 <queue_work_on+0x174> LL_WARN(lv->nestcnt); 3398: b940fa80 ldr w0, [x20, #248] 339c: 35000800 cbnz w0, 349c <queue_work_on+0x16c> 33a0: d5384101 mrs x1, sp_el0 lv->nestcnt = 1; 33a4: 52800020 mov w0, #0x1 // #1 lv->owner = current; 33a8: f9007a81 str x1, [x20, #240] lv->nestcnt = 1; 33ac: b900fa80 str w0, [x20, #248] 33b0: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 33b4: 91000000 add x0, x0, #0x0 bool ret = false; 33b8: 52800014 mov w20, #0x0 // #0 local_lock_irqsave(pendingb_lock,flags); 33bc: 94000000 bl 0 <__this_cpu_preempt_check> if 
(!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 33c0: aa1603e1 mov x1, x22 33c4: 52800000 mov w0, #0x0 // #0 33c8: 94000000 bl 0 <test_and_set_bit> 33cc: 340004c0 cbz w0, 3464 <queue_work_on+0x134> local_unlock_irqrestore(pendingb_lock, flags); 33d0: 91002261 add x1, x19, #0x8 33d4: aa0103f3 mov x19, x1 33d8: 94000000 bl 0 <debug_smp_processor_id> 33dc: f8605aa1 ldr x1, [x21, w0, uxtw #3] 33e0: 8b010261 add x1, x19, x1 } while (0) static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, unsigned long flags) { LL_WARN(!lv->nestcnt); 33e4: b940f820 ldr w0, [x1, #248] 33e8: 34000560 cbz w0, 3494 <queue_work_on+0x164> LL_WARN(lv->owner != current); 33ec: f9407822 ldr x2, [x1, #240] 33f0: d5384100 mrs x0, sp_el0 33f4: eb00005f cmp x2, x0 33f8: 540005a1 b.ne 34ac <queue_work_on+0x17c> // b.any if (--lv->nestcnt) 33fc: b940f820 ldr w0, [x1, #248] 3400: 51000400 sub w0, w0, #0x1 3404: b900f820 str w0, [x1, #248] 3408: 350000a0 cbnz w0, 341c <queue_work_on+0xec> return 0; lv->owner = NULL; spin_unlock_irqrestore(&lv->lock, lv->flags); 340c: aa0103e0 mov x0, x1 lv->owner = NULL; 3410: f900783f str xzr, [x1, #240] spin_unlock_irqrestore(&lv->lock, lv->flags); 3414: 94000000 bl 0 <rt_spin_unlock> 3418: 94000000 bl 0 <migrate_enable> } 341c: 2a1403e0 mov w0, w20 3420: a94153f3 ldp x19, x20, [sp, #16] 3424: a9425bf5 ldp x21, x22, [sp, #32] 3428: a94363f7 ldp x23, x24, [sp, #48] 342c: a8c47bfd ldp x29, x30, [sp], #64 3430: d65f03c0 ret lv->nestcnt++; 3434: b940fa80 ldr w0, [x20, #248] 3438: 11000400 add w0, w0, #0x1 343c: b900fa80 str w0, [x20, #248] bool ret = false; 3440: 52800014 mov w20, #0x0 // #0 local_lock_irqsave(pendingb_lock,flags); 3444: 94000000 bl 0 <migrate_enable> 3448: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 344c: 91000000 add x0, x0, #0x0 3450: 94000000 bl 0 <__this_cpu_preempt_check> if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 3454: aa1603e1 mov x1, x22 3458: 52800000 mov w0, #0x0 // #0 345c: 94000000 bl 0 <test_and_set_bit> 3460: 35fffb80 cbnz w0, 33d0 <queue_work_on+0xa0> __queue_work(cpu, wq, work); 3464: aa1603e2 mov x2, x22 3468: aa1803e1 mov x1, x24 346c: 2a1703e0 mov w0, w23 3470: 97fffe94 bl 2ec0 <__queue_work> local_unlock_irqrestore(pendingb_lock, flags); 3474: 91002261 add x1, x19, #0x8 ret = true; 3478: 52800034 mov w20, #0x1 // #1 local_unlock_irqrestore(pendingb_lock, flags); 347c: aa0103f3 mov x19, x1 3480: 94000000 bl 0 <debug_smp_processor_id> 3484: f8605aa1 ldr x1, [x21, w0, uxtw #3] 3488: 8b010261 add x1, x19, x1 LL_WARN(!lv->nestcnt); 348c: b940f820 ldr w0, [x1, #248] 3490: 35fffae0 cbnz w0, 33ec <queue_work_on+0xbc> 3494: d4210000 brk #0x800 3498: 17ffffd5 b 33ec <queue_work_on+0xbc> LL_WARN(lv->nestcnt); 349c: d4210000 brk #0x800 34a0: 17ffffc0 b 33a0 <queue_work_on+0x70> LL_WARN(lv->owner); 34a4: d4210000 brk #0x800 34a8: 17ffffbc b 3398 <queue_work_on+0x68> LL_WARN(lv->owner != current); 34ac: d4210000 brk #0x800 34b0: 17ffffd3 b 33fc <queue_work_on+0xcc> 34b4: d503201f nop 00000000000034b8 <put_pwq>: { 34b8: a9be7bfd stp x29, x30, [sp, #-32]! 
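For reference, the interleaved source above corresponds roughly to the following shape of queue_work_on() in the -rt tree (simplified sketch; pendingb_lock/local_lock_irqsave is the RT replacement for the mainline local_irq_save):

	bool queue_work_on(int cpu, struct workqueue_struct *wq,
			   struct work_struct *work)
	{
		bool ret = false;
		unsigned long flags;

		local_lock_irqsave(pendingb_lock, flags);

		/* only the task that wins the PENDING bit actually queues */
		if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT,
				      work_data_bits(work))) {
			__queue_work(cpu, wq, work);
			ret = true;
		}

		local_unlock_irqrestore(pendingb_lock, flags);
		return ret;
	}

The inlined __local_lock_irqsave()/LL_WARN() sequences visible in the disassembly come from that local_lock.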
lockdep_assert_held(&pwq->pool->lock); 34bc: 90000001 adrp x1, 0 <debug_locks> { 34c0: 910003fd mov x29, sp 34c4: f9000bf3 str x19, [sp, #16] 34c8: aa0003f3 mov x19, x0 lockdep_assert_held(&pwq->pool->lock); 34cc: b9400020 ldr w0, [x1] 34d0: 35000100 cbnz w0, 34f0 <put_pwq+0x38> if (likely(--pwq->refcnt)) 34d4: b9401a60 ldr w0, [x19, #24] 34d8: 51000400 sub w0, w0, #0x1 34dc: b9001a60 str w0, [x19, #24] 34e0: 340001c0 cbz w0, 3518 <put_pwq+0x60> } 34e4: f9400bf3 ldr x19, [sp, #16] 34e8: a8c27bfd ldp x29, x30, [sp], #32 34ec: d65f03c0 ret lockdep_assert_held(&pwq->pool->lock); 34f0: f9400260 ldr x0, [x19] 34f4: 12800001 mov w1, #0xffffffff // #-1 34f8: 91030000 add x0, x0, #0xc0 34fc: 94000000 bl 0 <lock_is_held_type> 3500: 35fffea0 cbnz w0, 34d4 <put_pwq+0x1c> 3504: d4210000 brk #0x800 if (likely(--pwq->refcnt)) 3508: b9401a60 ldr w0, [x19, #24] 350c: 51000400 sub w0, w0, #0x1 3510: b9001a60 str w0, [x19, #24] 3514: 35fffe80 cbnz w0, 34e4 <put_pwq+0x2c> if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) 3518: f9400660 ldr x0, [x19, #8] 351c: b9420000 ldr w0, [x0, #512] 3520: 360800e0 tbz w0, #1, 353c <put_pwq+0x84> * queued and leaves it in the same position on the kernel-global * workqueue otherwise. */ static inline bool schedule_work(struct work_struct *work) { return queue_work(system_wq, work); 3524: 90000001 adrp x1, 0 <pwq_activate_delayed_work> return queue_work_on(WORK_CPU_UNBOUND, wq, work); 3528: 91024262 add x2, x19, #0x90 352c: 52800800 mov w0, #0x40 // #64 3530: f9400021 ldr x1, [x1] 3534: 94000000 bl 3330 <queue_work_on> 3538: 17ffffeb b 34e4 <put_pwq+0x2c> 353c: d4210000 brk #0x800 3540: 17ffffe9 b 34e4 <put_pwq+0x2c> 3544: d503201f nop 0000000000003548 <put_pwq_unlocked.part.9>: static void put_pwq_unlocked(struct pool_workqueue *pwq) 3548: a9bc7bfd stp x29, x30, [sp, #-64]! 
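The put_pwq() just dumped drops a pool_workqueue reference under pool->lock; roughly (simplified sketch matching the interleaved source):

	static void put_pwq(struct pool_workqueue *pwq)
	{
		lockdep_assert_held(&pwq->pool->lock);

		if (likely(--pwq->refcnt))
			return;

		if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
			return;

		/*
		 * Last reference: destruction needs sleeping locks, so it is
		 * deferred to system_wq via the pwq's release work item; that
		 * is the bl 3330 <queue_work_on> at 3534 above.
		 */
		schedule_work(&pwq->unbound_release_work);
	}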
354c: 910003fd mov x29, sp 3550: a90153f3 stp x19, x20, [sp, #16] 3554: a9025bf5 stp x21, x22, [sp, #32] lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 3558: 90000015 adrp x21, 0 <rcu_lock_map> 355c: 910002b5 add x21, x21, #0x0 3560: f9001bf7 str x23, [sp, #48] 3564: aa0003f7 mov x23, x0 __rcu_read_lock(); 3568: 94000000 bl 0 <__rcu_read_lock> lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 356c: aa1503e0 mov x0, x21 3570: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 3574: 910000c6 add x6, x6, #0x0 3578: d2800005 mov x5, #0x0 // #0 357c: 52800004 mov w4, #0x0 // #0 3580: 52800043 mov w3, #0x2 // #2 3584: 52800002 mov w2, #0x0 // #0 3588: 52800001 mov w1, #0x0 // #0 358c: 94000000 bl 0 <lock_acquire> RCU_LOCKDEP_WARN(!rcu_is_watching(), 3590: 94000000 bl 0 <debug_lockdep_rcu_enabled> 3594: 340000a0 cbz w0, 35a8 <put_pwq_unlocked.part.9+0x60> 3598: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 359c: 91000273 add x19, x19, #0x0 35a0: 39401a60 ldrb w0, [x19, #6] 35a4: 34000900 cbz w0, 36c4 <put_pwq_unlocked.part.9+0x17c> local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); 35a8: 90000016 adrp x22, 0 <__per_cpu_offset> 35ac: 910002d6 add x22, x22, #0x0 35b0: 94000000 bl 0 <migrate_disable> 35b4: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 35b8: 94000000 bl 0 <debug_smp_processor_id> 35bc: 91000273 add x19, x19, #0x0 35c0: f8605ac0 ldr x0, [x22, w0, uxtw #3] 35c4: 91002274 add x20, x19, #0x8 35c8: 8b000294 add x20, x20, x0 spin_lock_irqsave(&lv->lock, lv->flags); 35cc: aa1403e0 mov x0, x20 35d0: f900829f str xzr, [x20, #256] 35d4: 94000000 bl 0 <rt_spin_lock> LL_WARN(lv->owner); 35d8: f9407a80 ldr x0, [x20, #240] 35dc: b5000980 cbnz x0, 370c <put_pwq_unlocked.part.9+0x1c4> LL_WARN(lv->nestcnt); 35e0: b940fa80 ldr w0, [x20, #248] 35e4: 35000880 cbnz w0, 36f4 <put_pwq_unlocked.part.9+0x1ac> 35e8: d5384101 mrs x1, sp_el0 lv->nestcnt = 1; 35ec: 52800020 mov w0, #0x1 // #1 lv->owner = current; 35f0: f9007a81 str x1, [x20, #240] lv->nestcnt = 1; 35f4: b900fa80 str w0, [x20, #248] 35f8: f94002e0 ldr x0, [x23] 35fc: 94000000 bl 0 <rt_spin_lock> put_pwq(pwq); 3600: aa1703e0 mov x0, x23 3604: 97ffffad bl 34b8 <put_pwq> local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); 3608: f94002e0 ldr x0, [x23] 360c: 94000000 bl 0 <rt_spin_unlock> 3610: 91002261 add x1, x19, #0x8 3614: aa0103f3 mov x19, x1 3618: 94000000 bl 0 <debug_smp_processor_id> 361c: f8605ac1 ldr x1, [x22, w0, uxtw #3] 3620: 8b010261 add x1, x19, x1 LL_WARN(!lv->nestcnt); 3624: b940f820 ldr w0, [x1, #248] 3628: 340006e0 cbz w0, 3704 <put_pwq_unlocked.part.9+0x1bc> LL_WARN(lv->owner != current); 362c: f9407822 ldr x2, [x1, #240] 3630: d5384100 mrs x0, sp_el0 3634: eb00005f cmp x2, x0 3638: 54000621 b.ne 36fc <put_pwq_unlocked.part.9+0x1b4> // b.any spin_unlock_irq(&lv->lock); 363c: aa0103e0 mov x0, x1 lv->owner = NULL; 3640: f900783f str xzr, [x1, #240] lv->nestcnt = 0; 3644: b900f83f str wzr, [x1, #248] spin_unlock_irq(&lv->lock); 3648: 94000000 bl 0 <rt_spin_unlock> 364c: 94000000 bl 0 <migrate_enable> RCU_LOCKDEP_WARN(!rcu_is_watching(), 3650: 94000000 bl 0 <debug_lockdep_rcu_enabled> 3654: 340000a0 cbz w0, 3668 <put_pwq_unlocked.part.9+0x120> 3658: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 365c: 91000273 add x19, x19, #0x0 3660: 39401e60 ldrb w0, [x19, #7] 3664: 34000180 cbz w0, 3694 <put_pwq_unlocked.part.9+0x14c> __rcu_read_unlock(); 3668: 94000000 bl 0 <__rcu_read_unlock> lock_release(map, 1, _THIS_IP_); 366c: aa1503e0 mov x0, x21 3670: 52800021 mov w1, #0x1 // #1 3674: 90000002 adrp x2, 0 
<pwq_activate_delayed_work> 3678: 91000042 add x2, x2, #0x0 367c: 94000000 bl 0 <lock_release> } 3680: a94153f3 ldp x19, x20, [sp, #16] 3684: a9425bf5 ldp x21, x22, [sp, #32] 3688: f9401bf7 ldr x23, [sp, #48] 368c: a8c47bfd ldp x29, x30, [sp], #64 3690: d65f03c0 ret RCU_LOCKDEP_WARN(!rcu_is_watching(), 3694: 94000000 bl 0 <rcu_is_watching> 3698: 72001c1f tst w0, #0xff 369c: 54fffe61 b.ne 3668 <put_pwq_unlocked.part.9+0x120> // b.any 36a0: 52800023 mov w3, #0x1 // #1 36a4: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 36a8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 36ac: 91000042 add x2, x2, #0x0 36b0: 91000000 add x0, x0, #0x0 36b4: 528051a1 mov w1, #0x28d // #653 36b8: 39001e63 strb w3, [x19, #7] 36bc: 94000000 bl 0 <lockdep_rcu_suspicious> 36c0: 17ffffea b 3668 <put_pwq_unlocked.part.9+0x120> RCU_LOCKDEP_WARN(!rcu_is_watching(), 36c4: 94000000 bl 0 <rcu_is_watching> 36c8: 72001c1f tst w0, #0xff 36cc: 54fff6e1 b.ne 35a8 <put_pwq_unlocked.part.9+0x60> // b.any 36d0: 52800023 mov w3, #0x1 // #1 36d4: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 36d8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 36dc: 91000042 add x2, x2, #0x0 36e0: 91000000 add x0, x0, #0x0 36e4: 52804b41 mov w1, #0x25a // #602 36e8: 39001a63 strb w3, [x19, #6] 36ec: 94000000 bl 0 <lockdep_rcu_suspicious> 36f0: 17ffffae b 35a8 <put_pwq_unlocked.part.9+0x60> LL_WARN(lv->nestcnt); 36f4: d4210000 brk #0x800 36f8: 17ffffbc b 35e8 <put_pwq_unlocked.part.9+0xa0> LL_WARN(lv->owner != current); 36fc: d4210000 brk #0x800 3700: 17ffffcf b 363c <put_pwq_unlocked.part.9+0xf4> LL_WARN(!lv->nestcnt); 3704: d4210000 brk #0x800 3708: 17ffffc9 b 362c <put_pwq_unlocked.part.9+0xe4> LL_WARN(lv->owner); 370c: d4210000 brk #0x800 3710: 17ffffb4 b 35e0 <put_pwq_unlocked.part.9+0x98> 3714: d503201f nop 0000000000003718 <apply_wqattrs_cleanup>: if (ctx) { 3718: b4000260 cbz x0, 3764 <apply_wqattrs_cleanup+0x4c> { 371c: a9be7bfd stp x29, x30, [sp, #-32]! 3720: 910003fd mov x29, sp 3724: f9000bf3 str x19, [sp, #16] 3728: aa0003f3 mov x19, x0 put_pwq_unlocked(ctx->pwq_tbl[node]); 372c: f9401400 ldr x0, [x0, #40] if (pwq) { 3730: b4000040 cbz x0, 3738 <apply_wqattrs_cleanup+0x20> 3734: 97ffff85 bl 3548 <put_pwq_unlocked.part.9> put_pwq_unlocked(ctx->dfl_pwq); 3738: f9401260 ldr x0, [x19, #32] if (pwq) { 373c: b4000040 cbz x0, 3744 <apply_wqattrs_cleanup+0x2c> 3740: 97ffff82 bl 3548 <put_pwq_unlocked.part.9> free_workqueue_attrs(ctx->attrs); 3744: f9400660 ldr x0, [x19, #8] if (attrs) { 3748: b4000040 cbz x0, 3750 <apply_wqattrs_cleanup+0x38> kfree(attrs); 374c: 94000000 bl 0 <kfree> kfree(ctx); 3750: aa1303e0 mov x0, x19 3754: 94000000 bl 0 <kfree> } 3758: f9400bf3 ldr x19, [sp, #16] 375c: a8c27bfd ldp x29, x30, [sp], #32 3760: d65f03c0 ret 3764: d65f03c0 ret 0000000000003768 <apply_wqattrs_prepare>: { 3768: a9bc7bfd stp x29, x30, [sp, #-64]! 
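put_pwq_unlocked() (dumped above as the .part.9 clone; the if (pwq) check stays inlined at its call sites in apply_wqattrs_cleanup()) just wraps put_pwq() in the RCU read lock plus the RT-style pendingb_lock/pool->lock pair; roughly (simplified sketch based on the interleaved annotations, not a verbatim copy of this tree):

	static void put_pwq_unlocked(struct pool_workqueue *pwq)
	{
		if (pwq) {
			/* pwqs and pools are RCU protected */
			rcu_read_lock();
			local_spin_lock_irq(pendingb_lock, &pwq->pool->lock);
			put_pwq(pwq);
			local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock);
			rcu_read_unlock();
		}
	}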
lockdep_assert_held(&wq_pool_mutex); 376c: 90000002 adrp x2, 0 <debug_locks> { 3770: 910003fd mov x29, sp 3774: a90363f7 stp x23, x24, [sp, #48] 3778: aa0003f8 mov x24, x0 lockdep_assert_held(&wq_pool_mutex); 377c: b9400040 ldr w0, [x2] { 3780: a90153f3 stp x19, x20, [sp, #16] 3784: a9025bf5 stp x21, x22, [sp, #32] 3788: aa0103f6 mov x22, x1 lockdep_assert_held(&wq_pool_mutex); 378c: 35000b80 cbnz w0, 38fc <apply_wqattrs_prepare+0x194> return kmem_cache_alloc_trace( 3790: 90000015 adrp x21, 0 <kmalloc_caches> 3794: 910002b5 add x21, x21, #0x0 void *ret = kmem_cache_alloc(s, flags); 3798: 52901801 mov w1, #0x80c0 // #32960 379c: 72a02801 movk w1, #0x140, lsl #16 37a0: f9401ea0 ldr x0, [x21, #56] 37a4: 94000000 bl 0 <kmem_cache_alloc> 37a8: aa0003f4 mov x20, x0 37ac: f9401ea0 ldr x0, [x21, #56] 37b0: 52901801 mov w1, #0x80c0 // #32960 37b4: 72a02801 movk w1, #0x140, lsl #16 37b8: 94000000 bl 0 <kmem_cache_alloc> 37bc: aa0003f3 mov x19, x0 37c0: f9401ea0 ldr x0, [x21, #56] if (!attrs) 37c4: b4000ad3 cbz x19, 391c <apply_wqattrs_prepare+0x1b4> 37c8: 90000017 adrp x23, 0 <__cpu_possible_mask> 37cc: 52901801 mov w1, #0x80c0 // #32960 37d0: 72a02801 movk w1, #0x140, lsl #16 37d4: f94002e2 ldr x2, [x23] 37d8: f9000662 str x2, [x19, #8] 37dc: 94000000 bl 0 <kmem_cache_alloc> 37e0: aa0003f5 mov x21, x0 37e4: b4000760 cbz x0, 38d0 <apply_wqattrs_prepare+0x168> 37e8: f94002e0 ldr x0, [x23] 37ec: f90006a0 str x0, [x21, #8] if (!ctx || !new_attrs || !tmp_attrs) 37f0: b40006d4 cbz x20, 38c8 <apply_wqattrs_prepare+0x160> return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0; 37f4: 90000000 adrp x0, 0 <pwq_activate_delayed_work> to->no_numa = from->no_numa; 37f8: 394042c4 ldrb w4, [x22, #16] *dst = *src; 37fc: f94006c2 ldr x2, [x22, #8] 3800: 390042a4 strb w4, [x21, #16] return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0; 3804: f9400003 ldr x3, [x0] ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 3808: aa1303e1 mov x1, x19 to->nice = from->nice; 380c: b94002c5 ldr w5, [x22] ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 3810: aa1803e0 mov x0, x24 3814: ea020062 ands x2, x3, x2 to->nice = from->nice; 3818: b90002a5 str w5, [x21] 381c: 9a831042 csel x2, x2, x3, ne // ne = any *dst = *src; 3820: f90006a2 str x2, [x21, #8] 3824: b9000265 str w5, [x19] ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 3828: f9000662 str x2, [x19, #8] to->no_numa = from->no_numa; 382c: 39004264 strb w4, [x19, #16] ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); 3830: 97fffc20 bl 28b0 <alloc_unbound_pwq> 3834: f9001280 str x0, [x20, #32] if (!ctx->dfl_pwq) 3838: b4000480 cbz x0, 38c8 <apply_wqattrs_prepare+0x160> if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) { 383c: 910022a2 add x2, x21, #0x8 3840: 12800001 mov w1, #0xffffffff // #-1 3844: aa1303e0 mov x0, x19 3848: 97fffcda bl 2bb0 <wq_calc_node_cpumask.constprop.13> 384c: 72001c1f tst w0, #0xff 3850: 54000301 b.ne 38b0 <apply_wqattrs_prepare+0x148> // b.any ctx->dfl_pwq->refcnt++; 3854: f9401281 ldr x1, [x20, #32] 3858: b9401820 ldr w0, [x1, #24] 385c: 11000400 add w0, w0, #0x1 3860: b9001820 str w0, [x1, #24] ctx->pwq_tbl[node] = ctx->dfl_pwq; 3864: f9401280 ldr x0, [x20, #32] 3868: f9001680 str x0, [x20, #40] return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0; 386c: f94002e1 ldr x1, [x23] ctx->attrs = new_attrs; 3870: a9004e98 stp x24, x19, [x20] to->no_numa = from->no_numa; 3874: 394042c2 ldrb w2, [x22, #16] *dst = *src; 3878: f94006c4 ldr x4, [x22, #8] kfree(attrs); 387c: aa1503e0 mov x0, x21 to->nice = 
from->nice; 3880: b94002c3 ldr w3, [x22] return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0; 3884: 8a040021 and x1, x1, x4 3888: b9000263 str w3, [x19] 388c: f9000661 str x1, [x19, #8] to->no_numa = from->no_numa; 3890: 39004262 strb w2, [x19, #16] kfree(attrs); 3894: 94000000 bl 0 <kfree> } 3898: aa1403e0 mov x0, x20 389c: a94153f3 ldp x19, x20, [sp, #16] 38a0: a9425bf5 ldp x21, x22, [sp, #32] 38a4: a94363f7 ldp x23, x24, [sp, #48] 38a8: a8c47bfd ldp x29, x30, [sp], #64 38ac: d65f03c0 ret ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); 38b0: aa1503e1 mov x1, x21 38b4: aa1803e0 mov x0, x24 38b8: 97fffbfe bl 28b0 <alloc_unbound_pwq> 38bc: f9001680 str x0, [x20, #40] if (!ctx->pwq_tbl[node]) 38c0: b5fffd60 cbnz x0, 386c <apply_wqattrs_prepare+0x104> 38c4: d503201f nop kfree(attrs); 38c8: aa1503e0 mov x0, x21 38cc: 94000000 bl 0 <kfree> 38d0: aa1303e0 mov x0, x19 38d4: 94000000 bl 0 <kfree> apply_wqattrs_cleanup(ctx); 38d8: aa1403e0 mov x0, x20 return NULL; 38dc: d2800014 mov x20, #0x0 // #0 apply_wqattrs_cleanup(ctx); 38e0: 97ffff8e bl 3718 <apply_wqattrs_cleanup> } 38e4: aa1403e0 mov x0, x20 38e8: a94153f3 ldp x19, x20, [sp, #16] 38ec: a9425bf5 ldp x21, x22, [sp, #32] 38f0: a94363f7 ldp x23, x24, [sp, #48] 38f4: a8c47bfd ldp x29, x30, [sp], #64 38f8: d65f03c0 ret 38fc: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 3900: 91000000 add x0, x0, #0x0 3904: 9102e000 add x0, x0, #0xb8 3908: 12800001 mov w1, #0xffffffff // #-1 390c: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq_pool_mutex); 3910: 35fff400 cbnz w0, 3790 <apply_wqattrs_prepare+0x28> 3914: d4210000 brk #0x800 3918: 17ffff9e b 3790 <apply_wqattrs_prepare+0x28> 391c: 52901801 mov w1, #0x80c0 // #32960 3920: 72a02801 movk w1, #0x140, lsl #16 3924: 94000000 bl 0 <kmem_cache_alloc> if (!attrs) 3928: b4fffd80 cbz x0, 38d8 <apply_wqattrs_prepare+0x170> *dst = *src; 392c: 90000002 adrp x2, 0 <__cpu_possible_mask> 3930: f9400042 ldr x2, [x2] 3934: f9000402 str x2, [x0, #8] kfree(attrs); 3938: 94000000 bl 0 <kfree> if (attrs) { 393c: 17ffffe7 b 38d8 <apply_wqattrs_prepare+0x170> 0000000000003940 <apply_workqueue_attrs_locked>: { 3940: a9be7bfd stp x29, x30, [sp, #-32]! 
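apply_wqattrs_prepare() above builds an apply_wqattrs_ctx holding the new attrs plus the unbound pwqs to install, falling back to ctx->dfl_pwq when the per-node cpumask computation says the default pwq can be reused. Its caller, apply_workqueue_attrs_locked() dumped next, then follows a prepare/commit/cleanup pattern; roughly (sketch from the mainline 4.14 shape, apply_wqattrs_commit itself is not part of this dump):

	ctx = apply_wqattrs_prepare(wq, attrs);
	if (!ctx)
		return -ENOMEM;

	apply_wqattrs_commit(ctx);	/* swap the new pwqs in; the old ones end up in ctx */
	apply_wqattrs_cleanup(ctx);	/* drop them via put_pwq_unlocked(), free attrs/ctx */

	return 0;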
3944: 910003fd mov x29, sp if (WARN_ON(!(wq->flags & WQ_UNBOUND))) 3948: b9420002 ldr w2, [x0, #512] 394c: 36080242 tbz w2, #1, 3994 <apply_workqueue_attrs_locked+0x54> 3950: f9400003 ldr x3, [x0] if (!list_empty(&wq->pwqs)) { 3954: eb03001f cmp x0, x3 3958: 54000260 b.eq 39a4 <apply_workqueue_attrs_locked+0x64> // b.none if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 395c: 379801c2 tbnz w2, #19, 3994 <apply_workqueue_attrs_locked+0x54> 3960: f9000bf3 str x19, [sp, #16] wq->flags &= ~__WQ_ORDERED; 3964: 120e7842 and w2, w2, #0xfffdffff 3968: b9020002 str w2, [x0, #512] ctx = apply_wqattrs_prepare(wq, attrs); 396c: 97ffff7f bl 3768 <apply_wqattrs_prepare> 3970: aa0003f3 mov x19, x0 if (!ctx) 3974: b40001c0 cbz x0, 39ac <apply_workqueue_attrs_locked+0x6c> apply_wqattrs_commit(ctx); 3978: 97fffcb4 bl 2c48 <apply_wqattrs_commit> apply_wqattrs_cleanup(ctx); 397c: aa1303e0 mov x0, x19 3980: 97ffff66 bl 3718 <apply_wqattrs_cleanup> return 0; 3984: f9400bf3 ldr x19, [sp, #16] 3988: 52800000 mov w0, #0x0 // #0 } 398c: a8c27bfd ldp x29, x30, [sp], #32 3990: d65f03c0 ret if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 3994: d4210000 brk #0x800 return -EINVAL; 3998: 128002a0 mov w0, #0xffffffea // #-22 } 399c: a8c27bfd ldp x29, x30, [sp], #32 39a0: d65f03c0 ret 39a4: f9000bf3 str x19, [sp, #16] 39a8: 17fffff1 b 396c <apply_workqueue_attrs_locked+0x2c> return -ENOMEM; 39ac: 12800160 mov w0, #0xfffffff4 // #-12 39b0: f9400bf3 ldr x19, [sp, #16] 39b4: 17fffff6 b 398c <apply_workqueue_attrs_locked+0x4c> 00000000000039b8 <wq_numa_store>: { 39b8: d10143ff sub sp, sp, #0x50 39bc: a9017bfd stp x29, x30, [sp, #16] 39c0: 910043fd add x29, sp, #0x10 39c4: a90253f3 stp x19, x20, [sp, #32] 39c8: 90000013 adrp x19, 0 <__stack_chk_guard> 39cc: 91000273 add x19, x19, #0x0 39d0: a9035bf5 stp x21, x22, [sp, #48] mutex_lock(&wq_pool_mutex); 39d4: 90000014 adrp x20, 0 <pwq_activate_delayed_work> 39d8: 91000294 add x20, x20, #0x0 { 39dc: a90463f7 stp x23, x24, [sp, #64] 39e0: aa0203f7 mov x23, x2 39e4: aa0303f8 mov x24, x3 return wq_dev->wq; 39e8: f85f8016 ldur x22, [x0, #-8] { 39ec: f9400260 ldr x0, [x19] 39f0: f90007e0 str x0, [sp, #8] 39f4: d2800000 mov x0, #0x0 // #0 static inline void get_online_cpus(void) { cpus_read_lock(); } 39f8: 94000000 bl 0 <cpus_read_lock> mutex_lock(&wq_pool_mutex); 39fc: aa1403e0 mov x0, x20 3a00: 94000000 bl 0 <_mutex_lock> attrs = wq_sysfs_prep_attrs(wq); 3a04: aa1603e0 mov x0, x22 3a08: 97fff3b8 bl 8e8 <wq_sysfs_prep_attrs> if (!attrs) 3a0c: b4000320 cbz x0, 3a70 <wq_numa_store+0xb8> if (sscanf(buf, "%d", &v) == 1) { 3a10: aa0003f5 mov x21, x0 3a14: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 3a18: aa1703e0 mov x0, x23 3a1c: 910013e2 add x2, sp, #0x4 3a20: 91000021 add x1, x1, #0x0 3a24: 94000000 bl 0 <sscanf> 3a28: 7100041f cmp w0, #0x1 3a2c: 540002c0 b.eq 3a84 <wq_numa_store+0xcc> // b.none mutex_unlock(&wq_pool_mutex); 3a30: aa1403e0 mov x0, x20 3a34: 94000000 bl 0 <_mutex_unlock> static inline void put_online_cpus(void) { cpus_read_unlock(); } 3a38: 94000000 bl 0 <cpus_read_unlock> kfree(attrs); 3a3c: aa1503e0 mov x0, x21 3a40: 94000000 bl 0 <kfree> 3a44: 928002a0 mov x0, #0xffffffffffffffea // #-22 } 3a48: f94007e2 ldr x2, [sp, #8] 3a4c: f9400261 ldr x1, [x19] 3a50: ca010041 eor x1, x2, x1 3a54: b50003a1 cbnz x1, 3ac8 <wq_numa_store+0x110> 3a58: a9417bfd ldp x29, x30, [sp, #16] 3a5c: a94253f3 ldp x19, x20, [sp, #32] 3a60: a9435bf5 ldp x21, x22, [sp, #48] 3a64: a94463f7 ldp x23, x24, [sp, #64] 3a68: 910143ff add sp, sp, #0x50 3a6c: d65f03c0 ret mutex_unlock(&wq_pool_mutex); 
3a70: aa1403e0 mov x0, x20 3a74: 94000000 bl 0 <_mutex_unlock> 3a78: 94000000 bl 0 <cpus_read_unlock> 3a7c: 92800160 mov x0, #0xfffffffffffffff4 // #-12 3a80: 17fffff2 b 3a48 <wq_numa_store+0x90> attrs->no_numa = !v; 3a84: b94007e2 ldr w2, [sp, #4] ret = apply_workqueue_attrs_locked(wq, attrs); 3a88: aa1503e1 mov x1, x21 3a8c: aa1603e0 mov x0, x22 attrs->no_numa = !v; 3a90: 7100005f cmp w2, #0x0 3a94: 1a9f17e2 cset w2, eq // eq = none 3a98: 390042a2 strb w2, [x21, #16] ret = apply_workqueue_attrs_locked(wq, attrs); 3a9c: 97ffffa9 bl 3940 <apply_workqueue_attrs_locked> 3aa0: 2a0003f6 mov w22, w0 mutex_unlock(&wq_pool_mutex); 3aa4: aa1403e0 mov x0, x20 3aa8: 94000000 bl 0 <_mutex_unlock> 3aac: 94000000 bl 0 <cpus_read_unlock> kfree(attrs); 3ab0: aa1503e0 mov x0, x21 3ab4: 94000000 bl 0 <kfree> return ret ?: count; 3ab8: 93407ec0 sxtw x0, w22 3abc: 710002df cmp w22, #0x0 3ac0: 9a981000 csel x0, x0, x24, ne // ne = any 3ac4: 17ffffe1 b 3a48 <wq_numa_store+0x90> } 3ac8: 94000000 bl 0 <__stack_chk_fail> 3acc: d503201f nop 0000000000003ad0 <wq_cpumask_store>: { 3ad0: a9bc7bfd stp x29, x30, [sp, #-64]! 3ad4: 910003fd mov x29, sp 3ad8: a90153f3 stp x19, x20, [sp, #16] mutex_lock(&wq_pool_mutex); 3adc: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 3ae0: 91000273 add x19, x19, #0x0 { 3ae4: a9025bf5 stp x21, x22, [sp, #32] 3ae8: aa0203f6 mov x22, x2 3aec: aa0303f5 mov x21, x3 3af0: f9001bf7 str x23, [sp, #48] return wq_dev->wq; 3af4: f85f8017 ldur x23, [x0, #-8] static inline void get_online_cpus(void) { cpus_read_lock(); } 3af8: 94000000 bl 0 <cpus_read_lock> mutex_lock(&wq_pool_mutex); 3afc: aa1303e0 mov x0, x19 3b00: 94000000 bl 0 <_mutex_lock> attrs = wq_sysfs_prep_attrs(wq); 3b04: aa1703e0 mov x0, x23 3b08: 97fff378 bl 8e8 <wq_sysfs_prep_attrs> if (!attrs) 3b0c: b40005a0 cbz x0, 3bc0 <wq_cpumask_store+0xf0> * * Returns -errno, or 0 for success. */ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) { char *nl = strchr(buf, '\n'); 3b10: 52800141 mov w1, #0xa // #10 3b14: aa0003f4 mov x20, x0 3b18: aa1603e0 mov x0, x22 3b1c: 94000000 bl 0 <strchr> unsigned int len = nl ? 
(unsigned int)(nl - buf) : strlen(buf); 3b20: 4b160001 sub w1, w0, w22 3b24: b4000460 cbz x0, 3bb0 <wq_cpumask_store+0xe0> } static inline int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *maskp, int nmaskbits) { return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); 3b28: aa1603e0 mov x0, x22 3b2c: 52800804 mov w4, #0x40 // #64 3b30: 91002283 add x3, x20, #0x8 3b34: 52800002 mov w2, #0x0 // #0 3b38: 94000000 bl 0 <__bitmap_parse> 3b3c: 2a0003f6 mov w22, w0 if (!ret) 3b40: 34000180 cbz w0, 3b70 <wq_cpumask_store+0xa0> mutex_unlock(&wq_pool_mutex); 3b44: aa1303e0 mov x0, x19 3b48: 94000000 bl 0 <_mutex_unlock> static inline void put_online_cpus(void) { cpus_read_unlock(); } 3b4c: 94000000 bl 0 <cpus_read_unlock> kfree(attrs); 3b50: aa1403e0 mov x0, x20 3b54: 94000000 bl 0 <kfree> return ret ?: count; 3b58: 93407ec0 sxtw x0, w22 } 3b5c: a94153f3 ldp x19, x20, [sp, #16] 3b60: a9425bf5 ldp x21, x22, [sp, #32] 3b64: f9401bf7 ldr x23, [sp, #48] 3b68: a8c47bfd ldp x29, x30, [sp], #64 3b6c: d65f03c0 ret ret = apply_workqueue_attrs_locked(wq, attrs); 3b70: aa1403e1 mov x1, x20 3b74: aa1703e0 mov x0, x23 3b78: 97ffff72 bl 3940 <apply_workqueue_attrs_locked> 3b7c: 2a0003f6 mov w22, w0 mutex_unlock(&wq_pool_mutex); 3b80: aa1303e0 mov x0, x19 3b84: 94000000 bl 0 <_mutex_unlock> 3b88: 94000000 bl 0 <cpus_read_unlock> kfree(attrs); 3b8c: aa1403e0 mov x0, x20 3b90: 94000000 bl 0 <kfree> return ret ?: count; 3b94: aa1503e0 mov x0, x21 3b98: 35fffe16 cbnz w22, 3b58 <wq_cpumask_store+0x88> } 3b9c: a94153f3 ldp x19, x20, [sp, #16] 3ba0: a9425bf5 ldp x21, x22, [sp, #32] 3ba4: f9401bf7 ldr x23, [sp, #48] 3ba8: a8c47bfd ldp x29, x30, [sp], #64 3bac: d65f03c0 ret 3bb0: aa1603e0 mov x0, x22 3bb4: 94000000 bl 0 <strlen> 3bb8: 2a0003e1 mov w1, w0 3bbc: 17ffffdb b 3b28 <wq_cpumask_store+0x58> mutex_unlock(&wq_pool_mutex); 3bc0: aa1303e0 mov x0, x19 3bc4: 94000000 bl 0 <_mutex_unlock> 3bc8: 94000000 bl 0 <cpus_read_unlock> 3bcc: 92800160 mov x0, #0xfffffffffffffff4 // #-12 } 3bd0: a94153f3 ldp x19, x20, [sp, #16] 3bd4: a9425bf5 ldp x21, x22, [sp, #32] 3bd8: f9401bf7 ldr x23, [sp, #48] 3bdc: a8c47bfd ldp x29, x30, [sp], #64 3be0: d65f03c0 ret 3be4: d503201f nop 0000000000003be8 <wq_nice_store>: { 3be8: a9bc7bfd stp x29, x30, [sp, #-64]! 
3bec: 910003fd mov x29, sp 3bf0: a90153f3 stp x19, x20, [sp, #16] mutex_lock(&wq_pool_mutex); 3bf4: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 3bf8: 91000273 add x19, x19, #0x0 { 3bfc: a9025bf5 stp x21, x22, [sp, #32] 3c00: aa0303f5 mov x21, x3 3c04: f9001bf7 str x23, [sp, #48] 3c08: aa0203f7 mov x23, x2 return wq_dev->wq; 3c0c: f85f8016 ldur x22, [x0, #-8] static inline void get_online_cpus(void) { cpus_read_lock(); } 3c10: 94000000 bl 0 <cpus_read_lock> mutex_lock(&wq_pool_mutex); 3c14: aa1303e0 mov x0, x19 3c18: 94000000 bl 0 <_mutex_lock> attrs = wq_sysfs_prep_attrs(wq); 3c1c: aa1603e0 mov x0, x22 3c20: 97fff332 bl 8e8 <wq_sysfs_prep_attrs> if (!attrs) 3c24: b40004a0 cbz x0, 3cb8 <wq_nice_store+0xd0> if (sscanf(buf, "%d", &attrs->nice) == 1 && 3c28: aa0003f4 mov x20, x0 3c2c: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 3c30: aa1703e0 mov x0, x23 3c34: aa1403e2 mov x2, x20 3c38: 91000021 add x1, x1, #0x0 3c3c: 94000000 bl 0 <sscanf> 3c40: 7100041f cmp w0, #0x1 3c44: 54000180 b.eq 3c74 <wq_nice_store+0x8c> // b.none mutex_unlock(&wq_pool_mutex); 3c48: aa1303e0 mov x0, x19 3c4c: 94000000 bl 0 <_mutex_unlock> static inline void put_online_cpus(void) { cpus_read_unlock(); } 3c50: 94000000 bl 0 <cpus_read_unlock> kfree(attrs); 3c54: aa1403e0 mov x0, x20 3c58: 94000000 bl 0 <kfree> 3c5c: 928002a0 mov x0, #0xffffffffffffffea // #-22 } 3c60: a94153f3 ldp x19, x20, [sp, #16] 3c64: a9425bf5 ldp x21, x22, [sp, #32] 3c68: f9401bf7 ldr x23, [sp, #48] 3c6c: a8c47bfd ldp x29, x30, [sp], #64 3c70: d65f03c0 ret attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE) 3c74: b9400280 ldr w0, [x20] 3c78: 11005000 add w0, w0, #0x14 3c7c: 71009c1f cmp w0, #0x27 3c80: 54fffe48 b.hi 3c48 <wq_nice_store+0x60> // b.pmore ret = apply_workqueue_attrs_locked(wq, attrs); 3c84: aa1403e1 mov x1, x20 3c88: aa1603e0 mov x0, x22 3c8c: 97ffff2d bl 3940 <apply_workqueue_attrs_locked> 3c90: 2a0003f6 mov w22, w0 mutex_unlock(&wq_pool_mutex); 3c94: aa1303e0 mov x0, x19 3c98: 94000000 bl 0 <_mutex_unlock> 3c9c: 94000000 bl 0 <cpus_read_unlock> kfree(attrs); 3ca0: aa1403e0 mov x0, x20 3ca4: 94000000 bl 0 <kfree> return ret ?: count; 3ca8: aa1503e0 mov x0, x21 3cac: 34fffdb6 cbz w22, 3c60 <wq_nice_store+0x78> 3cb0: 93407ec0 sxtw x0, w22 3cb4: 17ffffeb b 3c60 <wq_nice_store+0x78> mutex_unlock(&wq_pool_mutex); 3cb8: aa1303e0 mov x0, x19 3cbc: 94000000 bl 0 <_mutex_unlock> 3cc0: 94000000 bl 0 <cpus_read_unlock> 3cc4: 92800160 mov x0, #0xfffffffffffffff4 // #-12 } 3cc8: a94153f3 ldp x19, x20, [sp, #16] 3ccc: a9425bf5 ldp x21, x22, [sp, #32] 3cd0: f9401bf7 ldr x23, [sp, #48] 3cd4: a8c47bfd ldp x29, x30, [sp], #64 3cd8: d65f03c0 ret 3cdc: d503201f nop 0000000000003ce0 <wq_update_unbound_numa>: lockdep_assert_held(&wq_pool_mutex); 3ce0: 90000000 adrp x0, 0 <debug_locks> 3ce4: b9400000 ldr w0, [x0] 3ce8: 35000040 cbnz w0, 3cf0 <wq_update_unbound_numa+0x10> 3cec: d65f03c0 ret { 3cf0: a9bf7bfd stp x29, x30, [sp, #-16]! 3cf4: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 3cf8: 91000000 add x0, x0, #0x0 3cfc: 910003fd mov x29, sp 3d00: 9102e000 add x0, x0, #0xb8 3d04: 12800001 mov w1, #0xffffffff // #-1 3d08: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq_pool_mutex); 3d0c: 35000040 cbnz w0, 3d14 <wq_update_unbound_numa+0x34> 3d10: d4210000 brk #0x800 } 3d14: a8c17bfd ldp x29, x30, [sp], #16 3d18: d65f03c0 ret 3d1c: d503201f nop 0000000000003d20 <pwq_dec_nr_in_flight>: { 3d20: a9bd7bfd stp x29, x30, [sp, #-48]! 
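The sysfs store handlers above (wq_numa_store, wq_cpumask_store, wq_nice_store) all follow the same pattern: take cpus_read_lock() and wq_pool_mutex, clone the current attrs with wq_sysfs_prep_attrs(), parse the user input, and push the result through apply_workqueue_attrs_locked(). Roughly, for the nice case (simplified sketch matching the interleaved source; apply_wqattrs_lock()/unlock() are the get_online_cpus() + wq_pool_mutex helpers inlined in the dump):

	static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		struct workqueue_struct *wq = dev_to_wq(dev);
		struct workqueue_attrs *attrs;
		int ret = -ENOMEM;

		apply_wqattrs_lock();

		attrs = wq_sysfs_prep_attrs(wq);
		if (!attrs)
			goto out_unlock;

		if (sscanf(buf, "%d", &attrs->nice) == 1 &&
		    attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
			ret = apply_workqueue_attrs_locked(wq, attrs);
		else
			ret = -EINVAL;

	out_unlock:
		apply_wqattrs_unlock();
		free_workqueue_attrs(attrs);
		return ret ?: count;
	}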
if (color == WORK_NO_COLOR) 3d24: 71003c3f cmp w1, #0xf { 3d28: 910003fd mov x29, sp 3d2c: f9000bf3 str x19, [sp, #16] 3d30: aa0003f3 mov x19, x0 if (color == WORK_NO_COLOR) 3d34: 54000240 b.eq 3d7c <pwq_dec_nr_in_flight+0x5c> // b.none pwq->nr_in_flight[color]--; 3d38: 8b21c809 add x9, x0, w1, sxtw #2 3d3c: aa0003e2 mov x2, x0 3d40: b9401d20 ldr w0, [x9, #28] 3d44: 51000400 sub w0, w0, #0x1 3d48: b9001d20 str w0, [x9, #28] pwq->nr_active--; 3d4c: b9405a60 ldr w0, [x19, #88] 3d50: 51000400 sub w0, w0, #0x1 3d54: b9005a60 str w0, [x19, #88] 3d58: f8460c43 ldr x3, [x2, #96]! if (!list_empty(&pwq->delayed_works)) { 3d5c: eb03005f cmp x2, x3 3d60: 54000080 b.eq 3d70 <pwq_dec_nr_in_flight+0x50> // b.none if (pwq->nr_active < pwq->max_active) 3d64: b9405e62 ldr w2, [x19, #92] 3d68: 6b02001f cmp w0, w2 3d6c: 5400012b b.lt 3d90 <pwq_dec_nr_in_flight+0x70> // b.tstop if (likely(pwq->flush_color != color)) 3d70: b9401660 ldr w0, [x19, #20] 3d74: 6b01001f cmp w0, w1 3d78: 54000180 b.eq 3da8 <pwq_dec_nr_in_flight+0x88> // b.none put_pwq(pwq); 3d7c: aa1303e0 mov x0, x19 3d80: 97fffdce bl 34b8 <put_pwq> } 3d84: f9400bf3 ldr x19, [sp, #16] 3d88: a8c37bfd ldp x29, x30, [sp], #48 3d8c: d65f03c0 ret struct work_struct *work = list_first_entry(&pwq->delayed_works, 3d90: f9403260 ldr x0, [x19, #96] 3d94: b9002fe1 str w1, [sp, #44] pwq_activate_delayed_work(work); 3d98: d1002000 sub x0, x0, #0x8 3d9c: 97fff099 bl 0 <pwq_activate_delayed_work> 3da0: b9402fe1 ldr w1, [sp, #44] 3da4: 17fffff3 b 3d70 <pwq_dec_nr_in_flight+0x50> if (pwq->nr_in_flight[color]) 3da8: b9401d20 ldr w0, [x9, #28] 3dac: 35fffe80 cbnz w0, 3d7c <pwq_dec_nr_in_flight+0x5c> pwq->flush_color = -1; 3db0: 12800000 mov w0, #0xffffffff // #-1 3db4: b9001660 str w0, [x19, #20] if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) 3db8: f9400660 ldr x0, [x19, #8] 3dbc: 91044000 add x0, x0, #0x110 3dc0: f9800011 prfm pstl1strm, [x0] 3dc4: 885f7c01 ldxr w1, [x0] 3dc8: 51000421 sub w1, w1, #0x1 3dcc: 8802fc01 stlxr w2, w1, [x0] 3dd0: 35ffffa2 cbnz w2, 3dc4 <pwq_dec_nr_in_flight+0xa4> 3dd4: d5033bbf dmb ish 3dd8: 35fffd21 cbnz w1, 3d7c <pwq_dec_nr_in_flight+0x5c> complete(&pwq->wq->first_flusher->done); 3ddc: f9400660 ldr x0, [x19, #8] 3de0: f9408c00 ldr x0, [x0, #280] 3de4: 91006000 add x0, x0, #0x18 3de8: 94000000 bl 0 <complete> 3dec: 17ffffe4 b 3d7c <pwq_dec_nr_in_flight+0x5c> 0000000000003df0 <process_one_work>: { 3df0: d10243ff sub sp, sp, #0x90 3df4: aa0103e4 mov x4, x1 3df8: a9047bfd stp x29, x30, [sp, #64] 3dfc: 910103fd add x29, sp, #0x40 3e00: a9065bf5 stp x21, x22, [sp, #96] 3e04: 90000016 adrp x22, 0 <__stack_chk_guard> 3e08: 910002d6 add x22, x22, #0x0 3e0c: a90553f3 stp x19, x20, [sp, #80] 3e10: aa0103f3 mov x19, x1 3e14: f94002c1 ldr x1, [x22] 3e18: f9001fe1 str x1, [sp, #56] 3e1c: d2800001 mov x1, #0x0 // #0 3e20: a90763f7 stp x23, x24, [sp, #112] 3e24: aa0003f4 mov x20, x0 3e28: f8420480 ldr x0, [x4], #32 struct worker_pool *pool = worker->pool; 3e2c: f9402698 ldr x24, [x20, #72] return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); 3e30: 9278dc15 and x21, x0, #0xffffffffffffff00 3e34: f27e001f tst x0, #0x4 3e38: 9a9f12b5 csel x21, x21, xzr, ne // ne = any *to = *from; 3e3c: a9420e62 ldp x2, x3, [x19, #32] bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; 3e40: f94006a5 ldr x5, [x21, #8] 3e44: a9008fe2 stp x2, x3, [sp, #8] WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 3e48: b940ff02 ldr w2, [x24, #252] 3e4c: a9410480 ldp x0, x1, [x4, #16] 3e50: a90187e0 stp x0, x1, [sp, #24] bool cpu_intensive = pwq->wq->flags & 
WQ_CPU_INTENSIVE; 3e54: b94200b7 ldr w23, [x5, #512] 3e58: a9420480 ldp x0, x1, [x4, #32] to->class_cache[i] = NULL; 3e5c: a9017fff stp xzr, xzr, [sp, #16] 3e60: 121b02f7 and w23, w23, #0x20 *to = *from; 3e64: a90287e0 stp x0, x1, [sp, #40] WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) && 3e68: 37100122 tbnz w2, #2, 3e8c <process_one_work+0x9c> 3e6c: 90000000 adrp x0, 0 <cpu_number> 3e70: b940f301 ldr w1, [x24, #240] 3e74: 91000000 add x0, x0, #0x0 3e78: d538d082 mrs x2, tpidr_el1 3e7c: b8626800 ldr w0, [x0, x2] 3e80: 6b01001f cmp w0, w1 3e84: 54000040 b.eq 3e8c <process_one_work+0x9c> // b.none 3e88: d4210000 brk #0x800 3e8c: d2907d62 mov x2, #0x83eb // #33771 3e90: f2b016a2 movk x2, #0x80b5, lsl #16 3e94: f2d0c8c2 movk x2, #0x8646, lsl #32 3e98: f2ec3902 movk x2, #0x61c8, lsl #48 3e9c: 9b027e62 mul x2, x19, x2 3ea0: d37afc42 lsr x2, x2, #58 hash_for_each_possible(pool->busy_hash, worker, hentry, 3ea4: 8b020f00 add x0, x24, x2, lsl #3 3ea8: f940f804 ldr x4, [x0, #496] 3eac: b4001384 cbz x4, 411c <process_one_work+0x32c> 3eb0: aa0403e0 mov x0, x4 3eb4: 14000003 b 3ec0 <process_one_work+0xd0> 3eb8: f9400000 ldr x0, [x0] 3ebc: b40004c0 cbz x0, 3f54 <process_one_work+0x164> if (worker->current_work == work && 3ec0: f9400801 ldr x1, [x0, #16] 3ec4: eb01027f cmp x19, x1 3ec8: 54ffff81 b.ne 3eb8 <process_one_work+0xc8> // b.any 3ecc: f9400c03 ldr x3, [x0, #24] 3ed0: f9400e61 ldr x1, [x19, #24] 3ed4: eb01007f cmp x3, x1 3ed8: 54ffff01 b.ne 3eb8 <process_one_work+0xc8> // b.any list_for_each_entry_safe_from(work, n, NULL, entry) { 3edc: aa1303e1 mov x1, x19 move_linked_works(work, &collision->scheduled, NULL); 3ee0: 9100c006 add x6, x0, #0x30 list_for_each_entry_safe_from(work, n, NULL, entry) { 3ee4: f8408c23 ldr x3, [x1, #8]! 3ee8: d1002062 sub x2, x3, #0x8 3eec: b4001041 cbz x1, 40f4 <process_one_work+0x304> __list_del(entry->prev, entry->next); 3ef0: f9400a64 ldr x4, [x19, #16] next->prev = prev; 3ef4: f9000464 str x4, [x3, #8] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 3ef8: f9000083 str x3, [x4] __list_add(new, head->prev, head); 3efc: f9401c03 ldr x3, [x0, #56] next->prev = new; 3f00: f9001c01 str x1, [x0, #56] new->prev = prev; 3f04: a9008e66 stp x6, x3, [x19, #8] 3f08: f9000061 str x1, [x3] if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 3f0c: f9400261 ldr x1, [x19] 3f10: 37180181 tbnz w1, #3, 3f40 <process_one_work+0x150> 3f14: 14000078 b 40f4 <process_one_work+0x304> __list_del(entry->prev, entry->next); 3f18: f9400844 ldr x4, [x2, #16] next->prev = prev; 3f1c: f9000464 str x4, [x3, #8] 3f20: f9000083 str x3, [x4] __list_add(new, head->prev, head); 3f24: f9401c03 ldr x3, [x0, #56] next->prev = new; 3f28: f9001c01 str x1, [x0, #56] new->prev = prev; 3f2c: a9008c46 stp x6, x3, [x2, #8] 3f30: f9000061 str x1, [x3] 3f34: f9400041 ldr x1, [x2] list_for_each_entry_safe_from(work, n, NULL, entry) { 3f38: aa0503e2 mov x2, x5 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 3f3c: 36180dc1 tbz w1, #3, 40f4 <process_one_work+0x304> list_for_each_entry_safe_from(work, n, NULL, entry) { 3f40: aa0203e1 mov x1, x2 3f44: f8408c23 ldr x3, [x1, #8]! 
3f48: d1002065 sub x5, x3, #0x8 3f4c: b5fffe61 cbnz x1, 3f18 <process_one_work+0x128> 3f50: 14000069 b 40f4 <process_one_work+0x304> hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 3f54: 9100f842 add x2, x2, #0x3e 3f58: f90043f9 str x25, [sp, #128] n->next = first; 3f5c: f9000284 str x4, [x20] 3f60: 8b020f02 add x2, x24, x2, lsl #3 first->pprev = &n->next; 3f64: f9000494 str x20, [x4, #8] 3f68: f9000054 str x20, [x2] return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & 3f6c: aa1303e0 mov x0, x19 worker->current_work = work; 3f70: a900ce82 stp x2, x19, [x20, #8] worker->current_func = work->func; 3f74: f9400e61 ldr x1, [x19, #24] worker->current_pwq = pwq; 3f78: a901d681 stp x1, x21, [x20, #24] __list_del(entry->prev, entry->next); 3f7c: a9408662 ldp x2, x1, [x19, #8] return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & 3f80: f8408419 ldr x25, [x0], #8 next->prev = prev; 3f84: f9000441 str x1, [x2, #8] 3f88: f9000022 str x2, [x1] 3f8c: 53041f39 ubfx w25, w25, #4, #4 3f90: f9000660 str x0, [x19, #8] list->prev = list; 3f94: f9000a60 str x0, [x19, #16] if (unlikely(cpu_intensive)) 3f98: 35000d17 cbnz w23, 4138 <process_one_work+0x348> __READ_ONCE_SIZE; 3f9c: f9408700 ldr x0, [x24, #264] return !list_empty(&pool->worklist) && __need_more_worker(pool); 3fa0: 91042301 add x1, x24, #0x108 3fa4: eb00003f cmp x1, x0 3fa8: 540000a0 b.eq 3fbc <process_one_work+0x1cc> // b.none 3fac: b9458300 ldr w0, [x24, #1408] 3fb0: 35000060 cbnz w0, 3fbc <process_one_work+0x1cc> wake_up_worker(pool); 3fb4: aa1803e0 mov x0, x24 3fb8: 97fff11c bl 428 <wake_up_worker> set_work_pool_and_clear_pending(work, pool->id); 3fbc: b940fb00 ldr w0, [x24, #248] smp_wmb(); 3fc0: d5033abf dmb ishst 3fc4: f9400261 ldr x1, [x19] set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); 3fc8: 937b7c00 sbfiz x0, x0, #5, #32 WARN_ON_ONCE(!work_pending(work)); 3fcc: 36000b21 tbz w1, #0, 4130 <process_one_work+0x340> case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 3fd0: f9000260 str x0, [x19] smp_mb(); 3fd4: d5033bbf dmb ish spin_unlock_irq(&pool->lock); 3fd8: aa1803e0 mov x0, x24 3fdc: 94000000 bl 0 <rt_spin_unlock> lock_map_acquire(&pwq->wq->lockdep_map); 3fe0: f94006a0 ldr x0, [x21, #8] 3fe4: d2800005 mov x5, #0x0 // #0 3fe8: 52800024 mov w4, #0x1 // #1 3fec: 52800003 mov w3, #0x0 // #0 3ff0: 52800002 mov w2, #0x0 // #0 3ff4: 52800001 mov w1, #0x0 // #0 3ff8: 9105e000 add x0, x0, #0x178 3ffc: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 4000: 910000c6 add x6, x6, #0x0 4004: 94000000 bl 0 <lock_acquire> lock_map_acquire(&lockdep_map); 4008: d2800005 mov x5, #0x0 // #0 400c: 52800024 mov w4, #0x1 // #1 4010: 52800003 mov w3, #0x0 // #0 4014: 52800002 mov w2, #0x0 // #0 4018: 910023e0 add x0, sp, #0x8 401c: 52800001 mov w1, #0x0 // #0 4020: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 4024: 910000c6 add x6, x6, #0x0 4028: 94000000 bl 0 <lock_acquire> worker->current_func(work); 402c: f9400e81 ldr x1, [x20, #24] 4030: aa1303e0 mov x0, x19 4034: d63f0020 blr x1 lock_map_release(&lockdep_map); 4038: 910023e0 add x0, sp, #0x8 403c: 52800021 mov w1, #0x1 // #1 4040: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 4044: 91000042 add x2, x2, #0x0 4048: 94000000 bl 0 <lock_release> lock_map_release(&pwq->wq->lockdep_map); 404c: f94006a0 ldr x0, [x21, #8] 4050: 52800021 mov w1, #0x1 // #1 4054: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 4058: 91000042 add x2, x2, #0x0 405c: 9105e000 add x0, x0, #0x178 4060: 94000000 bl 0 <lock_release> 4064: d5384100 mrs x0, sp_el0 __READ_ONCE_SIZE; 
4068: b9401001 ldr w1, [x0, #16] if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 406c: 35000ba1 cbnz w1, 41e0 <process_one_work+0x3f0> 4070: 90000001 adrp x1, 0 <debug_locks> 4074: b9400021 ldr w1, [x1] 4078: 35000ae1 cbnz w1, 41d4 <process_one_work+0x3e4> cond_resched_rcu_qs(); 407c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 4080: 91000000 add x0, x0, #0x0 4084: 528113a1 mov w1, #0x89d // #2205 4088: 52800002 mov w2, #0x0 // #0 408c: 94000000 bl 0 <___might_sleep> 4090: 94000000 bl 0 <rcu_all_qs> 4094: d5384100 mrs x0, sp_el0 4098: 3951c001 ldrb w1, [x0, #1136] 409c: 72001c3f tst w1, #0xff 40a0: 54000040 b.eq 40a8 <process_one_work+0x2b8> // b.none case 1: *(volatile __u8 *)p = *(__u8 *)res; break; 40a4: 3911c01f strb wzr, [x0, #1136] spin_lock_irq(&pool->lock); 40a8: aa1803e0 mov x0, x24 40ac: 94000000 bl 0 <rt_spin_lock> if (unlikely(cpu_intensive)) 40b0: 35000697 cbnz w23, 4180 <process_one_work+0x390> return !h->pprev; 40b4: f9400680 ldr x0, [x20, #8] worker->last_func = worker->current_func; 40b8: f9400e81 ldr x1, [x20, #24] 40bc: f9004e81 str x1, [x20, #152] if (!hlist_unhashed(n)) { 40c0: b40000c0 cbz x0, 40d8 <process_one_work+0x2e8> struct hlist_node *next = n->next; 40c4: f9400281 ldr x1, [x20] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 40c8: f9000001 str x1, [x0] if (next) 40cc: b4000041 cbz x1, 40d4 <process_one_work+0x2e4> next->pprev = pprev; 40d0: f9000420 str x0, [x1, #8] h->pprev = NULL; 40d4: a9007e9f stp xzr, xzr, [x20] pwq_dec_nr_in_flight(pwq, work_color); 40d8: 2a1903e1 mov w1, w25 worker->current_func = NULL; 40dc: a9017e9f stp xzr, xzr, [x20, #16] pwq_dec_nr_in_flight(pwq, work_color); 40e0: aa1503e0 mov x0, x21 worker->current_pwq = NULL; 40e4: f900129f str xzr, [x20, #32] worker->desc_valid = false; 40e8: 3900a29f strb wzr, [x20, #40] pwq_dec_nr_in_flight(pwq, work_color); 40ec: 97ffff0d bl 3d20 <pwq_dec_nr_in_flight> 40f0: f94043f9 ldr x25, [sp, #128] } 40f4: f9401fe1 ldr x1, [sp, #56] 40f8: f94002c0 ldr x0, [x22] 40fc: ca000020 eor x0, x1, x0 4100: b5000920 cbnz x0, 4224 <process_one_work+0x434> 4104: a9447bfd ldp x29, x30, [sp, #64] 4108: a94553f3 ldp x19, x20, [sp, #80] 410c: a9465bf5 ldp x21, x22, [sp, #96] 4110: a94763f7 ldp x23, x24, [sp, #112] 4114: 910243ff add sp, sp, #0x90 4118: d65f03c0 ret hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); 411c: 9100f842 add x2, x2, #0x3e 4120: f90043f9 str x25, [sp, #128] n->next = first; 4124: f900029f str xzr, [x20] 4128: 8b020f02 add x2, x24, x2, lsl #3 if (first) 412c: 17ffff8f b 3f68 <process_one_work+0x178> WARN_ON_ONCE(!work_pending(work)); 4130: d4210000 brk #0x800 4134: 17ffffa7 b 3fd0 <process_one_work+0x1e0> struct worker_pool *pool = worker->pool; 4138: a9440682 ldp x2, x1, [x20, #64] 413c: d5384100 mrs x0, sp_el0 WARN_ON_ONCE(worker->task != current); 4140: eb00005f cmp x2, x0 4144: 54000661 b.ne 4210 <process_one_work+0x420> // b.any !(worker->flags & WORKER_NOT_RUNNING)) { 4148: b9406a80 ldr w0, [x20, #104] 414c: 52803902 mov w2, #0x1c8 // #456 if ((flags & WORKER_NOT_RUNNING) && 4150: 6a02001f tst w0, w2 4154: 54000101 b.ne 4174 <process_one_work+0x384> // b.any 4158: 91160021 add x1, x1, #0x580 415c: f9800031 prfm pstl1strm, [x1] 4160: 885f7c20 ldxr w0, [x1] 4164: 51000400 sub w0, w0, #0x1 4168: 88027c20 stxr w2, w0, [x1] 416c: 35ffffa2 cbnz w2, 4160 <process_one_work+0x370> 4170: b9406a80 ldr w0, [x20, #104] worker->flags |= flags; 4174: 321a0000 orr w0, w0, #0x40 4178: b9006a80 str w0, [x20, #104] 417c: 17ffff88 b 3f9c <process_one_work+0x1ac> struct 
worker_pool *pool = worker->pool; 4180: a9440e82 ldp x2, x3, [x20, #64] 4184: d5384101 mrs x1, sp_el0 unsigned int oflags = worker->flags; 4188: b9406a80 ldr w0, [x20, #104] WARN_ON_ONCE(worker->task != current); 418c: eb01005f cmp x2, x1 4190: 2a0003e1 mov w1, w0 4194: 54000421 b.ne 4218 <process_one_work+0x428> // b.any worker->flags &= ~flags; 4198: 12197822 and w2, w1, #0xffffffbf 419c: b9006a82 str w2, [x20, #104] if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 41a0: 52803902 mov w2, #0x1c8 // #456 41a4: 6a02001f tst w0, w2 41a8: 54fff860 b.eq 40b4 <process_one_work+0x2c4> // b.none if (!(worker->flags & WORKER_NOT_RUNNING)) 41ac: 52803100 mov w0, #0x188 // #392 41b0: 6a00003f tst w1, w0 41b4: 54fff801 b.ne 40b4 <process_one_work+0x2c4> // b.any ATOMIC_OPS(add, add) 41b8: 91160062 add x2, x3, #0x580 41bc: f9800051 prfm pstl1strm, [x2] 41c0: 885f7c40 ldxr w0, [x2] 41c4: 11000400 add w0, w0, #0x1 41c8: 88017c40 stxr w1, w0, [x2] 41cc: 35ffffa1 cbnz w1, 41c0 <process_one_work+0x3d0> 41d0: 17ffffb9 b 40b4 <process_one_work+0x2c4> if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 41d4: b94ab000 ldr w0, [x0, #2736] 41d8: 7100001f cmp w0, #0x0 41dc: 54fff50d b.le 407c <process_one_work+0x28c> 41e0: d5384113 mrs x19, sp_el0 __READ_ONCE_SIZE; 41e4: b9401262 ldr w2, [x19, #16] pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n" 41e8: 911e6261 add x1, x19, #0x798 41ec: b945aa63 ldr w3, [x19, #1448] 41f0: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 41f4: f9400e84 ldr x4, [x20, #24] 41f8: 91000000 add x0, x0, #0x0 41fc: 94000000 bl 0 <printk> debug_show_held_locks(current); 4200: aa1303e0 mov x0, x19 4204: 94000000 bl 0 <debug_show_held_locks> dump_stack(); 4208: 94000000 bl 0 <dump_stack> 420c: 17ffff9c b 407c <process_one_work+0x28c> WARN_ON_ONCE(worker->task != current); 4210: d4210000 brk #0x800 4214: 17ffffcd b 4148 <process_one_work+0x358> WARN_ON_ONCE(worker->task != current); 4218: d4210000 brk #0x800 421c: b9406a81 ldr w1, [x20, #104] 4220: 17ffffde b 4198 <process_one_work+0x3a8> 4224: f90043f9 str x25, [sp, #128] } 4228: 94000000 bl 0 <__stack_chk_fail> 422c: d503201f nop 0000000000004230 <worker_thread>: { 4230: a9bb7bfd stp x29, x30, [sp, #-80]! 
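process_one_work() above is the meat of the worker: it claims the work item on the pool's busy hash, clears the PENDING bit, drops pool->lock, runs the callback, and reacquires the lock to retire the item. Condensed sketch of that sequence (the collision handling, lockdep maps, and CPU_INTENSIVE/leak checks shown in the dump are omitted):

	/* under pool->lock */
	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
	worker->current_work = work;
	worker->current_func = work->func;
	worker->current_pwq = pwq;
	list_del_init(&work->entry);

	set_work_pool_and_clear_pending(work, pool->id);
	spin_unlock_irq(&pool->lock);

	worker->current_func(work);	/* the actual callback, run with the lock dropped */

	spin_lock_irq(&pool->lock);
	hash_del(&worker->hentry);
	worker->current_work = NULL;
	worker->current_func = NULL;
	worker->current_pwq = NULL;
	pwq_dec_nr_in_flight(pwq, work_color);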
4234: 910003fd mov x29, sp 4238: a90153f3 stp x19, x20, [sp, #16] 423c: aa0003f3 mov x19, x0 4240: a9025bf5 stp x21, x22, [sp, #32] 4244: a90363f7 stp x23, x24, [sp, #48] 4248: 90000018 adrp x24, 0 <pwq_activate_delayed_work> 424c: 91000318 add x24, x24, #0x0 struct worker_pool *pool = worker->pool; 4250: a9445401 ldp x1, x21, [x0, #64] wake_up(&wq_manager_wait); 4254: 91040318 add x24, x24, #0x100 worker->task->flags |= PF_WQ_WORKER; 4258: b9403420 ldr w0, [x1, #52] 425c: 321b0000 orr w0, w0, #0x20 4260: b9003420 str w0, [x1, #52] 4264: 14000028 b 4304 <worker_thread+0xd4> process_one_work(worker, work); 4268: aa1303e0 mov x0, x19 426c: 97fffee1 bl 3df0 <process_one_work> 4270: f9401a60 ldr x0, [x19, #48] if (unlikely(!list_empty(&worker->scheduled))) 4274: eb00029f cmp x20, x0 4278: 54001181 b.ne 44a8 <worker_thread+0x278> // b.any 427c: f94086a0 ldr x0, [x21, #264] return !list_empty(&pool->worklist) && 4280: eb0002ff cmp x23, x0 4284: 54000080 b.eq 4294 <worker_thread+0x64> // b.none 4288: b94582a0 ldr w0, [x21, #1408] 428c: 7100041f cmp w0, #0x1 4290: 54000b4d b.le 43f8 <worker_thread+0x1c8> struct worker_pool *pool = worker->pool; 4294: a9440662 ldp x2, x1, [x19, #64] 4298: d5384100 mrs x0, sp_el0 WARN_ON_ONCE(worker->task != current); 429c: eb00005f cmp x2, x0 42a0: 54001f21 b.ne 4684 <worker_thread+0x454> // b.any !(worker->flags & WORKER_NOT_RUNNING)) { 42a4: b9406a60 ldr w0, [x19, #104] 42a8: 52803902 mov w2, #0x1c8 // #456 if ((flags & WORKER_NOT_RUNNING) && 42ac: 6a02001f tst w0, w2 42b0: 54000101 b.ne 42d0 <worker_thread+0xa0> // b.any ATOMIC_OPS(sub, sub) 42b4: 91160021 add x1, x1, #0x580 42b8: f9800031 prfm pstl1strm, [x1] 42bc: 885f7c20 ldxr w0, [x1] 42c0: 51000400 sub w0, w0, #0x1 42c4: 88027c20 stxr w2, w0, [x1] 42c8: 35ffffa2 cbnz w2, 42bc <worker_thread+0x8c> 42cc: b9406a60 ldr w0, [x19, #104] worker->flags |= flags; 42d0: 321d0000 orr w0, w0, #0x8 42d4: b9006a60 str w0, [x19, #104] worker_enter_idle(worker); 42d8: aa1303e0 mov x0, x19 42dc: 97ffef87 bl f8 <worker_enter_idle> spin_unlock_irq(&pool->lock); 42e0: aa1503e0 mov x0, x21 __set_current_state(TASK_IDLE); 42e4: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 42e8: 91000021 add x1, x1, #0x0 42ec: d2808043 mov x3, #0x402 // #1026 42f0: d5384102 mrs x2, sp_el0 42f4: f9000c43 str x3, [x2, #24] 42f8: f90b8841 str x1, [x2, #5904] spin_unlock_irq(&pool->lock); 42fc: 94000000 bl 0 <rt_spin_unlock> schedule(); 4300: 94000000 bl 0 <schedule> spin_lock_irq(&pool->lock); 4304: aa1503e0 mov x0, x21 4308: 94000000 bl 0 <rt_spin_lock> if (unlikely(worker->flags & WORKER_DIE)) { 430c: b9406a60 ldr w0, [x19, #104] 4310: 37080e40 tbnz w0, #1, 44d8 <worker_thread+0x2a8> struct worker_pool *pool = worker->pool; 4314: f9402662 ldr x2, [x19, #72] if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) 4318: 36101b20 tbz w0, #2, 467c <worker_thread+0x44c> WARN_ON_ONCE(worker->task != current); 431c: f9402263 ldr x3, [x19, #64] 4320: d5384101 mrs x1, sp_el0 4324: eb01007f cmp x3, x1 4328: 54001be1 b.ne 46a4 <worker_thread+0x474> // b.any worker->flags &= ~flags; 432c: 121d7800 and w0, w0, #0xfffffffb 4330: b9006a60 str w0, [x19, #104] preempt_disable(); 4334: 52800020 mov w0, #0x1 // #1 pool->nr_idle--; 4338: b9411c41 ldr w1, [x2, #284] 433c: 51000421 sub w1, w1, #0x1 4340: b9011c41 str w1, [x2, #284] preempt_disable(); 4344: 94000000 bl 0 <preempt_count_add> __list_del(entry->prev, entry->next); 4348: a9400261 ldp x1, x0, [x19] next->prev = prev; 434c: f9000420 str x0, [x1, #8] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 4350: 
f9000001 str x1, [x0] 4354: f9000273 str x19, [x19] list->prev = list; 4358: f9000673 str x19, [x19, #8] preempt_enable(); 435c: 52800020 mov w0, #0x1 // #1 4360: 94000000 bl 0 <preempt_count_sub> 4364: d5384100 mrs x0, sp_el0 __READ_ONCE_SIZE; 4368: b9401001 ldr w1, [x0, #16] 436c: 34001901 cbz w1, 468c <worker_thread+0x45c> 4370: f94086a0 ldr x0, [x21, #264] return !list_empty(&pool->worklist) && __need_more_worker(pool); 4374: 910422b7 add x23, x21, #0x108 4378: eb0002ff cmp x23, x0 437c: 54fffae0 b.eq 42d8 <worker_thread+0xa8> // b.none 4380: b94582a0 ldr w0, [x21, #1408] 4384: 35fffaa0 cbnz w0, 42d8 <worker_thread+0xa8> if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 4388: b9411ea0 ldr w0, [x21, #284] 438c: 90000016 adrp x22, 0 <jiffies> 4390: 910002d6 add x22, x22, #0x0 4394: 34000da0 cbz w0, 4548 <worker_thread+0x318> 4398: f9401a60 ldr x0, [x19, #48] WARN_ON_ONCE(!list_empty(&worker->scheduled)); 439c: 9100c274 add x20, x19, #0x30 43a0: eb00029f cmp x20, x0 43a4: 54001561 b.ne 4650 <worker_thread+0x420> // b.any struct worker_pool *pool = worker->pool; 43a8: a9440e62 ldp x2, x3, [x19, #64] 43ac: d5384100 mrs x0, sp_el0 unsigned int oflags = worker->flags; 43b0: b9406a61 ldr w1, [x19, #104] WARN_ON_ONCE(worker->task != current); 43b4: eb00005f cmp x2, x0 43b8: 2a0103e2 mov w2, w1 43bc: 540015a1 b.ne 4670 <worker_thread+0x440> // b.any worker->flags &= ~flags; 43c0: 12802100 mov w0, #0xfffffef7 // #-265 43c4: 0a000040 and w0, w2, w0 43c8: b9006a60 str w0, [x19, #104] if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING)) 43cc: 52803900 mov w0, #0x1c8 // #456 43d0: 6a00003f tst w1, w0 43d4: 54000120 b.eq 43f8 <worker_thread+0x1c8> // b.none if (!(worker->flags & WORKER_NOT_RUNNING)) 43d8: 721a045f tst w2, #0xc0 43dc: 540000e1 b.ne 43f8 <worker_thread+0x1c8> // b.any ATOMIC_OPS(add, add) 43e0: 91160062 add x2, x3, #0x580 43e4: f9800051 prfm pstl1strm, [x2] 43e8: 885f7c40 ldxr w0, [x2] 43ec: 11000400 add w0, w0, #0x1 43f0: 88017c40 stxr w1, w0, [x2] 43f4: 35ffffa1 cbnz w1, 43e8 <worker_thread+0x1b8> list_first_entry(&pool->worklist, 43f8: f94086a0 ldr x0, [x21, #264] pool->watchdog_ts = jiffies; 43fc: f94002c1 ldr x1, [x22] 4400: f90082a1 str x1, [x21, #256] list_first_entry(&pool->worklist, 4404: d1002001 sub x1, x0, #0x8 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { 4408: f85f8002 ldur x2, [x0, #-8] 440c: 361ff2e2 tbz w2, #3, 4268 <worker_thread+0x38> list_for_each_entry_safe_from(work, n, NULL, entry) { 4410: f9400422 ldr x2, [x1, #8] 4414: d1002041 sub x1, x2, #0x8 4418: b4000400 cbz x0, 4498 <worker_thread+0x268> __list_del(entry->prev, entry->next); 441c: f9400403 ldr x3, [x0, #8] next->prev = prev; 4420: f9000443 str x3, [x2, #8] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 4424: f9000062 str x2, [x3] __list_add(new, head->prev, head); 4428: f9401e62 ldr x2, [x19, #56] next->prev = new; 442c: f9001e60 str x0, [x19, #56] new->prev = prev; 4430: a9000814 stp x20, x2, [x0] 4434: f9000040 str x0, [x2] if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 4438: f85f8000 ldur x0, [x0, #-8] 443c: 37180180 tbnz w0, #3, 446c <worker_thread+0x23c> 4440: 14000016 b 4498 <worker_thread+0x268> __list_del(entry->prev, entry->next); 4444: f9400823 ldr x3, [x1, #16] next->prev = prev; 4448: f9000443 str x3, [x2, #8] 444c: f9000062 str x2, [x3] __list_add(new, head->prev, head); 4450: f9401e62 ldr x2, [x19, #56] next->prev = new; 4454: f9001e60 str x0, [x19, #56] new->prev = prev; 4458: a9008834 stp x20, x2, [x1, #8] 445c: f9000040 str x0, [x2] 
4460: f9400020 ldr x0, [x1] list_for_each_entry_safe_from(work, n, NULL, entry) { 4464: aa0403e1 mov x1, x4 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 4468: 36180180 tbz w0, #3, 4498 <worker_thread+0x268> list_for_each_entry_safe_from(work, n, NULL, entry) { 446c: aa0103e0 mov x0, x1 4470: f8408c02 ldr x2, [x0, #8]! 4474: d1002044 sub x4, x2, #0x8 4478: b5fffe60 cbnz x0, 4444 <worker_thread+0x214> __READ_ONCE_SIZE; 447c: f9401a60 ldr x0, [x19, #48] while (!list_empty(&worker->scheduled)) { 4480: eb00029f cmp x20, x0 4484: 54ffefc0 b.eq 427c <worker_thread+0x4c> // b.none struct work_struct *work = list_first_entry(&worker->scheduled, 4488: f9401a61 ldr x1, [x19, #48] process_one_work(worker, work); 448c: aa1303e0 mov x0, x19 4490: d1002021 sub x1, x1, #0x8 4494: 97fffe57 bl 3df0 <process_one_work> 4498: f9401a60 ldr x0, [x19, #48] while (!list_empty(&worker->scheduled)) { 449c: eb00029f cmp x20, x0 44a0: 54ffff41 b.ne 4488 <worker_thread+0x258> // b.any 44a4: 17ffff76 b 427c <worker_thread+0x4c> 44a8: f9401a60 ldr x0, [x19, #48] 44ac: eb00029f cmp x20, x0 44b0: 54ffee60 b.eq 427c <worker_thread+0x4c> // b.none 44b4: d503201f nop struct work_struct *work = list_first_entry(&worker->scheduled, 44b8: f9401a61 ldr x1, [x19, #48] process_one_work(worker, work); 44bc: aa1303e0 mov x0, x19 44c0: d1002021 sub x1, x1, #0x8 44c4: 97fffe4b bl 3df0 <process_one_work> 44c8: f9401a60 ldr x0, [x19, #48] while (!list_empty(&worker->scheduled)) { 44cc: eb00029f cmp x20, x0 44d0: 54ffff41 b.ne 44b8 <worker_thread+0x288> // b.any 44d4: 17ffff6a b 427c <worker_thread+0x4c> spin_unlock_irq(&pool->lock); 44d8: aa1503e0 mov x0, x21 44dc: 94000000 bl 0 <rt_spin_unlock> 44e0: f9400260 ldr x0, [x19] WARN_ON_ONCE(!list_empty(&worker->entry)); 44e4: eb00027f cmp x19, x0 44e8: 54000ee1 b.ne 46c4 <worker_thread+0x494> // b.any worker->task->flags &= ~PF_WQ_WORKER; 44ec: f9402263 ldr x3, [x19, #64] extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); static inline void set_task_comm(struct task_struct *tsk, const char *from) { __set_task_comm(tsk, from, false); 44f0: 52800002 mov w2, #0x0 // #0 44f4: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 44f8: 91000021 add x1, x1, #0x0 44fc: b9403460 ldr w0, [x3, #52] 4500: 121a7800 and w0, w0, #0xffffffdf 4504: b9003460 str w0, [x3, #52] 4508: f9402260 ldr x0, [x19, #64] 450c: 94000000 bl 0 <__set_task_comm> ida_simple_remove(&pool->worker_ida, worker->id); 4510: b9406e61 ldr w1, [x19, #108] 4514: 9113e2a0 add x0, x21, #0x4f8 4518: 94000000 bl 0 <ida_simple_remove> worker_detach_from_pool(worker, pool); 451c: aa1503e1 mov x1, x21 4520: aa1303e0 mov x0, x19 4524: 97fff2af bl fe0 <worker_detach_from_pool> kfree(worker); 4528: aa1303e0 mov x0, x19 452c: 94000000 bl 0 <kfree> } 4530: 52800000 mov w0, #0x0 // #0 4534: a94153f3 ldp x19, x20, [sp, #16] 4538: a9425bf5 ldp x21, x22, [sp, #32] 453c: a94363f7 ldp x23, x24, [sp, #48] 4540: a8c57bfd ldp x29, x30, [sp], #80 4544: d65f03c0 ret struct worker_pool *pool = worker->pool; 4548: f9402674 ldr x20, [x19, #72] if (pool->flags & POOL_MANAGER_ACTIVE) 454c: b940fe80 ldr w0, [x20, #252] 4550: 3707f240 tbnz w0, #0, 4398 <worker_thread+0x168> 4554: a9046bf9 stp x25, x26, [sp, #64] pool->flags |= POOL_MANAGER_ACTIVE; 4558: 9106429a add x26, x20, #0x190 455c: 91042299 add x25, x20, #0x108 4560: 32000000 orr w0, w0, #0x1 4564: b900fe80 str w0, [x20, #252] pool->manager = worker; 4568: f901fa93 str x19, [x20, #1008] spin_unlock_irq(&pool->lock); 456c: aa1403e0 mov x0, x20 4570: 94000000 bl 0 
<rt_spin_unlock> mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); 4574: f94002c1 ldr x1, [x22] 4578: aa1a03e0 mov x0, x26 457c: 91000821 add x1, x1, #0x2 4580: 94000000 bl 0 <mod_timer> if (create_worker(pool) || !need_to_create_worker(pool)) 4584: aa1403e0 mov x0, x20 4588: 97fff68e bl 1fc0 <create_worker> 458c: b5000220 cbnz x0, 45d0 <worker_thread+0x3a0> 4590: f9408680 ldr x0, [x20, #264] return !list_empty(&pool->worklist) && __need_more_worker(pool); 4594: eb00033f cmp x25, x0 4598: 540001c0 b.eq 45d0 <worker_thread+0x3a0> // b.none 459c: b9458280 ldr w0, [x20, #1408] 45a0: 35000180 cbnz w0, 45d0 <worker_thread+0x3a0> return need_more_worker(pool) && !may_start_working(pool); 45a4: b9411e80 ldr w0, [x20, #284] 45a8: 35000140 cbnz w0, 45d0 <worker_thread+0x3a0> schedule_timeout_interruptible(CREATE_COOLDOWN); 45ac: d2801f40 mov x0, #0xfa // #250 45b0: 94000000 bl 0 <schedule_timeout_interruptible> 45b4: f9408680 ldr x0, [x20, #264] return !list_empty(&pool->worklist) && __need_more_worker(pool); 45b8: eb00033f cmp x25, x0 45bc: 540000a0 b.eq 45d0 <worker_thread+0x3a0> // b.none 45c0: b9458280 ldr w0, [x20, #1408] 45c4: 35000060 cbnz w0, 45d0 <worker_thread+0x3a0> return need_more_worker(pool) && !may_start_working(pool); 45c8: b9411e80 ldr w0, [x20, #284] 45cc: 34fffdc0 cbz w0, 4584 <worker_thread+0x354> del_timer_sync(&pool->mayday_timer); 45d0: aa1a03e0 mov x0, x26 45d4: 94000000 bl 0 <del_timer_sync> spin_lock_irq(&pool->lock); 45d8: aa1403e0 mov x0, x20 45dc: 94000000 bl 0 <rt_spin_lock> 45e0: f9408680 ldr x0, [x20, #264] return !list_empty(&pool->worklist) && __need_more_worker(pool); 45e4: eb00033f cmp x25, x0 45e8: 54000080 b.eq 45f8 <worker_thread+0x3c8> // b.none 45ec: b9458280 ldr w0, [x20, #1408] 45f0: 34000640 cbz w0, 46b8 <worker_thread+0x488> 45f4: d503201f nop pool->flags &= ~POOL_MANAGER_ACTIVE; 45f8: b940fe80 ldr w0, [x20, #252] wake_up(&wq_manager_wait); 45fc: d2800003 mov x3, #0x0 // #0 pool->manager = NULL; 4600: f901fa9f str xzr, [x20, #1008] wake_up(&wq_manager_wait); 4604: 52800022 mov w2, #0x1 // #1 pool->flags &= ~POOL_MANAGER_ACTIVE; 4608: 121f7800 and w0, w0, #0xfffffffe 460c: b900fe80 str w0, [x20, #252] wake_up(&wq_manager_wait); 4610: 52800061 mov w1, #0x3 // #3 4614: aa1803e0 mov x0, x24 4618: 94000000 bl 0 <__wake_up> 461c: f94086a0 ldr x0, [x21, #264] return !list_empty(&pool->worklist) && __need_more_worker(pool); 4620: eb0002ff cmp x23, x0 4624: 54000460 b.eq 46b0 <worker_thread+0x480> // b.none 4628: b94582a0 ldr w0, [x21, #1408] 462c: 35000420 cbnz w0, 46b0 <worker_thread+0x480> if (unlikely(!may_start_working(pool)) && manage_workers(worker)) 4630: b9411ea0 ldr w0, [x21, #284] 4634: 340004c0 cbz w0, 46cc <worker_thread+0x49c> 4638: a9446bf9 ldp x25, x26, [sp, #64] WARN_ON_ONCE(!list_empty(&worker->scheduled)); 463c: 9100c274 add x20, x19, #0x30 4640: f9401a60 ldr x0, [x19, #48] 4644: eb00029f cmp x20, x0 4648: 54ffeb00 b.eq 43a8 <worker_thread+0x178> // b.none 464c: d503201f nop 4650: d4210000 brk #0x800 struct worker_pool *pool = worker->pool; 4654: a9440e62 ldp x2, x3, [x19, #64] 4658: d5384100 mrs x0, sp_el0 unsigned int oflags = worker->flags; 465c: b9406a61 ldr w1, [x19, #104] WARN_ON_ONCE(worker->task != current); 4660: eb00005f cmp x2, x0 4664: 2a0103e2 mov w2, w1 4668: 54ffeac0 b.eq 43c0 <worker_thread+0x190> // b.none 466c: d503201f nop 4670: d4210000 brk #0x800 4674: b9406a62 ldr w2, [x19, #104] 4678: 17ffff52 b 43c0 <worker_thread+0x190> if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE))) 467c: d4210000 brk #0x800 4680: 
17ffff3c b 4370 <worker_thread+0x140> WARN_ON_ONCE(worker->task != current); 4684: d4210000 brk #0x800 4688: 17ffff07 b 42a4 <worker_thread+0x74> 468c: f9400001 ldr x1, [x0] 4690: 37080061 tbnz w1, #1, 469c <worker_thread+0x46c> 4694: f9400000 ldr x0, [x0] 4698: 3637e6c0 tbz w0, #6, 4370 <worker_thread+0x140> preempt_enable(); 469c: 94000000 bl 0 <preempt_schedule> 46a0: 17ffff34 b 4370 <worker_thread+0x140> WARN_ON_ONCE(worker->task != current); 46a4: d4210000 brk #0x800 46a8: b9406a60 ldr w0, [x19, #104] 46ac: 17ffff20 b 432c <worker_thread+0xfc> 46b0: a9446bf9 ldp x25, x26, [sp, #64] 46b4: 17ffff09 b 42d8 <worker_thread+0xa8> return need_more_worker(pool) && !may_start_working(pool); 46b8: b9411e80 ldr w0, [x20, #284] 46bc: 34fff580 cbz w0, 456c <worker_thread+0x33c> 46c0: 17ffffce b 45f8 <worker_thread+0x3c8> WARN_ON_ONCE(!list_empty(&worker->entry)); 46c4: d4210000 brk #0x800 46c8: 17ffff89 b 44ec <worker_thread+0x2bc> struct worker_pool *pool = worker->pool; 46cc: f9402674 ldr x20, [x19, #72] if (pool->flags & POOL_MANAGER_ACTIVE) 46d0: b940fe80 ldr w0, [x20, #252] 46d4: 3607f420 tbz w0, #0, 4558 <worker_thread+0x328> 46d8: a9446bf9 ldp x25, x26, [sp, #64] 46dc: 17ffffd8 b 463c <worker_thread+0x40c> 00000000000046e0 <try_to_grab_pending>: { 46e0: a9bc7bfd stp x29, x30, [sp, #-64]! 46e4: 910003fd mov x29, sp 46e8: a90363f7 stp x23, x24, [sp, #48] local_lock_irqsave(pendingb_lock, *flags); 46ec: 90000018 adrp x24, 0 <__per_cpu_offset> 46f0: 91000318 add x24, x24, #0x0 { 46f4: aa0203f7 mov x23, x2 46f8: a90153f3 stp x19, x20, [sp, #16] local_lock_irqsave(pendingb_lock, *flags); 46fc: 90000014 adrp x20, 0 <pwq_activate_delayed_work> { 4700: a9025bf5 stp x21, x22, [sp, #32] 4704: 12001c36 and w22, w1, #0xff 4708: aa0003f5 mov x21, x0 local_lock_irqsave(pendingb_lock, *flags); 470c: 94000000 bl 0 <migrate_disable> 4710: 91000294 add x20, x20, #0x0 4714: 94000000 bl 0 <debug_smp_processor_id> 4718: 91002293 add x19, x20, #0x8 471c: f8605b00 ldr x0, [x24, w0, uxtw #3] 4720: d5384101 mrs x1, sp_el0 4724: 8b000273 add x19, x19, x0 if (lv->owner != current) { 4728: f9407a60 ldr x0, [x19, #240] 472c: eb01001f cmp x0, x1 4730: 54000ce0 b.eq 48cc <try_to_grab_pending+0x1ec> // b.none spin_lock_irqsave(&lv->lock, lv->flags); 4734: f900827f str xzr, [x19, #256] 4738: aa1303e0 mov x0, x19 473c: 94000000 bl 0 <rt_spin_lock> LL_WARN(lv->owner); 4740: f9407a60 ldr x0, [x19, #240] 4744: b5001080 cbnz x0, 4954 <try_to_grab_pending+0x274> LL_WARN(lv->nestcnt); 4748: b940fa60 ldr w0, [x19, #248] 474c: 35001000 cbnz w0, 494c <try_to_grab_pending+0x26c> 4750: d5384101 mrs x1, sp_el0 lv->nestcnt = 1; 4754: 52800020 mov w0, #0x1 // #1 lv->owner = current; 4758: f9007a61 str x1, [x19, #240] lv->nestcnt = 1; 475c: b900fa60 str w0, [x19, #248] 4760: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 4764: 91000000 add x0, x0, #0x0 4768: 94000000 bl 0 <__this_cpu_preempt_check> 476c: 91042280 add x0, x20, #0x108 4770: d538d081 mrs x1, tpidr_el1 4774: f8616800 ldr x0, [x0, x1] 4778: f90002e0 str x0, [x23] if (is_dwork) { 477c: 34000176 cbz w22, 47a8 <try_to_grab_pending+0xc8> if (likely(del_timer(&dwork->timer))) 4780: 910142a0 add x0, x21, #0x50 4784: 94000000 bl 0 <del_timer> return 1; 4788: 52800021 mov w1, #0x1 // #1 if (likely(del_timer(&dwork->timer))) 478c: 340000e0 cbz w0, 47a8 <try_to_grab_pending+0xc8> } 4790: 2a0103e0 mov w0, w1 4794: a94153f3 ldp x19, x20, [sp, #16] 4798: a9425bf5 ldp x21, x22, [sp, #32] 479c: a94363f7 ldp x23, x24, [sp, #48] 47a0: a8c47bfd ldp x29, x30, [sp], #64 47a4: d65f03c0 ret if 
(!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) 47a8: aa1503e1 mov x1, x21 47ac: 52800000 mov w0, #0x0 // #0 47b0: 94000000 bl 0 <test_and_set_bit> 47b4: 2a0003e1 mov w1, w0 47b8: 34fffec0 cbz w0, 4790 <try_to_grab_pending+0xb0> __rcu_read_lock(); 47bc: 94000000 bl 0 <__rcu_read_lock> lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 47c0: 90000013 adrp x19, 0 <rcu_lock_map> 47c4: 91000273 add x19, x19, #0x0 47c8: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 47cc: aa1303e0 mov x0, x19 47d0: 910000c6 add x6, x6, #0x0 47d4: d2800005 mov x5, #0x0 // #0 47d8: 52800004 mov w4, #0x0 // #0 47dc: 52800043 mov w3, #0x2 // #2 47e0: 52800002 mov w2, #0x0 // #0 47e4: 52800001 mov w1, #0x0 // #0 47e8: 94000000 bl 0 <lock_acquire> RCU_LOCKDEP_WARN(!rcu_is_watching(), 47ec: 94000000 bl 0 <debug_lockdep_rcu_enabled> 47f0: 340000a0 cbz w0, 4804 <try_to_grab_pending+0x124> 47f4: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 47f8: 910002d6 add x22, x22, #0x0 47fc: 39401ac0 ldrb w0, [x22, #6] 4800: 34000ae0 cbz w0, 495c <try_to_grab_pending+0x27c> pool = get_work_pool(work); 4804: aa1503e0 mov x0, x21 4808: 97fff49a bl 1a70 <get_work_pool> 480c: aa0003f6 mov x22, x0 if (!pool) 4810: b4000160 cbz x0, 483c <try_to_grab_pending+0x15c> spin_lock(&pool->lock); 4814: 94000000 bl 0 <rt_spin_lock> 4818: f94002a0 ldr x0, [x21] if (data & WORK_STRUCT_PWQ) 481c: 361000c0 tbz w0, #2, 4834 <try_to_grab_pending+0x154> if (pwq && pwq->pool == pool) { 4820: f278dc09 ands x9, x0, #0xffffffffffffff00 4824: 54000080 b.eq 4834 <try_to_grab_pending+0x154> // b.none 4828: f9400120 ldr x0, [x9] 482c: eb16001f cmp x0, x22 4830: 54000b60 b.eq 499c <try_to_grab_pending+0x2bc> // b.none spin_unlock(&pool->lock); 4834: aa1603e0 mov x0, x22 4838: 94000000 bl 0 <rt_spin_unlock> RCU_LOCKDEP_WARN(!rcu_is_watching(), 483c: 94000000 bl 0 <debug_lockdep_rcu_enabled> 4840: 35000500 cbnz w0, 48e0 <try_to_grab_pending+0x200> __rcu_read_unlock(); 4844: 94000000 bl 0 <__rcu_read_unlock> local_unlock_irqrestore(pendingb_lock, *flags); 4848: 91002294 add x20, x20, #0x8 lock_release(map, 1, _THIS_IP_); 484c: aa1303e0 mov x0, x19 4850: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 4854: 91000042 add x2, x2, #0x0 4858: 52800021 mov w1, #0x1 // #1 485c: 94000000 bl 0 <lock_release> 4860: 94000000 bl 0 <debug_smp_processor_id> 4864: f8605b00 ldr x0, [x24, w0, uxtw #3] 4868: 8b000294 add x20, x20, x0 LL_WARN(!lv->nestcnt); 486c: b940fa80 ldr w0, [x20, #248] 4870: 340008e0 cbz w0, 498c <try_to_grab_pending+0x2ac> LL_WARN(lv->owner != current); 4874: f9407a81 ldr x1, [x20, #240] 4878: d5384100 mrs x0, sp_el0 487c: eb00003f cmp x1, x0 4880: 540008a1 b.ne 4994 <try_to_grab_pending+0x2b4> // b.any if (--lv->nestcnt) 4884: b940fa80 ldr w0, [x20, #248] 4888: 51000400 sub w0, w0, #0x1 488c: b900fa80 str w0, [x20, #248] 4890: 34000480 cbz w0, 4920 <try_to_grab_pending+0x240> 4894: f94002a0 ldr x0, [x21] return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 4898: d2800282 mov x2, #0x14 // #20 return -ENOENT; 489c: 12800021 mov w1, #0xfffffffe // #-2 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 48a0: 8a020000 and x0, x0, x2 if (work_is_canceling(work)) 48a4: f100401f cmp x0, #0x10 48a8: 54fff740 b.eq 4790 <try_to_grab_pending+0xb0> // b.none cpu_chill(); 48ac: 94000000 bl 0 <cpu_chill> return -EAGAIN; 48b0: 12800141 mov w1, #0xfffffff5 // #-11 } 48b4: 2a0103e0 mov w0, w1 48b8: a94153f3 ldp x19, x20, [sp, #16] 48bc: a9425bf5 ldp x21, x22, [sp, #32] 48c0: a94363f7 ldp x23, x24, [sp, #48] 48c4: a8c47bfd ldp x29, 
x30, [sp], #64 48c8: d65f03c0 ret lv->nestcnt++; 48cc: b940fa60 ldr w0, [x19, #248] 48d0: 11000400 add w0, w0, #0x1 48d4: b900fa60 str w0, [x19, #248] local_lock_irqsave(pendingb_lock, *flags); 48d8: 94000000 bl 0 <migrate_enable> 48dc: 17ffffa1 b 4760 <try_to_grab_pending+0x80> RCU_LOCKDEP_WARN(!rcu_is_watching(), 48e0: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 48e4: 910002d6 add x22, x22, #0x0 48e8: 39401ec0 ldrb w0, [x22, #7] 48ec: 35fffac0 cbnz w0, 4844 <try_to_grab_pending+0x164> 48f0: 94000000 bl 0 <rcu_is_watching> 48f4: 72001c1f tst w0, #0xff 48f8: 54fffa61 b.ne 4844 <try_to_grab_pending+0x164> // b.any 48fc: 52800023 mov w3, #0x1 // #1 4900: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 4904: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 4908: 91000042 add x2, x2, #0x0 490c: 91000000 add x0, x0, #0x0 4910: 528051a1 mov w1, #0x28d // #653 4914: 39001ec3 strb w3, [x22, #7] 4918: 94000000 bl 0 <lockdep_rcu_suspicious> 491c: 17ffffca b 4844 <try_to_grab_pending+0x164> spin_unlock_irqrestore(&lv->lock, lv->flags); 4920: aa1403e0 mov x0, x20 lv->owner = NULL; 4924: f9007a9f str xzr, [x20, #240] spin_unlock_irqrestore(&lv->lock, lv->flags); 4928: 94000000 bl 0 <rt_spin_unlock> local_unlock_irqrestore(pendingb_lock, *flags); 492c: 94000000 bl 0 <migrate_enable> 4930: f94002a0 ldr x0, [x21] return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 4934: d2800282 mov x2, #0x14 // #20 return -ENOENT; 4938: 12800021 mov w1, #0xfffffffe // #-2 return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 493c: 8a020000 and x0, x0, x2 if (work_is_canceling(work)) 4940: f100401f cmp x0, #0x10 4944: 54fffb41 b.ne 48ac <try_to_grab_pending+0x1cc> // b.any 4948: 17ffff92 b 4790 <try_to_grab_pending+0xb0> LL_WARN(lv->nestcnt); 494c: d4210000 brk #0x800 4950: 17ffff80 b 4750 <try_to_grab_pending+0x70> LL_WARN(lv->owner); 4954: d4210000 brk #0x800 4958: 17ffff7c b 4748 <try_to_grab_pending+0x68> RCU_LOCKDEP_WARN(!rcu_is_watching(), 495c: 94000000 bl 0 <rcu_is_watching> 4960: 72001c1f tst w0, #0xff 4964: 54fff501 b.ne 4804 <try_to_grab_pending+0x124> // b.any 4968: 52800023 mov w3, #0x1 // #1 496c: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 4970: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 4974: 91000042 add x2, x2, #0x0 4978: 91000000 add x0, x0, #0x0 497c: 52804b41 mov w1, #0x25a // #602 4980: 39001ac3 strb w3, [x22, #6] 4984: 94000000 bl 0 <lockdep_rcu_suspicious> 4988: 17ffff9f b 4804 <try_to_grab_pending+0x124> LL_WARN(!lv->nestcnt); 498c: d4210000 brk #0x800 4990: 17ffffb9 b 4874 <try_to_grab_pending+0x194> LL_WARN(lv->owner != current); 4994: d4210000 brk #0x800 4998: 17ffffbb b 4884 <try_to_grab_pending+0x1a4> if (*work_data_bits(work) & WORK_STRUCT_DELAYED) 499c: f94002a0 ldr x0, [x21] 49a0: 37080420 tbnz w0, #1, 4a24 <try_to_grab_pending+0x344> __list_del(entry->prev, entry->next); 49a4: a94086a3 ldp x3, x1, [x21, #8] next->prev = prev; 49a8: f9000461 str x1, [x3, #8] list_del_init(&work->entry); 49ac: 910022a2 add x2, x21, #0x8 pwq_dec_nr_in_flight(pwq, get_work_color(work)); 49b0: aa0903e0 mov x0, x9 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 49b4: f9000023 str x3, [x1] 49b8: f90006a2 str x2, [x21, #8] return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & 49bc: f94002a1 ldr x1, [x21] list->prev = list; 49c0: f9000aa2 str x2, [x21, #16] pwq_dec_nr_in_flight(pwq, get_work_color(work)); 49c4: 53041c21 ubfx w1, w1, #4, #4 49c8: 97fffcd6 bl 3d20 <pwq_dec_nr_in_flight> 49cc: f94002a0 ldr x0, [x21] set_work_data(work, (unsigned long)pool_id << 
WORK_OFFQ_POOL_SHIFT, 49d0: b980fac1 ldrsw x1, [x22, #248] 49d4: d37be821 lsl x1, x1, #5 WARN_ON_ONCE(!work_pending(work)); 49d8: 36000440 tbz w0, #0, 4a60 <try_to_grab_pending+0x380> atomic_long_set(&work->data, data | flags | work_static(work)); 49dc: b2400021 orr x1, x1, #0x1 spin_unlock(&pool->lock); 49e0: aa1603e0 mov x0, x22 49e4: f90002a1 str x1, [x21] 49e8: 94000000 bl 0 <rt_spin_unlock> RCU_LOCKDEP_WARN(!rcu_is_watching(), 49ec: 94000000 bl 0 <debug_lockdep_rcu_enabled> 49f0: 340000a0 cbz w0, 4a04 <try_to_grab_pending+0x324> 49f4: 90000014 adrp x20, 0 <pwq_activate_delayed_work> 49f8: 91000294 add x20, x20, #0x0 49fc: 39401e80 ldrb w0, [x20, #7] 4a00: 34000180 cbz w0, 4a30 <try_to_grab_pending+0x350> __rcu_read_unlock(); 4a04: 94000000 bl 0 <__rcu_read_unlock> lock_release(map, 1, _THIS_IP_); 4a08: 52800021 mov w1, #0x1 // #1 4a0c: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 4a10: aa1303e0 mov x0, x19 4a14: 91000042 add x2, x2, #0x0 4a18: 94000000 bl 0 <lock_release> return 1; 4a1c: 52800021 mov w1, #0x1 // #1 4a20: 17ffff5c b 4790 <try_to_grab_pending+0xb0> pwq_activate_delayed_work(work); 4a24: aa1503e0 mov x0, x21 4a28: 97ffed76 bl 0 <pwq_activate_delayed_work> 4a2c: 17ffffde b 49a4 <try_to_grab_pending+0x2c4> RCU_LOCKDEP_WARN(!rcu_is_watching(), 4a30: 94000000 bl 0 <rcu_is_watching> 4a34: 72001c1f tst w0, #0xff 4a38: 54fffe61 b.ne 4a04 <try_to_grab_pending+0x324> // b.any 4a3c: 52800023 mov w3, #0x1 // #1 4a40: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 4a44: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 4a48: 91000042 add x2, x2, #0x0 4a4c: 91000000 add x0, x0, #0x0 4a50: 528051a1 mov w1, #0x28d // #653 4a54: 39001e83 strb w3, [x20, #7] 4a58: 94000000 bl 0 <lockdep_rcu_suspicious> 4a5c: 17ffffea b 4a04 <try_to_grab_pending+0x324> WARN_ON_ONCE(!work_pending(work)); 4a60: d4210000 brk #0x800 4a64: 17ffffde b 49dc <try_to_grab_pending+0x2fc> 0000000000004a68 <__cancel_work.part.10>: static bool __cancel_work(struct work_struct *work, bool is_dwork) 4a68: d10103ff sub sp, sp, #0x40 4a6c: a9017bfd stp x29, x30, [sp, #16] 4a70: 910043fd add x29, sp, #0x10 4a74: a90253f3 stp x19, x20, [sp, #32] 4a78: 90000014 adrp x20, 0 <__stack_chk_guard> 4a7c: 91000294 add x20, x20, #0x0 4a80: aa0003f3 mov x19, x0 4a84: f9400280 ldr x0, [x20] 4a88: f90007e0 str x0, [sp, #8] 4a8c: d2800000 mov x0, #0x0 // #0 4a90: a9035bf5 stp x21, x22, [sp, #48] 4a94: 12001c36 and w22, w1, #0xff ret = try_to_grab_pending(work, is_dwork, &flags); 4a98: 910003e2 mov x2, sp 4a9c: 2a1603e1 mov w1, w22 4aa0: aa1303e0 mov x0, x19 4aa4: 97ffff0f bl 46e0 <try_to_grab_pending> 4aa8: 2a0003f5 mov w21, w0 } while (unlikely(ret == -EAGAIN)); 4aac: 31002c1f cmn w0, #0xb 4ab0: 54ffff40 b.eq 4a98 <__cancel_work.part.10+0x30> // b.none if (unlikely(ret < 0)) 4ab4: 37f80600 tbnz w0, #31, 4b74 <__cancel_work.part.10+0x10c> __READ_ONCE_SIZE; 4ab8: f9400261 ldr x1, [x19] return data >> WORK_OFFQ_POOL_SHIFT; 4abc: d3459020 ubfx x0, x1, #5, #32 if (data & WORK_STRUCT_PWQ) 4ac0: 36100081 tbz w1, #2, 4ad0 <__cancel_work.part.10+0x68> (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; 4ac4: 9278dc21 and x1, x1, #0xffffffffffffff00 4ac8: f9400020 ldr x0, [x1] 4acc: b940f800 ldr w0, [x0, #248] smp_wmb(); 4ad0: d5033abf dmb ishst 4ad4: f9400261 ldr x1, [x19] set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); 4ad8: 937b7c00 sbfiz x0, x0, #5, #32 WARN_ON_ONCE(!work_pending(work)); 4adc: 36000501 tbz w1, #0, 4b7c <__cancel_work.part.10+0x114> case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 4ae0: f9000260 str x0, 
[x19] smp_mb(); 4ae4: d5033bbf dmb ish local_unlock_irqrestore(pendingb_lock, flags); 4ae8: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 4aec: 91000021 add x1, x1, #0x0 4af0: 91002021 add x1, x1, #0x8 4af4: aa0103f3 mov x19, x1 4af8: 94000000 bl 0 <debug_smp_processor_id> 4afc: 90000001 adrp x1, 0 <__per_cpu_offset> 4b00: 91000021 add x1, x1, #0x0 4b04: f8605821 ldr x1, [x1, w0, uxtw #3] 4b08: 8b010261 add x1, x19, x1 LL_WARN(!lv->nestcnt); 4b0c: b940f820 ldr w0, [x1, #248] 4b10: 340003a0 cbz w0, 4b84 <__cancel_work.part.10+0x11c> LL_WARN(lv->owner != current); 4b14: f9407822 ldr x2, [x1, #240] 4b18: d5384100 mrs x0, sp_el0 4b1c: eb00005f cmp x2, x0 4b20: 54000361 b.ne 4b8c <__cancel_work.part.10+0x124> // b.any if (--lv->nestcnt) 4b24: b940f820 ldr w0, [x1, #248] 4b28: 51000400 sub w0, w0, #0x1 4b2c: b900f820 str w0, [x1, #248] 4b30: 34000180 cbz w0, 4b60 <__cancel_work.part.10+0xf8> return ret; 4b34: 710002bf cmp w21, #0x0 4b38: 1a9f07e0 cset w0, ne // ne = any } 4b3c: f94007e2 ldr x2, [sp, #8] 4b40: f9400281 ldr x1, [x20] 4b44: ca010041 eor x1, x2, x1 4b48: b5000261 cbnz x1, 4b94 <__cancel_work.part.10+0x12c> 4b4c: a9417bfd ldp x29, x30, [sp, #16] 4b50: a94253f3 ldp x19, x20, [sp, #32] 4b54: a9435bf5 ldp x21, x22, [sp, #48] 4b58: 910103ff add sp, sp, #0x40 4b5c: d65f03c0 ret spin_unlock_irqrestore(&lv->lock, lv->flags); 4b60: aa0103e0 mov x0, x1 lv->owner = NULL; 4b64: f900783f str xzr, [x1, #240] spin_unlock_irqrestore(&lv->lock, lv->flags); 4b68: 94000000 bl 0 <rt_spin_unlock> local_unlock_irqrestore(pendingb_lock, flags); 4b6c: 94000000 bl 0 <migrate_enable> 4b70: 17fffff1 b 4b34 <__cancel_work.part.10+0xcc> return false; 4b74: 52800000 mov w0, #0x0 // #0 4b78: 17fffff1 b 4b3c <__cancel_work.part.10+0xd4> WARN_ON_ONCE(!work_pending(work)); 4b7c: d4210000 brk #0x800 4b80: 17ffffd8 b 4ae0 <__cancel_work.part.10+0x78> LL_WARN(!lv->nestcnt); 4b84: d4210000 brk #0x800 4b88: 17ffffe3 b 4b14 <__cancel_work.part.10+0xac> LL_WARN(lv->owner != current); 4b8c: d4210000 brk #0x800 4b90: 17ffffe5 b 4b24 <__cancel_work.part.10+0xbc> } 4b94: 94000000 bl 0 <__stack_chk_fail> 0000000000004b98 <cancel_delayed_work>: { 4b98: a9bf7bfd stp x29, x30, [sp, #-16]! 
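Side note for whoever reads this flattened dump: the pendingb_lock handling that objdump interleaves through try_to_grab_pending() and __cancel_work() above is the -rt local_lock (locallock.h) path, which is hard to follow once the source lines run together. Re-assembled from the interleaved source lines above, the acquire/release logic is roughly the following (a reading aid only, simplified; lv is the per-CPU local_irq_lock behind pendingb_lock):

        /* local_lock_irqsave(pendingb_lock, *flags) -- acquire side */
        migrate_disable();
        if (lv->owner != current) {
                spin_lock_irqsave(&lv->lock, lv->flags);   /* rt_spin_lock() underneath */
                LL_WARN(lv->owner);
                LL_WARN(lv->nestcnt);
                lv->owner = current;
                lv->nestcnt = 1;
        } else {
                lv->nestcnt++;                             /* recursive acquire on the same CPU */
                migrate_enable();
        }

        /* local_unlock_irqrestore(pendingb_lock, *flags) -- release side */
        LL_WARN(!lv->nestcnt);
        LL_WARN(lv->owner != current);
        if (--lv->nestcnt == 0) {
                lv->owner = NULL;
                spin_unlock_irqrestore(&lv->lock, lv->flags);  /* rt_spin_unlock() */
                migrate_enable();
        }

The brk #0x800 instructions scattered around these sequences are the compiled LL_WARN()/WARN_ON_ONCE() checks.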
4b9c: 52800021 mov w1, #0x1 // #1 4ba0: 910003fd mov x29, sp 4ba4: 97ffffb1 bl 4a68 <__cancel_work.part.10> } 4ba8: a8c17bfd ldp x29, x30, [sp], #16 4bac: d65f03c0 ret 0000000000004bb0 <__cancel_work_timer>: { 4bb0: d10243ff sub sp, sp, #0x90 4bb4: a9047bfd stp x29, x30, [sp, #64] 4bb8: 910103fd add x29, sp, #0x40 4bbc: a9065bf5 stp x21, x22, [sp, #96] 4bc0: 90000016 adrp x22, 0 <__stack_chk_guard> 4bc4: 910002d6 add x22, x22, #0x0 4bc8: 90000015 adrp x21, 0 <pwq_activate_delayed_work> 4bcc: 910002b5 add x21, x21, #0x0 4bd0: a90553f3 stp x19, x20, [sp, #80] 4bd4: aa0003f4 mov x20, x0 4bd8: f94002c0 ldr x0, [x22] 4bdc: f9001fe0 str x0, [sp, #56] 4be0: d2800000 mov x0, #0x0 // #0 cwait.wait.func = cwt_wakefn; 4be4: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 4be8: 91000013 add x19, x0, #0x0 { 4bec: a90763f7 stp x23, x24, [sp, #112] 4bf0: 12001c38 and w24, w1, #0xff 4bf4: a9086bf9 stp x25, x26, [sp, #128] prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 4bf8: 910f02ba add x26, x21, #0x3c0 WRITE_ONCE(list->next, list); 4bfc: 910083f9 add x25, sp, #0x20 ret = try_to_grab_pending(work, is_dwork, &flags); 4c00: 910003e2 mov x2, sp 4c04: 2a1803e1 mov w1, w24 4c08: aa1403e0 mov x0, x20 4c0c: 97fffeb5 bl 46e0 <try_to_grab_pending> 4c10: 2a0003f7 mov w23, w0 if (unlikely(ret == -ENOENT)) { 4c14: 3100081f cmn w0, #0x2 4c18: 54000960 b.eq 4d44 <__cancel_work_timer+0x194> // b.none } while (unlikely(ret < 0)); 4c1c: 37ffff20 tbnz w0, #31, 4c00 <__cancel_work_timer+0x50> __READ_ONCE_SIZE; 4c20: f9400281 ldr x1, [x20] return data >> WORK_OFFQ_POOL_SHIFT; 4c24: d3459020 ubfx x0, x1, #5, #32 if (data & WORK_STRUCT_PWQ) 4c28: 36100081 tbz w1, #2, 4c38 <__cancel_work_timer+0x88> (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id; 4c2c: 9278dc21 and x1, x1, #0xffffffffffffff00 4c30: f9400020 ldr x0, [x1] 4c34: b940f800 ldr w0, [x0, #248] 4c38: f9400281 ldr x1, [x20] pool_id <<= WORK_OFFQ_POOL_SHIFT; 4c3c: 937b7c00 sbfiz x0, x0, #5, #32 WARN_ON_ONCE(!work_pending(work)); 4c40: 36000b41 tbz w1, #0, 4da8 <__cancel_work_timer+0x1f8> local_unlock_irqrestore(pendingb_lock, flags); 4c44: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 4c48: 91000021 add x1, x1, #0x0 4c4c: 91002021 add x1, x1, #0x8 atomic_long_set(&work->data, data | flags | work_static(work)); 4c50: d2800222 mov x2, #0x11 // #17 4c54: aa020000 orr x0, x0, x2 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 4c58: f9000280 str x0, [x20] local_unlock_irqrestore(pendingb_lock, flags); 4c5c: aa0103f3 mov x19, x1 4c60: 94000000 bl 0 <debug_smp_processor_id> 4c64: 90000001 adrp x1, 0 <__per_cpu_offset> 4c68: 91000021 add x1, x1, #0x0 4c6c: f8605821 ldr x1, [x1, w0, uxtw #3] 4c70: 8b010261 add x1, x19, x1 LL_WARN(!lv->nestcnt); 4c74: b940f820 ldr w0, [x1, #248] 4c78: 340008a0 cbz w0, 4d8c <__cancel_work_timer+0x1dc> LL_WARN(lv->owner != current); 4c7c: f9407822 ldr x2, [x1, #240] 4c80: d5384100 mrs x0, sp_el0 4c84: eb00005f cmp x2, x0 4c88: 540008c1 b.ne 4da0 <__cancel_work_timer+0x1f0> // b.any if (--lv->nestcnt) 4c8c: b940f820 ldr w0, [x1, #248] 4c90: 51000400 sub w0, w0, #0x1 4c94: b900f820 str w0, [x1, #248] 4c98: 350000a0 cbnz w0, 4cac <__cancel_work_timer+0xfc> spin_unlock_irqrestore(&lv->lock, lv->flags); 4c9c: aa0103e0 mov x0, x1 lv->owner = NULL; 4ca0: f900783f str xzr, [x1, #240] spin_unlock_irqrestore(&lv->lock, lv->flags); 4ca4: 94000000 bl 0 <rt_spin_unlock> 4ca8: 94000000 bl 0 <migrate_enable> if (wq_online) 4cac: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 4cb0: 39400000 ldrb w0, [x0] 4cb4: 350003a0 cbnz w0, 4d28 
<__cancel_work_timer+0x178> smp_wmb(); /* see set_work_pool_and_clear_pending() */ 4cb8: d5033abf dmb ishst 4cbc: f9400280 ldr x0, [x20] WARN_ON_ONCE(!work_pending(work)); 4cc0: 360003e0 tbz w0, #0, 4d3c <__cancel_work_timer+0x18c> 4cc4: b27b7be0 mov x0, #0xfffffffe0 // #68719476704 4cc8: f9000280 str x0, [x20] smp_mb(); 4ccc: d5033bbf dmb ish __READ_ONCE_SIZE; 4cd0: 910f02a0 add x0, x21, #0x3c0 if (waitqueue_active(&cancel_waitq)) 4cd4: 9112c2b5 add x21, x21, #0x4b0 4cd8: f9407801 ldr x1, [x0, #240] 4cdc: eb15003f cmp x1, x21 4ce0: 540000a0 b.eq 4cf4 <__cancel_work_timer+0x144> // b.none __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); 4ce4: aa1403e3 mov x3, x20 4ce8: 52800022 mov w2, #0x1 // #1 4cec: 52800061 mov w1, #0x3 // #3 4cf0: 94000000 bl 0 <__wake_up> return ret; 4cf4: 710002ff cmp w23, #0x0 } 4cf8: f9401fe0 ldr x0, [sp, #56] 4cfc: f94002c1 ldr x1, [x22] 4d00: ca010001 eor x1, x0, x1 4d04: 1a9f07e0 cset w0, ne // ne = any 4d08: b5000581 cbnz x1, 4db8 <__cancel_work_timer+0x208> 4d0c: a9447bfd ldp x29, x30, [sp, #64] 4d10: a94553f3 ldp x19, x20, [sp, #80] 4d14: a9465bf5 ldp x21, x22, [sp, #96] 4d18: a94763f7 ldp x23, x24, [sp, #112] 4d1c: a9486bf9 ldp x25, x26, [sp, #128] 4d20: 910243ff add sp, sp, #0x90 4d24: d65f03c0 ret flush_work(work); 4d28: aa1403e0 mov x0, x20 4d2c: 94000000 bl 23e0 <flush_work> smp_wmb(); /* see set_work_pool_and_clear_pending() */ 4d30: d5033abf dmb ishst 4d34: f9400280 ldr x0, [x20] WARN_ON_ONCE(!work_pending(work)); 4d38: 3707fc60 tbnz w0, #0, 4cc4 <__cancel_work_timer+0x114> 4d3c: d4210000 brk #0x800 4d40: 17ffffe1 b 4cc4 <__cancel_work_timer+0x114> 4d44: d5384103 mrs x3, sp_el0 prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 4d48: 910023e1 add x1, sp, #0x8 4d4c: aa1a03e0 mov x0, x26 4d50: 52800042 mov w2, #0x2 // #2 init_wait(&cwait.wait); 4d54: b9000bff str wzr, [sp, #8] cwait.wait.func = cwt_wakefn; 4d58: a9014fe3 stp x3, x19, [sp, #16] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 4d5c: f90013f9 str x25, [sp, #32] cwait.work = work; 4d60: a902d3f9 stp x25, x20, [sp, #40] prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait, 4d64: 94000000 bl 0 <prepare_to_wait_exclusive> return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); 4d68: d2800281 mov x1, #0x14 // #20 __READ_ONCE_SIZE; 4d6c: f9400280 ldr x0, [x20] 4d70: 8a010000 and x0, x0, x1 if (work_is_canceling(work)) 4d74: f100401f cmp x0, #0x10 4d78: 540001c0 b.eq 4db0 <__cancel_work_timer+0x200> // b.none finish_wait(&cancel_waitq, &cwait.wait); 4d7c: 910023e1 add x1, sp, #0x8 4d80: aa1a03e0 mov x0, x26 4d84: 94000000 bl 0 <finish_wait> 4d88: 17ffff9e b 4c00 <__cancel_work_timer+0x50> LL_WARN(!lv->nestcnt); 4d8c: d4210000 brk #0x800 LL_WARN(lv->owner != current); 4d90: f9407822 ldr x2, [x1, #240] 4d94: d5384100 mrs x0, sp_el0 4d98: eb00005f cmp x2, x0 4d9c: 54fff780 b.eq 4c8c <__cancel_work_timer+0xdc> // b.none 4da0: d4210000 brk #0x800 4da4: 17ffffba b 4c8c <__cancel_work_timer+0xdc> WARN_ON_ONCE(!work_pending(work)); 4da8: d4210000 brk #0x800 4dac: 17ffffa6 b 4c44 <__cancel_work_timer+0x94> schedule(); 4db0: 94000000 bl 0 <schedule> 4db4: 17fffff2 b 4d7c <__cancel_work_timer+0x1cc> } 4db8: 94000000 bl 0 <__stack_chk_fail> 4dbc: d503201f nop 0000000000004dc0 <cancel_work_sync>: { 4dc0: a9bf7bfd stp x29, x30, [sp, #-16]! 
return __cancel_work_timer(work, false); 4dc4: 52800001 mov w1, #0x0 // #0 { 4dc8: 910003fd mov x29, sp return __cancel_work_timer(work, false); 4dcc: 97ffff79 bl 4bb0 <__cancel_work_timer> } 4dd0: a8c17bfd ldp x29, x30, [sp], #16 4dd4: d65f03c0 ret 0000000000004dd8 <cancel_delayed_work_sync>: { 4dd8: a9bf7bfd stp x29, x30, [sp, #-16]! return __cancel_work_timer(&dwork->work, true); 4ddc: 52800021 mov w1, #0x1 // #1 { 4de0: 910003fd mov x29, sp return __cancel_work_timer(&dwork->work, true); 4de4: 97ffff73 bl 4bb0 <__cancel_work_timer> } 4de8: a8c17bfd ldp x29, x30, [sp], #16 4dec: d65f03c0 ret 0000000000004df0 <rescuer_thread>: { 4df0: d10243ff sub sp, sp, #0x90 set_user_nice(current, RESCUER_NICE_LEVEL); 4df4: 92800261 mov x1, #0xffffffffffffffec // #-20 { 4df8: a9037bfd stp x29, x30, [sp, #48] 4dfc: 9100c3fd add x29, sp, #0x30 4e00: a90453f3 stp x19, x20, [sp, #64] 4e04: a9055bf5 stp x21, x22, [sp, #80] 4e08: a90663f7 stp x23, x24, [sp, #96] 4e0c: 90000018 adrp x24, 0 <pwq_activate_delayed_work> 4e10: 91000318 add x24, x24, #0x0 4e14: a9076bf9 stp x25, x26, [sp, #112] 4e18: 9000001a adrp x26, 0 <pwq_activate_delayed_work> 4e1c: a90873fb stp x27, x28, [sp, #128] 4e20: aa0003fc mov x28, x0 4e24: 90000000 adrp x0, 0 <__stack_chk_guard> 4e28: 91000000 add x0, x0, #0x0 4e2c: f9000fe0 str x0, [sp, #24] 4e30: f9400002 ldr x2, [x0] 4e34: f90017e2 str x2, [sp, #40] 4e38: d2800002 mov x2, #0x0 // #0 struct list_head *scheduled = &rescuer->scheduled; 4e3c: 9100c39b add x27, x28, #0x30 4e40: d5384100 mrs x0, sp_el0 struct workqueue_struct *wq = rescuer->rescue_wq; 4e44: f9404b96 ldr x22, [x28, #144] set_user_nice(current, RESCUER_NICE_LEVEL); 4e48: 94000000 bl 0 <set_user_nice> rescuer->task->flags |= PF_WQ_WORKER; 4e4c: f9402381 ldr x1, [x28, #64] 4e50: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 4e54: 91000000 add x0, x0, #0x0 4e58: f9000be0 str x0, [sp, #16] 4e5c: 91000340 add x0, x26, #0x0 4e60: f90003e0 str x0, [sp] 4e64: b9403420 ldr w0, [x1, #52] 4e68: 910502d7 add x23, x22, #0x140 4e6c: 321b0000 orr w0, w0, #0x20 4e70: b9003420 str w0, [x1, #52] 4e74: d503201f nop case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 4e78: d2808041 mov x1, #0x402 // #1026 4e7c: d5384100 mrs x0, sp_el0 4e80: f9000c01 str x1, [x0, #24] set_current_state(TASK_IDLE); 4e84: f9400be1 ldr x1, [sp, #16] 4e88: f90b8801 str x1, [x0, #5904] 4e8c: d5033bbf dmb ish should_stop = kthread_should_stop(); 4e90: 94000000 bl 0 <kthread_should_stop> 4e94: 12001c00 and w0, w0, #0xff spin_lock_irq(&wq_mayday_lock); 4e98: f94003e1 ldr x1, [sp] should_stop = kthread_should_stop(); 4e9c: b9000fe0 str w0, [sp, #12] spin_lock_irq(&wq_mayday_lock); 4ea0: 910b4035 add x21, x1, #0x2d0 4ea4: aa1503e0 mov x0, x21 4ea8: 94000000 bl 0 <rt_spin_lock> __READ_ONCE_SIZE; 4eac: f940a2c0 ldr x0, [x22, #320] while (!list_empty(&wq->maydays)) { 4eb0: eb0002ff cmp x23, x0 4eb4: 54000d40 b.eq 505c <rescuer_thread+0x26c> // b.none struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 4eb8: f940a2d4 ldr x20, [x22, #320] 4ebc: d5384101 mrs x1, sp_el0 spin_unlock_irq(&wq_mayday_lock); 4ec0: aa1503e0 mov x0, x21 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, 4ec4: d102029a sub x26, x20, #0x80 struct worker_pool *pool = pwq->pool; 4ec8: f8580293 ldur x19, [x20, #-128] __set_current_state(TASK_RUNNING); 4ecc: f9000c3f str xzr, [x1, #24] 4ed0: f90b8838 str x24, [x1, #5904] __list_del(entry->prev, entry->next); 4ed4: a9400682 ldp x2, x1, [x20] next->prev = prev; 4ed8: f9000441 str x1, [x2, #8] case 8: *(volatile __u64 *)p = *(__u64 
*)res; break; 4edc: f9000022 str x2, [x1] 4ee0: f9000294 str x20, [x20] list->prev = list; 4ee4: f9000694 str x20, [x20, #8] spin_unlock_irq(&wq_mayday_lock); 4ee8: 94000000 bl 0 <rt_spin_unlock> worker_attach_to_pool(rescuer, pool); 4eec: aa1303e1 mov x1, x19 4ef0: aa1c03e0 mov x0, x28 4ef4: 97fff0e5 bl 1288 <worker_attach_to_pool> spin_lock_irq(&pool->lock); 4ef8: aa1303e0 mov x0, x19 4efc: 94000000 bl 0 <rt_spin_lock> rescuer->pool = pool; 4f00: f9002793 str x19, [x28, #72] __READ_ONCE_SIZE; 4f04: f9401b80 ldr x0, [x28, #48] WARN_ON_ONCE(!list_empty(scheduled)); 4f08: eb00037f cmp x27, x0 4f0c: 54000f81 b.ne 50fc <rescuer_thread+0x30c> // b.any list_for_each_entry_safe(work, n, &pool->worklist, entry) { 4f10: f9408664 ldr x4, [x19, #264] 4f14: 91042279 add x25, x19, #0x108 bool first = true; 4f18: 52800028 mov w8, #0x1 // #1 list_for_each_entry_safe(work, n, &pool->worklist, entry) { 4f1c: eb04033f cmp x25, x4 4f20: f85f8482 ldr x2, [x4], #-8 4f24: d1002042 sub x2, x2, #0x8 4f28: 54000121 b.ne 4f4c <rescuer_thread+0x15c> // b.any 4f2c: 14000035 b 5000 <rescuer_thread+0x210> 4f30: aa0203e0 mov x0, x2 4f34: aa0203e4 mov x4, x2 first = false; 4f38: 52800008 mov w8, #0x0 // #0 list_for_each_entry_safe(work, n, &pool->worklist, entry) { 4f3c: f8408c02 ldr x2, [x0, #8]! 4f40: d1002042 sub x2, x2, #0x8 4f44: eb19001f cmp x0, x25 4f48: 540005c0 b.eq 5000 <rescuer_thread+0x210> // b.none 4f4c: f9400081 ldr x1, [x4] return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); 4f50: 9278dc20 and x0, x1, #0xffffffffffffff00 4f54: f27e003f tst x1, #0x4 4f58: 9a9f1000 csel x0, x0, xzr, ne // ne = any if (get_work_pwq(work) == pwq) { 4f5c: eb00035f cmp x26, x0 4f60: 54fffe81 b.ne 4f30 <rescuer_thread+0x140> // b.any if (first) 4f64: 34000088 cbz w8, 4f74 <rescuer_thread+0x184> pool->watchdog_ts = jiffies; 4f68: 90000000 adrp x0, 0 <jiffies> 4f6c: f9400000 ldr x0, [x0] 4f70: f9008260 str x0, [x19, #256] list_for_each_entry_safe_from(work, n, NULL, entry) { 4f74: aa0403e0 mov x0, x4 4f78: f8408c02 ldr x2, [x0, #8]! 4f7c: d1002041 sub x1, x2, #0x8 4f80: b4000ba0 cbz x0, 50f4 <rescuer_thread+0x304> __list_del(entry->prev, entry->next); 4f84: f9400888 ldr x8, [x4, #16] next->prev = prev; 4f88: f9000448 str x8, [x2, #8] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 4f8c: f9000102 str x2, [x8] __list_add(new, head->prev, head); 4f90: f9401f82 ldr x2, [x28, #56] next->prev = new; 4f94: f9001f80 str x0, [x28, #56] new->prev = prev; 4f98: a900889b stp x27, x2, [x4, #8] 4f9c: f9000040 str x0, [x2] if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 4fa0: f9400080 ldr x0, [x4] 4fa4: 37180180 tbnz w0, #3, 4fd4 <rescuer_thread+0x1e4> 4fa8: 14000053 b 50f4 <rescuer_thread+0x304> __list_del(entry->prev, entry->next); 4fac: f9400828 ldr x8, [x1, #16] next->prev = prev; 4fb0: f9000488 str x8, [x4, #8] 4fb4: f9000104 str x4, [x8] __list_add(new, head->prev, head); 4fb8: f9401f84 ldr x4, [x28, #56] next->prev = new; 4fbc: f9001f80 str x0, [x28, #56] new->prev = prev; 4fc0: a900903b stp x27, x4, [x1, #8] 4fc4: f9000080 str x0, [x4] 4fc8: f9400020 ldr x0, [x1] list_for_each_entry_safe_from(work, n, NULL, entry) { 4fcc: aa0203e1 mov x1, x2 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) 4fd0: 361ffb00 tbz w0, #3, 4f30 <rescuer_thread+0x140> list_for_each_entry_safe_from(work, n, NULL, entry) { 4fd4: aa0103e0 mov x0, x1 4fd8: f8408c04 ldr x4, [x0, #8]! 
4fdc: d1002082 sub x2, x4, #0x8 4fe0: b5fffe60 cbnz x0, 4fac <rescuer_thread+0x1bc> list_for_each_entry_safe(work, n, &pool->worklist, entry) { 4fe4: aa0203e0 mov x0, x2 4fe8: aa0203e4 mov x4, x2 first = false; 4fec: 52800008 mov w8, #0x0 // #0 list_for_each_entry_safe(work, n, &pool->worklist, entry) { 4ff0: f8408c02 ldr x2, [x0, #8]! 4ff4: d1002042 sub x2, x2, #0x8 4ff8: eb19001f cmp x0, x25 4ffc: 54fffa81 b.ne 4f4c <rescuer_thread+0x15c> // b.any __READ_ONCE_SIZE; 5000: f9401b80 ldr x0, [x28, #48] if (!list_empty(scheduled)) { 5004: eb00037f cmp x27, x0 5008: 54000481 b.ne 5098 <rescuer_thread+0x2a8> // b.any put_pwq(pwq); 500c: aa1a03e0 mov x0, x26 5010: 97fff92a bl 34b8 <put_pwq> 5014: f9408660 ldr x0, [x19, #264] return !list_empty(&pool->worklist) && __need_more_worker(pool); 5018: eb00033f cmp x25, x0 501c: 540000a0 b.eq 5030 <rescuer_thread+0x240> // b.none 5020: b9458260 ldr w0, [x19, #1408] 5024: 35000060 cbnz w0, 5030 <rescuer_thread+0x240> wake_up_worker(pool); 5028: aa1303e0 mov x0, x19 502c: 97ffecff bl 428 <wake_up_worker> rescuer->pool = NULL; 5030: f900279f str xzr, [x28, #72] spin_unlock_irq(&pool->lock); 5034: aa1303e0 mov x0, x19 5038: 94000000 bl 0 <rt_spin_unlock> worker_detach_from_pool(rescuer, pool); 503c: aa1303e1 mov x1, x19 5040: aa1c03e0 mov x0, x28 5044: 97ffefe7 bl fe0 <worker_detach_from_pool> spin_lock_irq(&wq_mayday_lock); 5048: aa1503e0 mov x0, x21 504c: 94000000 bl 0 <rt_spin_lock> 5050: f940a2c0 ldr x0, [x22, #320] while (!list_empty(&wq->maydays)) { 5054: eb0002ff cmp x23, x0 5058: 54fff301 b.ne 4eb8 <rescuer_thread+0xc8> // b.any spin_unlock_irq(&wq_mayday_lock); 505c: f94003e0 ldr x0, [sp] 5060: 910b4000 add x0, x0, #0x2d0 5064: 94000000 bl 0 <rt_spin_unlock> if (should_stop) { 5068: b9400fe0 ldr w0, [sp, #12] 506c: 350005c0 cbnz w0, 5124 <rescuer_thread+0x334> WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 5070: b9406b81 ldr w1, [x28, #104] 5074: 52803900 mov w0, #0x1c8 // #456 5078: 6a00003f tst w1, w0 507c: 54000360 b.eq 50e8 <rescuer_thread+0x2f8> // b.none schedule(); 5080: 94000000 bl 0 <schedule> goto repeat; 5084: 17ffff7d b 4e78 <rescuer_thread+0x88> struct work_struct *work = list_first_entry(&worker->scheduled, 5088: f9401b81 ldr x1, [x28, #48] process_one_work(worker, work); 508c: aa1c03e0 mov x0, x28 5090: d1002021 sub x1, x1, #0x8 5094: 97fffb57 bl 3df0 <process_one_work> 5098: f9401b80 ldr x0, [x28, #48] while (!list_empty(&worker->scheduled)) { 509c: eb00037f cmp x27, x0 50a0: 54ffff41 b.ne 5088 <rescuer_thread+0x298> // b.any 50a4: f9408660 ldr x0, [x19, #264] return !list_empty(&pool->worklist) && __need_more_worker(pool); 50a8: eb00033f cmp x25, x0 50ac: 54fffb00 b.eq 500c <rescuer_thread+0x21c> // b.none 50b0: b9458260 ldr w0, [x19, #1408] 50b4: 35fffac0 cbnz w0, 500c <rescuer_thread+0x21c> return need_more_worker(pool) && !may_start_working(pool); 50b8: b9411e60 ldr w0, [x19, #284] 50bc: 35fffa80 cbnz w0, 500c <rescuer_thread+0x21c> spin_lock(&wq_mayday_lock); 50c0: aa1503e0 mov x0, x21 50c4: 94000000 bl 0 <rt_spin_lock> if (wq->rescuer && list_empty(&pwq->mayday_node)) { 50c8: f940aac0 ldr x0, [x22, #336] 50cc: b4000080 cbz x0, 50dc <rescuer_thread+0x2ec> 50d0: f9400280 ldr x0, [x20] 50d4: eb00029f cmp x20, x0 50d8: 54000160 b.eq 5104 <rescuer_thread+0x314> // b.none spin_unlock(&wq_mayday_lock); 50dc: aa1503e0 mov x0, x21 50e0: 94000000 bl 0 <rt_spin_unlock> 50e4: 17ffffca b 500c <rescuer_thread+0x21c> WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); 50e8: d4210000 brk #0x800 schedule(); 50ec: 94000000 bl 0 
<schedule> goto repeat; 50f0: 17ffff62 b 4e78 <rescuer_thread+0x88> list_for_each_entry_safe_from(work, n, NULL, entry) { 50f4: aa0103e2 mov x2, x1 50f8: 17ffff8e b 4f30 <rescuer_thread+0x140> WARN_ON_ONCE(!list_empty(scheduled)); 50fc: d4210000 brk #0x800 5100: 17ffff84 b 4f10 <rescuer_thread+0x120> get_pwq(pwq); 5104: aa1a03e0 mov x0, x26 5108: d101a281 sub x1, x20, #0x68 510c: 97fff429 bl 21b0 <get_pwq.isra.3> __list_add(new, head->prev, head); 5110: f940a6c0 ldr x0, [x22, #328] next->prev = new; 5114: f900a6d4 str x20, [x22, #328] new->prev = prev; 5118: a9000297 stp x23, x0, [x20] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 511c: f9000014 str x20, [x0] WRITE_ONCE(prev->next, new); 5120: 17ffffef b 50dc <rescuer_thread+0x2ec> __set_current_state(TASK_RUNNING); 5124: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 5128: 91000021 add x1, x1, #0x0 512c: d5384102 mrs x2, sp_el0 5130: f90b8841 str x1, [x2, #5904] } 5134: 52800000 mov w0, #0x0 // #0 5138: f9400fe1 ldr x1, [sp, #24] __set_current_state(TASK_RUNNING); 513c: f9000c5f str xzr, [x2, #24] } 5140: f94017e2 ldr x2, [sp, #40] 5144: f9400024 ldr x4, [x1] 5148: ca040044 eor x4, x2, x4 rescuer->task->flags &= ~PF_WQ_WORKER; 514c: f9402382 ldr x2, [x28, #64] 5150: b9403441 ldr w1, [x2, #52] 5154: 121a7821 and w1, w1, #0xffffffdf 5158: b9003441 str w1, [x2, #52] } 515c: b5000124 cbnz x4, 5180 <rescuer_thread+0x390> 5160: a9437bfd ldp x29, x30, [sp, #48] 5164: a94453f3 ldp x19, x20, [sp, #64] 5168: a9455bf5 ldp x21, x22, [sp, #80] 516c: a94663f7 ldp x23, x24, [sp, #96] 5170: a9476bf9 ldp x25, x26, [sp, #112] 5174: a94873fb ldp x27, x28, [sp, #128] 5178: 910243ff add sp, sp, #0x90 517c: d65f03c0 ret 5180: 94000000 bl 0 <__stack_chk_fail> 5184: d503201f nop 0000000000005188 <execute_in_process_context>: { 5188: a9be7bfd stp x29, x30, [sp, #-32]! 
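Also worth flagging while reading rescuer_thread() above: it touches rescuer->task exactly the way worker_thread() touches worker->task. Per the interleaved source lines in the dump (offsets quoted from this build only):

        rescuer->task->flags |= PF_WQ_WORKER;    /* entry: 4e4c ldr x1, [x28, #64]; 4e64/4e6c/4e70 ldr-orr-str on [x1, #52] */
        ...
        rescuer->task->flags &= ~PF_WQ_WORKER;   /* should_stop exit path: 514c-5158 */

so the task pointer is loaded from [x28, #64] and its flags word sits at offset #52 here as well.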
518c: d5384102 mrs x2, sp_el0 5190: 910003fd mov x29, sp 5194: a90153f3 stp x19, x20, [sp, #16] 5198: aa0003f4 mov x20, x0 519c: aa0103f3 mov x19, x1 __READ_ONCE_SIZE; 51a0: b9401042 ldr w2, [x2, #16] fn(&ew->work); 51a4: aa0103e0 mov x0, x1 if (!in_interrupt()) { 51a8: 7218305f tst w2, #0x1fff00 51ac: 540000c1 b.ne 51c4 <execute_in_process_context+0x3c> // b.any fn(&ew->work); 51b0: d63f0280 blr x20 return 0; 51b4: 52800000 mov w0, #0x0 // #0 } 51b8: a94153f3 ldp x19, x20, [sp, #16] 51bc: a8c27bfd ldp x29, x30, [sp], #32 51c0: d65f03c0 ret INIT_WORK(&ew->work, fn); 51c4: b27b7be3 mov x3, #0xfffffffe0 // #68719476704 51c8: f8020403 str x3, [x0], #32 51cc: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 51d0: 91000042 add x2, x2, #0x0 51d4: 52800003 mov w3, #0x0 // #0 51d8: 9109c042 add x2, x2, #0x270 51dc: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 51e0: 91000021 add x1, x1, #0x0 51e4: 94000000 bl 0 <lockdep_init_map> return queue_work(system_wq, work); 51e8: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 51ec: 91002260 add x0, x19, #0x8 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 51f0: f9000660 str x0, [x19, #8] return queue_work_on(WORK_CPU_UNBOUND, wq, work); 51f4: aa1303e2 mov x2, x19 51f8: f9400021 ldr x1, [x1] 51fc: a9015260 stp x0, x20, [x19, #16] 5200: 52800800 mov w0, #0x40 // #64 5204: 94000000 bl 3330 <queue_work_on> 5208: 52800020 mov w0, #0x1 // #1 } 520c: a94153f3 ldp x19, x20, [sp, #16] 5210: a8c27bfd ldp x29, x30, [sp], #32 5214: d65f03c0 ret 0000000000005218 <work_on_cpu>: { 5218: d10243ff sub sp, sp, #0x90 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 521c: 90000004 adrp x4, 0 <pwq_activate_delayed_work> 5220: 91000084 add x4, x4, #0x0 5224: b27b7be6 mov x6, #0xfffffffe0 // #68719476704 5228: 90000005 adrp x5, 0 <pwq_activate_delayed_work> 522c: 52800003 mov w3, #0x0 // #0 { 5230: a9077bfd stp x29, x30, [sp, #112] 5234: 9101c3fd add x29, sp, #0x70 5238: a90853f3 stp x19, x20, [sp, #128] 523c: 90000013 adrp x19, 0 <__stack_chk_guard> 5240: 91000273 add x19, x19, #0x0 5244: 2a0003f4 mov w20, w0 5248: f9400260 ldr x0, [x19] 524c: f90037e0 str x0, [sp, #104] 5250: d2800000 mov x0, #0x0 // #0 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 5254: a904ffff stp xzr, xzr, [sp, #72] INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 5258: 910083e0 add x0, sp, #0x20 struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 525c: a905ffff stp xzr, xzr, [sp, #88] INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 5260: f90003e6 str x6, [sp] struct work_for_cpu wfc = { .fn = fn, .arg = arg }; 5264: a900ffff stp xzr, xzr, [sp, #8] 5268: a901ffff stp xzr, xzr, [sp, #24] 526c: a902ffff stp xzr, xzr, [sp, #40] 5270: a903ffff stp xzr, xzr, [sp, #56] 5274: a9050be1 stp x1, x2, [sp, #80] INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); 5278: 910000a1 add x1, x5, #0x0 527c: 9109e082 add x2, x4, #0x278 5280: 94000000 bl 0 <lockdep_init_map> WRITE_ONCE(list->next, list); 5284: 910023e4 add x4, sp, #0x8 return queue_work_on(cpu, system_wq, work); 5288: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 528c: 90000003 adrp x3, 0 <pwq_activate_delayed_work> 5290: 91000063 add x3, x3, #0x0 5294: 910003e2 mov x2, sp 5298: f9400021 ldr x1, [x1] 529c: 2a1403e0 mov w0, w20 52a0: f90007e4 str x4, [sp, #8] 52a4: a9010fe4 stp x4, x3, [sp, #16] 52a8: 94000000 bl 3330 <queue_work_on> flush_work(&wfc.work); 52ac: 910003e0 mov x0, sp 52b0: 94000000 bl 23e0 <flush_work> } 52b4: f94037e0 ldr x0, [sp, #104] 52b8: f9400261 ldr x1, [x19] 52bc: ca010001 eor x1, x0, x1 52c0: f94033e0 ldr x0, [sp, #96] 52c4: b50000a1 
cbnz x1, 52d8 <work_on_cpu+0xc0> 52c8: a9477bfd ldp x29, x30, [sp, #112] 52cc: a94853f3 ldp x19, x20, [sp, #128] 52d0: 910243ff add sp, sp, #0x90 52d4: d65f03c0 ret 52d8: 94000000 bl 0 <__stack_chk_fail> 52dc: d503201f nop 00000000000052e0 <work_on_cpu_safe>: { 52e0: a9bd7bfd stp x29, x30, [sp, #-48]! 52e4: 910003fd mov x29, sp 52e8: a90153f3 stp x19, x20, [sp, #16] 52ec: 2a0003f3 mov w19, w0 52f0: aa0103f4 mov x20, x1 52f4: f90013f5 str x21, [sp, #32] 52f8: aa0203f5 mov x21, x2 static inline void get_online_cpus(void) { cpus_read_lock(); } 52fc: 94000000 bl 0 <cpus_read_lock> 5300: 7100027f cmp w19, #0x0 5304: 1100fe63 add w3, w19, #0x3f 5308: 1a93b063 csel w3, w3, w19, lt // lt = tstop 530c: 90000000 adrp x0, 0 <__cpu_online_mask> 5310: 91000000 add x0, x0, #0x0 5314: 13067c63 asr w3, w3, #6 5318: 93407c63 sxtw x3, w3 531c: f8637800 ldr x0, [x0, x3, lsl #3] 5320: 9ad32400 lsr x0, x0, x19 if (cpu_online(cpu)) 5324: 36000180 tbz w0, #0, 5354 <work_on_cpu_safe+0x74> ret = work_on_cpu(cpu, fn, arg); 5328: 2a1303e0 mov w0, w19 532c: aa1503e2 mov x2, x21 5330: aa1403e1 mov x1, x20 5334: 94000000 bl 5218 <work_on_cpu> 5338: aa0003f3 mov x19, x0 static inline void put_online_cpus(void) { cpus_read_unlock(); } 533c: 94000000 bl 0 <cpus_read_unlock> } 5340: aa1303e0 mov x0, x19 5344: a94153f3 ldp x19, x20, [sp, #16] 5348: f94013f5 ldr x21, [sp, #32] 534c: a8c37bfd ldp x29, x30, [sp], #48 5350: d65f03c0 ret long ret = -ENODEV; 5354: 92800253 mov x19, #0xffffffffffffffed // #-19 5358: 17fffff9 b 533c <work_on_cpu_safe+0x5c> 535c: d503201f nop 0000000000005360 <delayed_work_timer_fn>: __queue_work(dwork->cpu, dwork->wq, &dwork->work); 5360: aa0003e2 mov x2, x0 { 5364: a9bf7bfd stp x29, x30, [sp, #-16]! 5368: 910003fd mov x29, sp __queue_work(dwork->cpu, dwork->wq, &dwork->work); 536c: b940b800 ldr w0, [x0, #184] 5370: f9405841 ldr x1, [x2, #176] 5374: 97fff6d3 bl 2ec0 <__queue_work> } 5378: a8c17bfd ldp x29, x30, [sp], #16 537c: d65f03c0 ret 0000000000005380 <__queue_delayed_work>: { 5380: a9bf7bfd stp x29, x30, [sp, #-16]! struct timer_list *timer = &dwork->timer; 5384: 91014044 add x4, x2, #0x50 { 5388: 910003fd mov x29, sp WARN_ON_ONCE(!wq); 538c: b4000421 cbz x1, 5410 <__queue_delayed_work+0x90> WARN_ON_ONCE(timer->function != delayed_work_timer_fn); 5390: f9400c86 ldr x6, [x4, #24] 5394: 90000005 adrp x5, 5360 <delayed_work_timer_fn> 5398: 910000a5 add x5, x5, #0x0 539c: eb0500df cmp x6, x5 53a0: 540003c1 b.ne 5418 <__queue_delayed_work+0x98> // b.any WARN_ON_ONCE(timer->data != (unsigned long)dwork); 53a4: f9401085 ldr x5, [x4, #32] 53a8: eb05005f cmp x2, x5 53ac: 540003a1 b.ne 5420 <__queue_delayed_work+0xa0> // b.any WARN_ON_ONCE(timer_pending(timer)); 53b0: f9400485 ldr x5, [x4, #8] 53b4: b50003a5 cbnz x5, 5428 <__queue_delayed_work+0xa8> __READ_ONCE_SIZE; 53b8: aa0203e5 mov x5, x2 53bc: f8408ca6 ldr x6, [x5, #8]! 
WARN_ON_ONCE(!list_empty(&work->entry)); 53c0: eb0600bf cmp x5, x6 53c4: 54000221 b.ne 5408 <__queue_delayed_work+0x88> // b.any if (!delay) { 53c8: b40001a3 cbz x3, 53fc <__queue_delayed_work+0x7c> timer->expires = jiffies + delay; 53cc: 90000005 adrp x5, 0 <jiffies> dwork->wq = wq; 53d0: f9005841 str x1, [x2, #176] dwork->cpu = cpu; 53d4: b900b840 str w0, [x2, #184] if (unlikely(cpu != WORK_CPU_UNBOUND)) 53d8: 7101001f cmp w0, #0x40 timer->expires = jiffies + delay; 53dc: f94000a1 ldr x1, [x5] 53e0: 8b030021 add x1, x1, x3 53e4: f9000881 str x1, [x4, #16] if (unlikely(cpu != WORK_CPU_UNBOUND)) 53e8: 54000241 b.ne 5430 <__queue_delayed_work+0xb0> // b.any add_timer(timer); 53ec: aa0403e0 mov x0, x4 53f0: 94000000 bl 0 <add_timer> } 53f4: a8c17bfd ldp x29, x30, [sp], #16 53f8: d65f03c0 ret __queue_work(cpu, wq, &dwork->work); 53fc: 97fff6b1 bl 2ec0 <__queue_work> } 5400: a8c17bfd ldp x29, x30, [sp], #16 5404: d65f03c0 ret WARN_ON_ONCE(!list_empty(&work->entry)); 5408: d4210000 brk #0x800 540c: 17ffffef b 53c8 <__queue_delayed_work+0x48> WARN_ON_ONCE(!wq); 5410: d4210000 brk #0x800 5414: 17ffffdf b 5390 <__queue_delayed_work+0x10> WARN_ON_ONCE(timer->function != delayed_work_timer_fn); 5418: d4210000 brk #0x800 541c: 17ffffe2 b 53a4 <__queue_delayed_work+0x24> WARN_ON_ONCE(timer->data != (unsigned long)dwork); 5420: d4210000 brk #0x800 5424: 17ffffe3 b 53b0 <__queue_delayed_work+0x30> WARN_ON_ONCE(timer_pending(timer)); 5428: d4210000 brk #0x800 542c: 17ffffe3 b 53b8 <__queue_delayed_work+0x38> add_timer_on(timer, cpu); 5430: 2a0003e1 mov w1, w0 5434: aa0403e0 mov x0, x4 5438: 94000000 bl 0 <add_timer_on> 543c: 17ffffee b 53f4 <__queue_delayed_work+0x74> 0000000000005440 <queue_delayed_work_on>: { 5440: a9bb7bfd stp x29, x30, [sp, #-80]! 5444: 910003fd mov x29, sp 5448: a9025bf5 stp x21, x22, [sp, #32] local_lock_irqsave(pendingb_lock, flags); 544c: 90000015 adrp x21, 0 <__per_cpu_offset> 5450: 910002b5 add x21, x21, #0x0 { 5454: aa0203f6 mov x22, x2 5458: a90153f3 stp x19, x20, [sp, #16] local_lock_irqsave(pendingb_lock, flags); 545c: 90000013 adrp x19, 0 <pwq_activate_delayed_work> { 5460: a90363f7 stp x23, x24, [sp, #48] 5464: aa0103f8 mov x24, x1 5468: 2a0003f7 mov w23, w0 546c: f90023f9 str x25, [sp, #64] 5470: aa0303f9 mov x25, x3 local_lock_irqsave(pendingb_lock, flags); 5474: 94000000 bl 0 <migrate_disable> 5478: 91000273 add x19, x19, #0x0 547c: 94000000 bl 0 <debug_smp_processor_id> 5480: 91002274 add x20, x19, #0x8 5484: f8605aa0 ldr x0, [x21, w0, uxtw #3] 5488: d5384101 mrs x1, sp_el0 548c: 8b000294 add x20, x20, x0 if (lv->owner != current) { 5490: f9407a80 ldr x0, [x20, #240] 5494: eb01001f cmp x0, x1 5498: 540005c0 b.eq 5550 <queue_delayed_work_on+0x110> // b.none spin_lock_irqsave(&lv->lock, lv->flags); 549c: f900829f str xzr, [x20, #256] 54a0: aa1403e0 mov x0, x20 54a4: 94000000 bl 0 <rt_spin_lock> LL_WARN(lv->owner); 54a8: f9407a80 ldr x0, [x20, #240] 54ac: b50008c0 cbnz x0, 55c4 <queue_delayed_work_on+0x184> LL_WARN(lv->nestcnt); 54b0: b940fa80 ldr w0, [x20, #248] 54b4: 35000840 cbnz w0, 55bc <queue_delayed_work_on+0x17c> 54b8: d5384101 mrs x1, sp_el0 lv->nestcnt = 1; 54bc: 52800020 mov w0, #0x1 // #1 lv->owner = current; 54c0: f9007a81 str x1, [x20, #240] lv->nestcnt = 1; 54c4: b900fa80 str w0, [x20, #248] 54c8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 54cc: 91000000 add x0, x0, #0x0 bool ret = false; 54d0: 52800014 mov w20, #0x0 // #0 local_lock_irqsave(pendingb_lock, flags); 54d4: 94000000 bl 0 <__this_cpu_preempt_check> if 
(!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 54d8: aa1603e1 mov x1, x22 54dc: 52800000 mov w0, #0x0 // #0 54e0: 94000000 bl 0 <test_and_set_bit> 54e4: 340004e0 cbz w0, 5580 <queue_delayed_work_on+0x140> local_unlock_irqrestore(pendingb_lock, flags); 54e8: 91002261 add x1, x19, #0x8 54ec: aa0103f3 mov x19, x1 54f0: 94000000 bl 0 <debug_smp_processor_id> 54f4: f8605aa1 ldr x1, [x21, w0, uxtw #3] 54f8: 8b010261 add x1, x19, x1 LL_WARN(!lv->nestcnt); 54fc: b940f820 ldr w0, [x1, #248] 5500: 340005a0 cbz w0, 55b4 <queue_delayed_work_on+0x174> LL_WARN(lv->owner != current); 5504: f9407822 ldr x2, [x1, #240] 5508: d5384100 mrs x0, sp_el0 550c: eb00005f cmp x2, x0 5510: 540005e1 b.ne 55cc <queue_delayed_work_on+0x18c> // b.any if (--lv->nestcnt) 5514: b940f820 ldr w0, [x1, #248] 5518: 51000400 sub w0, w0, #0x1 551c: b900f820 str w0, [x1, #248] 5520: 350000a0 cbnz w0, 5534 <queue_delayed_work_on+0xf4> spin_unlock_irqrestore(&lv->lock, lv->flags); 5524: aa0103e0 mov x0, x1 lv->owner = NULL; 5528: f900783f str xzr, [x1, #240] spin_unlock_irqrestore(&lv->lock, lv->flags); 552c: 94000000 bl 0 <rt_spin_unlock> 5530: 94000000 bl 0 <migrate_enable> } 5534: 2a1403e0 mov w0, w20 5538: a94153f3 ldp x19, x20, [sp, #16] 553c: a9425bf5 ldp x21, x22, [sp, #32] 5540: a94363f7 ldp x23, x24, [sp, #48] 5544: f94023f9 ldr x25, [sp, #64] 5548: a8c57bfd ldp x29, x30, [sp], #80 554c: d65f03c0 ret lv->nestcnt++; 5550: b940fa80 ldr w0, [x20, #248] 5554: 11000400 add w0, w0, #0x1 5558: b900fa80 str w0, [x20, #248] bool ret = false; 555c: 52800014 mov w20, #0x0 // #0 local_lock_irqsave(pendingb_lock, flags); 5560: 94000000 bl 0 <migrate_enable> 5564: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 5568: 91000000 add x0, x0, #0x0 556c: 94000000 bl 0 <__this_cpu_preempt_check> if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 5570: aa1603e1 mov x1, x22 5574: 52800000 mov w0, #0x0 // #0 5578: 94000000 bl 0 <test_and_set_bit> 557c: 35fffb60 cbnz w0, 54e8 <queue_delayed_work_on+0xa8> __queue_delayed_work(cpu, wq, dwork, delay); 5580: aa1903e3 mov x3, x25 5584: aa1603e2 mov x2, x22 5588: aa1803e1 mov x1, x24 558c: 2a1703e0 mov w0, w23 5590: 97ffff7c bl 5380 <__queue_delayed_work> ret = true; 5594: 52800034 mov w20, #0x1 // #1 local_unlock_irqrestore(pendingb_lock, flags); 5598: 91002261 add x1, x19, #0x8 559c: aa0103f3 mov x19, x1 55a0: 94000000 bl 0 <debug_smp_processor_id> 55a4: f8605aa1 ldr x1, [x21, w0, uxtw #3] 55a8: 8b010261 add x1, x19, x1 LL_WARN(!lv->nestcnt); 55ac: b940f820 ldr w0, [x1, #248] 55b0: 35fffaa0 cbnz w0, 5504 <queue_delayed_work_on+0xc4> 55b4: d4210000 brk #0x800 55b8: 17ffffd3 b 5504 <queue_delayed_work_on+0xc4> LL_WARN(lv->nestcnt); 55bc: d4210000 brk #0x800 55c0: 17ffffbe b 54b8 <queue_delayed_work_on+0x78> LL_WARN(lv->owner); 55c4: d4210000 brk #0x800 55c8: 17ffffba b 54b0 <queue_delayed_work_on+0x70> LL_WARN(lv->owner != current); 55cc: d4210000 brk #0x800 55d0: 17ffffd1 b 5514 <queue_delayed_work_on+0xd4> 55d4: d503201f nop 00000000000055d8 <mod_delayed_work_on>: { 55d8: d10143ff sub sp, sp, #0x50 55dc: a9017bfd stp x29, x30, [sp, #16] 55e0: 910043fd add x29, sp, #0x10 55e4: a90253f3 stp x19, x20, [sp, #32] 55e8: 90000014 adrp x20, 0 <__stack_chk_guard> 55ec: 91000294 add x20, x20, #0x0 55f0: aa0203f3 mov x19, x2 55f4: a9035bf5 stp x21, x22, [sp, #48] 55f8: 2a0003f6 mov w22, w0 55fc: f9400280 ldr x0, [x20] 5600: f90007e0 str x0, [sp, #8] 5604: d2800000 mov x0, #0x0 // #0 5608: a90463f7 stp x23, x24, [sp, #64] 560c: aa0103f7 mov x23, x1 5610: aa0303f8 mov 
x24, x3 ret = try_to_grab_pending(&dwork->work, true, &flags); 5614: 910003e2 mov x2, sp 5618: 52800021 mov w1, #0x1 // #1 561c: aa1303e0 mov x0, x19 5620: 97fffc30 bl 46e0 <try_to_grab_pending> 5624: 2a0003f5 mov w21, w0 } while (unlikely(ret == -EAGAIN)); 5628: 31002c1f cmn w0, #0xb 562c: 54ffff40 b.eq 5614 <mod_delayed_work_on+0x3c> // b.none if (likely(ret >= 0)) { 5630: 37f80320 tbnz w0, #31, 5694 <mod_delayed_work_on+0xbc> __queue_delayed_work(cpu, wq, dwork, delay); 5634: aa1303e2 mov x2, x19 5638: aa1803e3 mov x3, x24 563c: aa1703e1 mov x1, x23 5640: 2a1603e0 mov w0, w22 5644: 97ffff4f bl 5380 <__queue_delayed_work> local_unlock_irqrestore(pendingb_lock, flags); 5648: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 564c: 91000021 add x1, x1, #0x0 5650: 91002021 add x1, x1, #0x8 5654: aa0103f3 mov x19, x1 5658: 94000000 bl 0 <debug_smp_processor_id> 565c: 90000001 adrp x1, 0 <__per_cpu_offset> 5660: 91000021 add x1, x1, #0x0 5664: f8605821 ldr x1, [x1, w0, uxtw #3] 5668: 8b010261 add x1, x19, x1 LL_WARN(!lv->nestcnt); 566c: b940f820 ldr w0, [x1, #248] 5670: 34000380 cbz w0, 56e0 <mod_delayed_work_on+0x108> LL_WARN(lv->owner != current); 5674: f9407822 ldr x2, [x1, #240] 5678: d5384100 mrs x0, sp_el0 567c: eb00005f cmp x2, x0 5680: 540002c1 b.ne 56d8 <mod_delayed_work_on+0x100> // b.any if (--lv->nestcnt) 5684: b940f820 ldr w0, [x1, #248] 5688: 51000400 sub w0, w0, #0x1 568c: b900f820 str w0, [x1, #248] 5690: 340001a0 cbz w0, 56c4 <mod_delayed_work_on+0xec> return ret; 5694: 710002bf cmp w21, #0x0 } 5698: f94007e0 ldr x0, [sp, #8] 569c: f9400281 ldr x1, [x20] 56a0: ca010001 eor x1, x0, x1 56a4: 1a9f07e0 cset w0, ne // ne = any 56a8: b5000201 cbnz x1, 56e8 <mod_delayed_work_on+0x110> 56ac: a9417bfd ldp x29, x30, [sp, #16] 56b0: a94253f3 ldp x19, x20, [sp, #32] 56b4: a9435bf5 ldp x21, x22, [sp, #48] 56b8: a94463f7 ldp x23, x24, [sp, #64] 56bc: 910143ff add sp, sp, #0x50 56c0: d65f03c0 ret spin_unlock_irqrestore(&lv->lock, lv->flags); 56c4: aa0103e0 mov x0, x1 lv->owner = NULL; 56c8: f900783f str xzr, [x1, #240] spin_unlock_irqrestore(&lv->lock, lv->flags); 56cc: 94000000 bl 0 <rt_spin_unlock> local_unlock_irqrestore(pendingb_lock, flags); 56d0: 94000000 bl 0 <migrate_enable> 56d4: 17fffff0 b 5694 <mod_delayed_work_on+0xbc> LL_WARN(lv->owner != current); 56d8: d4210000 brk #0x800 56dc: 17ffffea b 5684 <mod_delayed_work_on+0xac> LL_WARN(!lv->nestcnt); 56e0: d4210000 brk #0x800 56e4: 17ffffe4 b 5674 <mod_delayed_work_on+0x9c> } 56e8: 94000000 bl 0 <__stack_chk_fail> 56ec: d503201f nop 00000000000056f0 <flush_delayed_work>: { 56f0: a9bd7bfd stp x29, x30, [sp, #-48]! 
56f4: 910003fd mov x29, sp 56f8: a9025bf5 stp x21, x22, [sp, #32] local_lock_irq(pendingb_lock); 56fc: 90000015 adrp x21, 0 <__per_cpu_offset> 5700: 910002b5 add x21, x21, #0x0 { 5704: aa0003f6 mov x22, x0 5708: a90153f3 stp x19, x20, [sp, #16] local_lock_irq(pendingb_lock); 570c: 94000000 bl 0 <migrate_disable> 5710: 94000000 bl 0 <debug_smp_processor_id> 5714: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 5718: f8605aa0 ldr x0, [x21, w0, uxtw #3] 571c: 91000273 add x19, x19, #0x0 5720: 91002274 add x20, x19, #0x8 5724: 8b000294 add x20, x20, x0 spin_lock_irqsave(&lv->lock, lv->flags); 5728: aa1403e0 mov x0, x20 572c: f900829f str xzr, [x20, #256] 5730: 94000000 bl 0 <rt_spin_lock> LL_WARN(lv->owner); 5734: f9407a80 ldr x0, [x20, #240] 5738: b5000620 cbnz x0, 57fc <flush_delayed_work+0x10c> LL_WARN(lv->nestcnt); 573c: b940fa80 ldr w0, [x20, #248] 5740: 35000560 cbnz w0, 57ec <flush_delayed_work+0xfc> lv->nestcnt = 1; 5744: 52800020 mov w0, #0x1 // #1 5748: b900fa80 str w0, [x20, #248] 574c: d5384101 mrs x1, sp_el0 lv->owner = current; 5750: f9007a81 str x1, [x20, #240] if (del_timer_sync(&dwork->timer)) 5754: 910142c0 add x0, x22, #0x50 5758: 94000000 bl 0 <del_timer_sync> 575c: 350002e0 cbnz w0, 57b8 <flush_delayed_work+0xc8> local_unlock_irq(pendingb_lock); 5760: 91002261 add x1, x19, #0x8 5764: aa0103f3 mov x19, x1 5768: 94000000 bl 0 <debug_smp_processor_id> 576c: f8605aa1 ldr x1, [x21, w0, uxtw #3] 5770: 8b010261 add x1, x19, x1 LL_WARN(!lv->nestcnt); 5774: b940f820 ldr w0, [x1, #248] 5778: 34000360 cbz w0, 57e4 <flush_delayed_work+0xf4> LL_WARN(lv->owner != current); 577c: f9407822 ldr x2, [x1, #240] 5780: d5384100 mrs x0, sp_el0 5784: eb00005f cmp x2, x0 5788: 54000361 b.ne 57f4 <flush_delayed_work+0x104> // b.any spin_unlock_irq(&lv->lock); 578c: aa0103e0 mov x0, x1 lv->owner = NULL; 5790: f900783f str xzr, [x1, #240] lv->nestcnt = 0; 5794: b900f83f str wzr, [x1, #248] spin_unlock_irq(&lv->lock); 5798: 94000000 bl 0 <rt_spin_unlock> 579c: 94000000 bl 0 <migrate_enable> return flush_work(&dwork->work); 57a0: aa1603e0 mov x0, x22 57a4: 94000000 bl 23e0 <flush_work> } 57a8: a94153f3 ldp x19, x20, [sp, #16] 57ac: a9425bf5 ldp x21, x22, [sp, #32] 57b0: a8c37bfd ldp x29, x30, [sp], #48 57b4: d65f03c0 ret __queue_work(dwork->cpu, dwork->wq, &dwork->work); 57b8: b940bac0 ldr w0, [x22, #184] 57bc: aa1603e2 mov x2, x22 57c0: f9405ac1 ldr x1, [x22, #176] 57c4: 97fff5bf bl 2ec0 <__queue_work> local_unlock_irq(pendingb_lock); 57c8: 91002261 add x1, x19, #0x8 57cc: aa0103f3 mov x19, x1 57d0: 94000000 bl 0 <debug_smp_processor_id> 57d4: f8605aa1 ldr x1, [x21, w0, uxtw #3] 57d8: 8b010261 add x1, x19, x1 LL_WARN(!lv->nestcnt); 57dc: b940f820 ldr w0, [x1, #248] 57e0: 35fffce0 cbnz w0, 577c <flush_delayed_work+0x8c> 57e4: d4210000 brk #0x800 57e8: 17ffffe5 b 577c <flush_delayed_work+0x8c> LL_WARN(lv->nestcnt); 57ec: d4210000 brk #0x800 57f0: 17ffffd5 b 5744 <flush_delayed_work+0x54> LL_WARN(lv->owner != current); 57f4: d4210000 brk #0x800 57f8: 17ffffe5 b 578c <flush_delayed_work+0x9c> LL_WARN(lv->owner); 57fc: d4210000 brk #0x800 5800: 17ffffcf b 573c <flush_delayed_work+0x4c> 5804: d503201f nop 0000000000005808 <workqueue_congested>: { 5808: a9bd7bfd stp x29, x30, [sp, #-48]! 
580c: 910003fd mov x29, sp 5810: a90153f3 stp x19, x20, [sp, #16] 5814: aa0103f4 mov x20, x1 lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 5818: 90000013 adrp x19, 0 <rcu_lock_map> 581c: 91000273 add x19, x19, #0x0 5820: a9025bf5 stp x21, x22, [sp, #32] 5824: 2a0003f5 mov w21, w0 __rcu_read_lock(); 5828: 94000000 bl 0 <__rcu_read_lock> lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 582c: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 5830: aa1303e0 mov x0, x19 5834: 910000c6 add x6, x6, #0x0 5838: d2800005 mov x5, #0x0 // #0 583c: 52800004 mov w4, #0x0 // #0 5840: 52800043 mov w3, #0x2 // #2 5844: 52800002 mov w2, #0x0 // #0 5848: 52800001 mov w1, #0x0 // #0 584c: 94000000 bl 0 <lock_acquire> RCU_LOCKDEP_WARN(!rcu_is_watching(), 5850: 94000000 bl 0 <debug_lockdep_rcu_enabled> 5854: 340000a0 cbz w0, 5868 <workqueue_congested+0x60> 5858: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 585c: 910002d6 add x22, x22, #0x0 5860: 39401ac0 ldrb w0, [x22, #6] 5864: 340008c0 cbz w0, 597c <workqueue_congested+0x174> preempt_disable(); 5868: 52800020 mov w0, #0x1 // #1 586c: 94000000 bl 0 <preempt_count_add> if (cpu == WORK_CPU_UNBOUND) 5870: 710102bf cmp w21, #0x40 5874: 54000660 b.eq 5940 <workqueue_congested+0x138> // b.none if (!(wq->flags & WQ_UNBOUND)) 5878: b9420280 ldr w0, [x20, #512] 587c: 370803e0 tbnz w0, #1, 58f8 <workqueue_congested+0xf0> pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 5880: 90000001 adrp x1, 0 <__per_cpu_offset> 5884: 91000021 add x1, x1, #0x0 5888: f9410680 ldr x0, [x20, #520] 588c: f875d821 ldr x1, [x1, w21, sxtw #3] 5890: 8b010000 add x0, x0, x1 5894: f8460c01 ldr x1, [x0, #96]! ret = !list_empty(&pwq->delayed_works); 5898: eb01001f cmp x0, x1 589c: 1a9f07f5 cset w21, ne // ne = any preempt_enable(); 58a0: 52800020 mov w0, #0x1 // #1 58a4: 94000000 bl 0 <preempt_count_sub> 58a8: d5384100 mrs x0, sp_el0 58ac: b9401001 ldr w1, [x0, #16] 58b0: 34000381 cbz w1, 5920 <workqueue_congested+0x118> RCU_LOCKDEP_WARN(!rcu_is_watching(), 58b4: 94000000 bl 0 <debug_lockdep_rcu_enabled> 58b8: 340000a0 cbz w0, 58cc <workqueue_congested+0xc4> 58bc: 90000014 adrp x20, 0 <pwq_activate_delayed_work> 58c0: 91000294 add x20, x20, #0x0 58c4: 39401e80 ldrb w0, [x20, #7] 58c8: 34000420 cbz w0, 594c <workqueue_congested+0x144> __rcu_read_unlock(); 58cc: 94000000 bl 0 <__rcu_read_unlock> lock_release(map, 1, _THIS_IP_); 58d0: aa1303e0 mov x0, x19 58d4: 52800021 mov w1, #0x1 // #1 58d8: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 58dc: 91000042 add x2, x2, #0x0 58e0: 94000000 bl 0 <lock_release> } 58e4: 2a1503e0 mov w0, w21 58e8: a94153f3 ldp x19, x20, [sp, #16] 58ec: a9425bf5 ldp x21, x22, [sp, #32] 58f0: a8c37bfd ldp x29, x30, [sp], #48 58f4: d65f03c0 ret pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); 58f8: aa1403e0 mov x0, x20 58fc: 97fff4f3 bl 2cc8 <unbound_pwq_by_node.constprop.15> 5900: f8460c01 ldr x1, [x0, #96]! 
ret = !list_empty(&pwq->delayed_works); 5904: eb01001f cmp x0, x1 5908: 1a9f07f5 cset w21, ne // ne = any preempt_enable(); 590c: 52800020 mov w0, #0x1 // #1 5910: 94000000 bl 0 <preempt_count_sub> 5914: d5384100 mrs x0, sp_el0 5918: b9401001 ldr w1, [x0, #16] 591c: 35fffcc1 cbnz w1, 58b4 <workqueue_congested+0xac> 5920: f9400001 ldr x1, [x0] 5924: 721f003f tst w1, #0x2 5928: 54000081 b.ne 5938 <workqueue_congested+0x130> // b.any 592c: f9400000 ldr x0, [x0] 5930: 721a001f tst w0, #0x40 5934: 54fffc00 b.eq 58b4 <workqueue_congested+0xac> // b.none 5938: 94000000 bl 0 <preempt_schedule> 593c: 17ffffde b 58b4 <workqueue_congested+0xac> cpu = smp_processor_id(); 5940: 94000000 bl 0 <debug_smp_processor_id> 5944: 2a0003f5 mov w21, w0 5948: 17ffffcc b 5878 <workqueue_congested+0x70> RCU_LOCKDEP_WARN(!rcu_is_watching(), 594c: 94000000 bl 0 <rcu_is_watching> 5950: 72001c1f tst w0, #0xff 5954: 54fffbc1 b.ne 58cc <workqueue_congested+0xc4> // b.any 5958: 52800023 mov w3, #0x1 // #1 595c: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 5960: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 5964: 91000042 add x2, x2, #0x0 5968: 91000000 add x0, x0, #0x0 596c: 528051a1 mov w1, #0x28d // #653 5970: 39001e83 strb w3, [x20, #7] 5974: 94000000 bl 0 <lockdep_rcu_suspicious> 5978: 17ffffd5 b 58cc <workqueue_congested+0xc4> RCU_LOCKDEP_WARN(!rcu_is_watching(), 597c: 94000000 bl 0 <rcu_is_watching> 5980: 72001c1f tst w0, #0xff 5984: 54fff721 b.ne 5868 <workqueue_congested+0x60> // b.any 5988: 52800023 mov w3, #0x1 // #1 598c: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 5990: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 5994: 91000042 add x2, x2, #0x0 5998: 91000000 add x0, x0, #0x0 599c: 52804b41 mov w1, #0x25a // #602 59a0: 39001ac3 strb w3, [x22, #6] 59a4: 94000000 bl 0 <lockdep_rcu_suspicious> 59a8: 17ffffb0 b 5868 <workqueue_congested+0x60> 59ac: d503201f nop 00000000000059b0 <wq_worker_running>: { 59b0: a9bf7bfd stp x29, x30, [sp, #-16]! 59b4: 910003fd mov x29, sp struct worker *worker = kthread_data(task); 59b8: 94000000 bl 0 <kthread_data> if (!worker->sleeping) 59bc: b9407001 ldr w1, [x0, #112] 59c0: 340000c1 cbz w1, 59d8 <wq_worker_running+0x28> if (!(worker->flags & WORKER_NOT_RUNNING)) 59c4: b9406802 ldr w2, [x0, #104] 59c8: 52803901 mov w1, #0x1c8 // #456 59cc: 6a01005f tst w2, w1 59d0: 54000080 b.eq 59e0 <wq_worker_running+0x30> // b.none worker->sleeping = 0; 59d4: b900701f str wzr, [x0, #112] } 59d8: a8c17bfd ldp x29, x30, [sp], #16 59dc: d65f03c0 ret atomic_inc(&worker->pool->nr_running); 59e0: f9402401 ldr x1, [x0, #72] 59e4: 91160021 add x1, x1, #0x580 59e8: f9800031 prfm pstl1strm, [x1] 59ec: 885f7c22 ldxr w2, [x1] 59f0: 11000442 add w2, w2, #0x1 59f4: 88037c22 stxr w3, w2, [x1] 59f8: 35ffffa3 cbnz w3, 59ec <wq_worker_running+0x3c> 59fc: 17fffff6 b 59d4 <wq_worker_running+0x24> 0000000000005a00 <wq_worker_sleeping>: { 5a00: a9bf7bfd stp x29, x30, [sp, #-16]! 
    5a04:	910003fd 	mov	x29, sp
	struct worker *worker = kthread_data(task);
    5a08:	94000000 	bl	0 <kthread_data>
	if (worker->flags & WORKER_NOT_RUNNING)
    5a0c:	b9406802 	ldr	w2, [x0, #104]
    5a10:	52803901 	mov	w1, #0x1c8                	// #456
    5a14:	6a01005f 	tst	w2, w1
    5a18:	54000281 	b.ne	5a68 <wq_worker_sleeping+0x68>  // b.any
	if (WARN_ON_ONCE(worker->sleeping))
    5a1c:	b9407002 	ldr	w2, [x0, #112]
	pool = worker->pool;
    5a20:	f9402401 	ldr	x1, [x0, #72]
	if (WARN_ON_ONCE(worker->sleeping))
    5a24:	35000262 	cbnz	w2, 5a70 <wq_worker_sleeping+0x70>
	worker->sleeping = 1;
    5a28:	52800022 	mov	w2, #0x1                  	// #1
	ATOMIC_OPS(sub, sub)
    5a2c:	91160023 	add	x3, x1, #0x580
    5a30:	b9007002 	str	w2, [x0, #112]
    5a34:	f9800071 	prfm	pstl1strm, [x3]
    5a38:	885f7c60 	ldxr	w0, [x3]
    5a3c:	51000400 	sub	w0, w0, #0x1
    5a40:	8802fc60 	stlxr	w2, w0, [x3]
    5a44:	35ffffa2 	cbnz	w2, 5a38 <wq_worker_sleeping+0x38>
    5a48:	d5033bbf 	dmb	ish
	if (atomic_dec_and_test(&pool->nr_running) &&
    5a4c:	350000e0 	cbnz	w0, 5a68 <wq_worker_sleeping+0x68>
    5a50:	f9408420 	ldr	x0, [x1, #264]
	!list_empty(&pool->worklist)) {
    5a54:	91042022 	add	x2, x1, #0x108
	if (atomic_dec_and_test(&pool->nr_running) &&
    5a58:	eb00005f 	cmp	x2, x0
    5a5c:	54000060 	b.eq	5a68 <wq_worker_sleeping+0x68>  // b.none
	wake_up_worker(pool);
    5a60:	aa0103e0 	mov	x0, x1
    5a64:	97ffea71 	bl	428 <wake_up_worker>
	}
    5a68:	a8c17bfd 	ldp	x29, x30, [sp], #16
    5a6c:	d65f03c0 	ret
	if (WARN_ON_ONCE(worker->sleeping))
    5a70:	d4210000 	brk	#0x800
    5a74:	17fffffd 	b	5a68 <wq_worker_sleeping+0x68>

0000000000005a78 <wq_worker_last_func>:
	{
    5a78:	a9bf7bfd 	stp	x29, x30, [sp, #-16]!
    5a7c:	910003fd 	mov	x29, sp
	struct worker *worker = kthread_data(task);
    5a80:	94000000 	bl	0 <kthread_data>
	}
    5a84:	a8c17bfd 	ldp	x29, x30, [sp], #16
    5a88:	f9404c00 	ldr	x0, [x0, #152]
    5a8c:	d65f03c0 	ret

0000000000005a90 <cancel_work>:
	{
    5a90:	a9bf7bfd 	stp	x29, x30, [sp, #-16]!
    5a94:	52800001 	mov	w1, #0x0                  	// #0
    5a98:	910003fd 	mov	x29, sp
    5a9c:	97fffbf3 	bl	4a68 <__cancel_work.part.10>
	}
    5aa0:	a8c17bfd 	ldp	x29, x30, [sp], #16
    5aa4:	d65f03c0 	ret

0000000000005aa8 <schedule_on_each_cpu>:
	{
    5aa8:	a9bb7bfd 	stp	x29, x30, [sp, #-80]!
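To make the two scheduler hooks above (wq_worker_running / wq_worker_sleeping) easier to line up against struct offsets, here is roughly what they compile from. This is a sketch pieced together only from the interleaved source annotations in this listing, assuming the RT-patched 4.14 workqueue hooks (void, task-only signatures); the offsets in the comments are read straight off the loads/stores shown above, so treat it as a reading aid rather than a verified copy of this exact tree:

void wq_worker_running(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);

	if (!worker->sleeping)				/* worker->sleeping at +0x70 (ldr w1, [x0, #112]) */
		return;
	if (!(worker->flags & WORKER_NOT_RUNNING))	/* worker->flags at +0x68, mask 0x1c8 */
		atomic_inc(&worker->pool->nr_running);	/* worker->pool at +0x48, nr_running at pool+0x580 */
	worker->sleeping = 0;
}

void wq_worker_sleeping(struct task_struct *task)
{
	struct worker *worker = kthread_data(task);
	struct worker_pool *pool;

	if (worker->flags & WORKER_NOT_RUNNING)		/* same 0x1c8 mask as above */
		return;
	if (WARN_ON_ONCE(worker->sleeping))		/* brk #0x800 path at 5a70 */
		return;

	pool = worker->pool;
	worker->sleeping = 1;

	if (atomic_dec_and_test(&pool->nr_running) &&	/* ldxr/stlxr loop at 5a38-5a44 */
	    !list_empty(&pool->worklist))		/* pool->worklist at pool+0x108 */
		wake_up_worker(pool);
}

The 0x1c8 immediate tested in both hooks is the WORKER_NOT_RUNNING mask, and the ldxr/stlxr sequences are the inlined LL/SC atomics on pool->nr_running; both hooks reach the pool only through kthread_data(task) and then worker->pool at +0x48.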
works = alloc_percpu(struct work_struct); 5aac: d2800101 mov x1, #0x8 // #8 { 5ab0: 910003fd mov x29, sp 5ab4: a9046bf9 stp x25, x26, [sp, #64] 5ab8: aa0003fa mov x26, x0 works = alloc_percpu(struct work_struct); 5abc: d2800a00 mov x0, #0x50 // #80 5ac0: 94000000 bl 0 <__alloc_percpu> if (!works) 5ac4: b4000880 cbz x0, 5bd4 <schedule_on_each_cpu+0x12c> 5ac8: a90363f7 stp x23, x24, [sp, #48] INIT_WORK(work, func); 5acc: 90000018 adrp x24, 0 <pwq_activate_delayed_work> 5ad0: 91000318 add x24, x24, #0x0 5ad4: 90000017 adrp x23, 0 <nr_cpu_ids> struct work_struct *work = per_cpu_ptr(works, cpu); 5ad8: 90000019 adrp x25, 0 <__per_cpu_offset> INIT_WORK(work, func); 5adc: 910a0318 add x24, x24, #0x280 5ae0: 910002f7 add x23, x23, #0x0 struct work_struct *work = per_cpu_ptr(works, cpu); 5ae4: 91000339 add x25, x25, #0x0 5ae8: a90153f3 stp x19, x20, [sp, #16] for_each_online_cpu(cpu) { 5aec: 12800014 mov w20, #0xffffffff // #-1 5af0: a9025bf5 stp x21, x22, [sp, #32] 5af4: aa0003f5 mov x21, x0 static inline void get_online_cpus(void) { cpus_read_lock(); } 5af8: 90000016 adrp x22, 0 <__cpu_online_mask> 5afc: 94000000 bl 0 <cpus_read_lock> 5b00: 910002d6 add x22, x22, #0x0 5b04: 1400000e b 5b3c <schedule_on_each_cpu+0x94> struct work_struct *work = per_cpu_ptr(works, cpu); 5b08: f874db24 ldr x4, [x25, w20, sxtw #3] 5b0c: 8b040013 add x19, x0, x4 INIT_WORK(work, func); 5b10: f8246805 str x5, [x0, x4] 5b14: 91008260 add x0, x19, #0x20 5b18: 94000000 bl 0 <lockdep_init_map> 5b1c: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 5b20: 91002260 add x0, x19, #0x8 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 5b24: f9000660 str x0, [x19, #8] 5b28: aa1303e2 mov x2, x19 5b2c: f9400021 ldr x1, [x1] 5b30: a9016a60 stp x0, x26, [x19, #16] 5b34: 2a1403e0 mov w0, w20 5b38: 94000000 bl 3330 <queue_work_on> for_each_online_cpu(cpu) { 5b3c: 2a1403e0 mov w0, w20 5b40: aa1603e1 mov x1, x22 5b44: 94000000 bl 0 <cpumask_next> 5b48: 2a0003f4 mov w20, w0 5b4c: b94002e4 ldr w4, [x23] INIT_WORK(work, func); 5b50: 90000001 adrp x1, 0 <pwq_activate_delayed_work> struct work_struct *work = per_cpu_ptr(works, cpu); 5b54: aa1503e0 mov x0, x21 INIT_WORK(work, func); 5b58: b27b7be5 mov x5, #0xfffffffe0 // #68719476704 5b5c: aa1803e2 mov x2, x24 5b60: 52800003 mov w3, #0x0 // #0 5b64: 91000021 add x1, x1, #0x0 for_each_online_cpu(cpu) { 5b68: 6b04029f cmp w20, w4 5b6c: 54fffce3 b.cc 5b08 <schedule_on_each_cpu+0x60> // b.lo, b.ul, b.last flush_work(per_cpu_ptr(works, cpu)); 5b70: 90000014 adrp x20, 0 <__per_cpu_offset> for_each_online_cpu(cpu) 5b74: 12800013 mov w19, #0xffffffff // #-1 flush_work(per_cpu_ptr(works, cpu)); 5b78: 91000294 add x20, x20, #0x0 5b7c: 14000004 b 5b8c <schedule_on_each_cpu+0xe4> 5b80: f873da81 ldr x1, [x20, w19, sxtw #3] 5b84: 8b010000 add x0, x0, x1 5b88: 94000000 bl 23e0 <flush_work> for_each_online_cpu(cpu) 5b8c: 2a1303e0 mov w0, w19 5b90: aa1603e1 mov x1, x22 5b94: 94000000 bl 0 <cpumask_next> 5b98: 2a0003f3 mov w19, w0 5b9c: b94002e1 ldr w1, [x23] flush_work(per_cpu_ptr(works, cpu)); 5ba0: aa1503e0 mov x0, x21 for_each_online_cpu(cpu) 5ba4: 6b01027f cmp w19, w1 5ba8: 54fffec3 b.cc 5b80 <schedule_on_each_cpu+0xd8> // b.lo, b.ul, b.last static inline void put_online_cpus(void) { cpus_read_unlock(); } 5bac: 94000000 bl 0 <cpus_read_unlock> free_percpu(works); 5bb0: aa1503e0 mov x0, x21 5bb4: 94000000 bl 0 <free_percpu> return 0; 5bb8: a94153f3 ldp x19, x20, [sp, #16] 5bbc: 52800000 mov w0, #0x0 // #0 5bc0: a9425bf5 ldp x21, x22, [sp, #32] 5bc4: a94363f7 ldp x23, x24, [sp, #48] } 5bc8: a9446bf9 ldp x25, 
x26, [sp, #64] 5bcc: a8c57bfd ldp x29, x30, [sp], #80 5bd0: d65f03c0 ret return -ENOMEM; 5bd4: 12800160 mov w0, #0xfffffff4 // #-12 5bd8: 17fffffc b 5bc8 <schedule_on_each_cpu+0x120> 5bdc: d503201f nop 0000000000005be0 <free_workqueue_attrs>: if (attrs) { 5be0: b40000c0 cbz x0, 5bf8 <free_workqueue_attrs+0x18> { 5be4: a9bf7bfd stp x29, x30, [sp, #-16]! 5be8: 910003fd mov x29, sp kfree(attrs); 5bec: 94000000 bl 0 <kfree> } 5bf0: a8c17bfd ldp x29, x30, [sp], #16 5bf4: d65f03c0 ret 5bf8: d65f03c0 ret 5bfc: d503201f nop 0000000000005c00 <alloc_workqueue_attrs>: { 5c00: a9bf7bfd stp x29, x30, [sp, #-16]! 5c04: 52800222 mov w2, #0x11 // #17 * @size: how many bytes of memory are required. * @flags: the type of memory to allocate (see kmalloc). */ static inline void *kzalloc(size_t size, gfp_t flags) { return kmalloc(size, flags | __GFP_ZERO); 5c08: 32110001 orr w1, w0, #0x8000 5c0c: 910003fd mov x29, sp if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0)) 5c10: 6a020002 ands w2, w0, w2 5c14: 540001e1 b.ne 5c50 <alloc_workqueue_attrs+0x50> // b.any return kmem_cache_alloc_trace( 5c18: 2a0203e0 mov w0, w2 5c1c: 90000003 adrp x3, 0 <kmalloc_caches> 5c20: 91000063 add x3, x3, #0x0 5c24: d37df002 lsl x2, x0, #3 5c28: cb000040 sub x0, x2, x0 5c2c: 8b001060 add x0, x3, x0, lsl #4 void *ret = kmem_cache_alloc(s, flags); 5c30: f9401c00 ldr x0, [x0, #56] 5c34: 94000000 bl 0 <kmem_cache_alloc> if (!attrs) 5c38: b4000080 cbz x0, 5c48 <alloc_workqueue_attrs+0x48> *dst = *src; 5c3c: 90000001 adrp x1, 0 <__cpu_possible_mask> 5c40: f9400021 ldr x1, [x1] 5c44: f9000401 str x1, [x0, #8] } 5c48: a8c17bfd ldp x29, x30, [sp], #16 5c4c: d65f03c0 ret return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM; 5c50: 12000000 and w0, w0, #0x1 5c54: 11000402 add w2, w0, #0x1 5c58: 17fffff0 b 5c18 <alloc_workqueue_attrs+0x18> 5c5c: d503201f nop 0000000000005c60 <apply_workqueue_attrs>: { 5c60: a9bd7bfd stp x29, x30, [sp, #-48]! 5c64: 910003fd mov x29, sp 5c68: a90153f3 stp x19, x20, [sp, #16] 5c6c: aa0003f4 mov x20, x0 mutex_lock(&wq_pool_mutex); 5c70: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 5c74: 91000273 add x19, x19, #0x0 { 5c78: f90013f5 str x21, [sp, #32] 5c7c: aa0103f5 mov x21, x1 static inline void get_online_cpus(void) { cpus_read_lock(); } 5c80: 94000000 bl 0 <cpus_read_lock> mutex_lock(&wq_pool_mutex); 5c84: aa1303e0 mov x0, x19 5c88: 94000000 bl 0 <_mutex_lock> ret = apply_workqueue_attrs_locked(wq, attrs); 5c8c: aa1503e1 mov x1, x21 5c90: aa1403e0 mov x0, x20 5c94: 97fff72b bl 3940 <apply_workqueue_attrs_locked> 5c98: 2a0003f4 mov w20, w0 mutex_unlock(&wq_pool_mutex); 5c9c: aa1303e0 mov x0, x19 5ca0: 94000000 bl 0 <_mutex_unlock> static inline void put_online_cpus(void) { cpus_read_unlock(); } 5ca4: 94000000 bl 0 <cpus_read_unlock> } 5ca8: 2a1403e0 mov w0, w20 5cac: a94153f3 ldp x19, x20, [sp, #16] 5cb0: f94013f5 ldr x21, [sp, #32] 5cb4: a8c37bfd ldp x29, x30, [sp], #48 5cb8: d65f03c0 ret 5cbc: d503201f nop 0000000000005cc0 <current_is_workqueue_rescuer>: { 5cc0: a9bf7bfd stp x29, x30, [sp, #-16]! 
5cc4: d5384100 mrs x0, sp_el0 5cc8: 910003fd mov x29, sp __READ_ONCE_SIZE; 5ccc: b9401001 ldr w1, [x0, #16] if (in_task() && (current->flags & PF_WQ_WORKER)) 5cd0: 12183021 and w1, w1, #0x1fff00 5cd4: 12106021 and w1, w1, #0xffff01ff 5cd8: 35000061 cbnz w1, 5ce4 <current_is_workqueue_rescuer+0x24> 5cdc: b9403401 ldr w1, [x0, #52] 5ce0: 37280081 tbnz w1, #5, 5cf0 <current_is_workqueue_rescuer+0x30> 5ce4: 52800000 mov w0, #0x0 // #0 } 5ce8: a8c17bfd ldp x29, x30, [sp], #16 5cec: d65f03c0 ret return kthread_data(current); 5cf0: 94000000 bl 0 <kthread_data> return worker && worker->rescue_wq; 5cf4: b4ffff80 cbz x0, 5ce4 <current_is_workqueue_rescuer+0x24> 5cf8: f9404800 ldr x0, [x0, #144] } 5cfc: a8c17bfd ldp x29, x30, [sp], #16 return worker && worker->rescue_wq; 5d00: f100001f cmp x0, #0x0 5d04: 1a9f07e0 cset w0, ne // ne = any } 5d08: d65f03c0 ret 5d0c: d503201f nop 0000000000005d10 <set_worker_desc>: { 5d10: d10303ff sub sp, sp, #0xc0 5d14: a9057bfd stp x29, x30, [sp, #80] 5d18: 910143fd add x29, sp, #0x50 5d1c: a90653f3 stp x19, x20, [sp, #96] 5d20: 90000013 adrp x19, 0 <__stack_chk_guard> 5d24: 91000273 add x19, x19, #0x0 5d28: f9400268 ldr x8, [x19] 5d2c: f90027e8 str x8, [sp, #72] 5d30: d2800008 mov x8, #0x0 // #0 5d34: a9088be1 stp x1, x2, [sp, #136] 5d38: aa0003f4 mov x20, x0 5d3c: a90993e3 stp x3, x4, [sp, #152] 5d40: d5384100 mrs x0, sp_el0 5d44: a90a9be5 stp x5, x6, [sp, #168] 5d48: f9005fe7 str x7, [sp, #184] 5d4c: b9401001 ldr w1, [x0, #16] if (in_task() && (current->flags & PF_WQ_WORKER)) 5d50: 12183021 and w1, w1, #0x1fff00 5d54: 12106021 and w1, w1, #0xffff01ff 5d58: 35000061 cbnz w1, 5d64 <set_worker_desc+0x54> 5d5c: b9403401 ldr w1, [x0, #52] 5d60: 37280121 tbnz w1, #5, 5d84 <set_worker_desc+0x74> } 5d64: f94027e1 ldr x1, [sp, #72] 5d68: f9400260 ldr x0, [x19] 5d6c: ca000020 eor x0, x1, x0 5d70: b50003e0 cbnz x0, 5dec <set_worker_desc+0xdc> 5d74: a9457bfd ldp x29, x30, [sp, #80] 5d78: a94653f3 ldp x19, x20, [sp, #96] 5d7c: 910303ff add sp, sp, #0xc0 5d80: d65f03c0 ret return (struct task_struct *)sp_el0; 5d84: f9003bf5 str x21, [sp, #112] return kthread_data(current); 5d88: 94000000 bl 0 <kthread_data> 5d8c: aa0003f5 mov x21, x0 if (worker) { 5d90: b40002a0 cbz x0, 5de4 <set_worker_desc+0xd4> va_start(args, fmt); 5d94: 910303e2 add x2, sp, #0xc0 5d98: 910303e3 add x3, sp, #0xc0 5d9c: a9028fe2 stp x2, x3, [sp, #40] 5da0: 910203e1 add x1, sp, #0x80 5da4: 128006e0 mov w0, #0xffffffc8 // #-56 5da8: f9001fe1 str x1, [sp, #56] vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 5dac: aa1403e2 mov x2, x20 va_start(args, fmt); 5db0: 29087fe0 stp w0, wzr, [sp, #64] vsnprintf(worker->desc, sizeof(worker->desc), fmt, args); 5db4: 910003e3 mov x3, sp 5db8: a94297e4 ldp x4, x5, [sp, #40] 5dbc: a90017e4 stp x4, x5, [sp] 5dc0: 9101d2a0 add x0, x21, #0x74 5dc4: a94397e4 ldp x4, x5, [sp, #56] 5dc8: d2800301 mov x1, #0x18 // #24 5dcc: a90117e4 stp x4, x5, [sp, #16] 5dd0: 94000000 bl 0 <vsnprintf> worker->desc_valid = true; 5dd4: 52800020 mov w0, #0x1 // #1 5dd8: 3900a2a0 strb w0, [x21, #40] 5ddc: f9403bf5 ldr x21, [sp, #112] } 5de0: 17ffffe1 b 5d64 <set_worker_desc+0x54> 5de4: f9403bf5 ldr x21, [sp, #112] 5de8: 17ffffdf b 5d64 <set_worker_desc+0x54> 5dec: f9003bf5 str x21, [sp, #112] 5df0: 94000000 bl 0 <__stack_chk_fail> 5df4: d503201f nop 0000000000005df8 <print_worker_info>: { 5df8: d10243ff sub sp, sp, #0x90 5dfc: a9067bfd stp x29, x30, [sp, #96] 5e00: 910183fd add x29, sp, #0x60 5e04: a90753f3 stp x19, x20, [sp, #112] 5e08: 90000013 adrp x19, 0 <__stack_chk_guard> 5e0c: 91000273 
add x19, x19, #0x0 5e10: f9400262 ldr x2, [x19] 5e14: f9002fe2 str x2, [sp, #88] 5e18: d2800002 mov x2, #0x0 // #0 bool desc_valid = false; 5e1c: 39003fff strb wzr, [sp, #15] if (!(task->flags & PF_WQ_WORKER)) 5e20: b9403422 ldr w2, [x1, #52] struct pool_workqueue *pwq = NULL; 5e24: a9017fff stp xzr, xzr, [sp, #16] struct workqueue_struct *wq = NULL; 5e28: f90013ff str xzr, [sp, #32] char name[WQ_NAME_LEN] = { }; 5e2c: a902ffff stp xzr, xzr, [sp, #40] 5e30: f9001fff str xzr, [sp, #56] char desc[WORKER_DESC_LEN] = { }; 5e34: a9047fff stp xzr, xzr, [sp, #64] 5e38: f9002bff str xzr, [sp, #80] if (!(task->flags & PF_WQ_WORKER)) 5e3c: 37280122 tbnz w2, #5, 5e60 <print_worker_info+0x68> } 5e40: f9402fe1 ldr x1, [sp, #88] 5e44: f9400260 ldr x0, [x19] 5e48: ca000020 eor x0, x1, x0 5e4c: b5000820 cbnz x0, 5f50 <print_worker_info+0x158> 5e50: a9467bfd ldp x29, x30, [sp, #96] 5e54: a94753f3 ldp x19, x20, [sp, #112] 5e58: 910243ff add sp, sp, #0x90 5e5c: d65f03c0 ret worker = kthread_probe_data(task); 5e60: f90043f5 str x21, [sp, #128] 5e64: aa0003f5 mov x21, x0 5e68: aa0103e0 mov x0, x1 5e6c: 94000000 bl 0 <kthread_probe_data> probe_kernel_read(&fn, &worker->current_func, sizeof(fn)); 5e70: d2800102 mov x2, #0x8 // #8 worker = kthread_probe_data(task); 5e74: aa0003f4 mov x20, x0 probe_kernel_read(&fn, &worker->current_func, sizeof(fn)); 5e78: 91006001 add x1, x0, #0x18 5e7c: 910043e0 add x0, sp, #0x10 5e80: 94000000 bl 0 <probe_kernel_read> probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq)); 5e84: 910063e0 add x0, sp, #0x18 5e88: 91008281 add x1, x20, #0x20 5e8c: d2800102 mov x2, #0x8 // #8 5e90: 94000000 bl 0 <probe_kernel_read> probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); 5e94: 910083e0 add x0, sp, #0x20 5e98: f9400fe1 ldr x1, [sp, #24] 5e9c: d2800102 mov x2, #0x8 // #8 5ea0: 8b020021 add x1, x1, x2 5ea4: 94000000 bl 0 <probe_kernel_read> probe_kernel_read(name, wq->name, sizeof(name) - 1); 5ea8: f94013e1 ldr x1, [sp, #32] 5eac: d28002e2 mov x2, #0x17 // #23 5eb0: 9100a3e0 add x0, sp, #0x28 5eb4: 9106a021 add x1, x1, #0x1a8 5eb8: 94000000 bl 0 <probe_kernel_read> probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid)); 5ebc: 9100a281 add x1, x20, #0x28 5ec0: d2800022 mov x2, #0x1 // #1 5ec4: 91003fe0 add x0, sp, #0xf 5ec8: 94000000 bl 0 <probe_kernel_read> if (desc_valid) 5ecc: 39403fe0 ldrb w0, [sp, #15] 5ed0: 35000140 cbnz w0, 5ef8 <print_worker_info+0x100> if (fn || name[0] || desc[0]) { 5ed4: f9400be3 ldr x3, [sp, #16] 5ed8: b50001c3 cbnz x3, 5f10 <print_worker_info+0x118> 5edc: 3940a3e0 ldrb w0, [sp, #40] 5ee0: 394103e1 ldrb w1, [sp, #64] 5ee4: 2a010000 orr w0, w0, w1 5ee8: 12001c00 and w0, w0, #0xff 5eec: 35000120 cbnz w0, 5f10 <print_worker_info+0x118> 5ef0: f94043f5 ldr x21, [sp, #128] 5ef4: 17ffffd3 b 5e40 <print_worker_info+0x48> probe_kernel_read(desc, worker->desc, sizeof(desc) - 1); 5ef8: 9101d281 add x1, x20, #0x74 5efc: d28002e2 mov x2, #0x17 // #23 5f00: 910103e0 add x0, sp, #0x40 5f04: 94000000 bl 0 <probe_kernel_read> if (fn || name[0] || desc[0]) { 5f08: f9400be3 ldr x3, [sp, #16] 5f0c: b4fffe83 cbz x3, 5edc <print_worker_info+0xe4> printk("%sWorkqueue: %s %pf", log_lvl, name, fn); 5f10: 9100a3e2 add x2, sp, #0x28 5f14: aa1503e1 mov x1, x21 5f18: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 5f1c: 91000000 add x0, x0, #0x0 5f20: 94000000 bl 0 <printk> if (desc[0]) 5f24: 394103e0 ldrb w0, [sp, #64] 5f28: 340000a0 cbz w0, 5f3c <print_worker_info+0x144> pr_cont(" (%s)", desc); 5f2c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 5f30: 910103e1 add x1, sp, 
#0x40 5f34: 91000000 add x0, x0, #0x0 5f38: 94000000 bl 0 <printk> pr_cont("\n"); 5f3c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 5f40: 91000000 add x0, x0, #0x0 5f44: 94000000 bl 0 <printk> 5f48: f94043f5 ldr x21, [sp, #128] 5f4c: 17ffffbd b 5e40 <print_worker_info+0x48> 5f50: f90043f5 str x21, [sp, #128] } 5f54: 94000000 bl 0 <__stack_chk_fail> 0000000000005f58 <show_workqueue_state>: { 5f58: d10243ff sub sp, sp, #0x90 5f5c: a9037bfd stp x29, x30, [sp, #48] 5f60: 9100c3fd add x29, sp, #0x30 5f64: a90453f3 stp x19, x20, [sp, #64] 5f68: 90000013 adrp x19, 0 <__stack_chk_guard> 5f6c: 91000273 add x19, x19, #0x0 5f70: f9400260 ldr x0, [x19] 5f74: f90017e0 str x0, [sp, #40] 5f78: d2800000 mov x0, #0x0 // #0 lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 5f7c: 90000014 adrp x20, 0 <rcu_lock_map> 5f80: 91000294 add x20, x20, #0x0 5f84: a9055bf5 stp x21, x22, [sp, #80] 5f88: a90663f7 stp x23, x24, [sp, #96] 5f8c: a9076bf9 stp x25, x26, [sp, #112] __rcu_read_lock(); 5f90: 94000000 bl 0 <__rcu_read_lock> lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 5f94: aa1403e0 mov x0, x20 5f98: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 5f9c: 910000c6 add x6, x6, #0x0 5fa0: d2800005 mov x5, #0x0 // #0 5fa4: 52800004 mov w4, #0x0 // #0 5fa8: 52800043 mov w3, #0x2 // #2 5fac: 52800002 mov w2, #0x0 // #0 5fb0: 52800001 mov w1, #0x0 // #0 5fb4: 94000000 bl 0 <lock_acquire> RCU_LOCKDEP_WARN(!rcu_is_watching(), 5fb8: 94000000 bl 0 <debug_lockdep_rcu_enabled> 5fbc: 34000200 cbz w0, 5ffc <show_workqueue_state+0xa4> 5fc0: 90000015 adrp x21, 0 <pwq_activate_delayed_work> 5fc4: 910002b5 add x21, x21, #0x0 5fc8: 39401aa0 ldrb w0, [x21, #6] 5fcc: 35000180 cbnz w0, 5ffc <show_workqueue_state+0xa4> 5fd0: 94000000 bl 0 <rcu_is_watching> 5fd4: 72001c1f tst w0, #0xff 5fd8: 54000121 b.ne 5ffc <show_workqueue_state+0xa4> // b.any 5fdc: 52800023 mov w3, #0x1 // #1 5fe0: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 5fe4: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 5fe8: 91000042 add x2, x2, #0x0 5fec: 91000000 add x0, x0, #0x0 5ff0: 52804b41 mov w1, #0x25a // #602 5ff4: 39001aa3 strb w3, [x21, #6] 5ff8: 94000000 bl 0 <lockdep_rcu_suspicious> 5ffc: 90000018 adrp x24, 0 <pwq_activate_delayed_work> 6000: 91000318 add x24, x24, #0x0 pr_info("Showing busy workqueues and worker pools:\n"); 6004: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 6008: 91000000 add x0, x0, #0x0 600c: 94000000 bl 0 <printk> pr_cont(" active=%d/%d refcnt=%d%s\n", 6010: 90000019 adrp x25, 0 <pwq_activate_delayed_work> 6014: f9426316 ldr x22, [x24, #1216] 6018: 91000339 add x25, x25, #0x0 list_for_each_entry_rcu(wq, &workqueues, list) { 601c: d10042d6 sub x22, x22, #0x10 6020: 910042c0 add x0, x22, #0x10 6024: 91130301 add x1, x24, #0x4c0 6028: eb01001f cmp x0, x1 602c: 54001220 b.eq 6270 <show_workqueue_state+0x318> // b.none 6030: aa1603fa mov x26, x22 6034: a90873fb stp x27, x28, [sp, #128] for_each_pwq(pwq, wq) { 6038: 90000017 adrp x23, 0 <pwq_activate_delayed_work> 603c: 9000001b adrp x27, 0 <pwq_activate_delayed_work> 6040: 910002f7 add x23, x23, #0x0 6044: f84d8755 ldr x21, [x26], #216 6048: 9100037b add x27, x27, #0x0 604c: d101c2b5 sub x21, x21, #0x70 6050: 9101c2a0 add x0, x21, #0x70 6054: eb0002df cmp x22, x0 6058: 540015e0 b.eq 6314 <show_workqueue_state+0x3bc> // b.none 605c: 94000000 bl 0 <debug_lockdep_rcu_enabled> 6060: 34000200 cbz w0, 60a0 <show_workqueue_state+0x148> 6064: 39402ae0 ldrb w0, [x23, #10] 6068: 350001c0 cbnz w0, 60a0 <show_workqueue_state+0x148> 606c: 94000000 bl 0 <rcu_read_lock_held> 6070: 35000180 cbnz w0, 
60a0 <show_workqueue_state+0x148> return lock_is_held_type(lock, -1); 6074: 12800001 mov w1, #0xffffffff // #-1 6078: aa1a03e0 mov x0, x26 607c: 94000000 bl 0 <lock_is_held_type> 6080: 35000100 cbnz w0, 60a0 <show_workqueue_state+0x148> 6084: 52800023 mov w3, #0x1 // #1 6088: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 608c: aa1b03e2 mov x2, x27 6090: 91000000 add x0, x0, #0x0 6094: 52823c01 mov w1, #0x11e0 // #4576 6098: 39002ae3 strb w3, [x23, #10] 609c: 94000000 bl 0 <lockdep_rcu_suspicious> if (pwq->nr_active || !list_empty(&pwq->delayed_works)) { 60a0: b9405aa0 ldr w0, [x21, #88] 60a4: 34001e60 cbz w0, 6470 <show_workqueue_state+0x518> pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 60a8: b94202c2 ldr w2, [x22, #512] 60ac: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 60b0: 9106a2c1 add x1, x22, #0x1a8 60b4: 91000000 add x0, x0, #0x0 60b8: 94000000 bl 0 <printk> 60bc: f94002d5 ldr x21, [x22] for_each_pwq(pwq, wq) { 60c0: d101c2b5 sub x21, x21, #0x70 60c4: 9101c2a0 add x0, x21, #0x70 60c8: eb0002df cmp x22, x0 60cc: 54001240 b.eq 6314 <show_workqueue_state+0x3bc> // b.none 60d0: 94000000 bl 0 <debug_lockdep_rcu_enabled> 60d4: 34000260 cbz w0, 6120 <show_workqueue_state+0x1c8> 60d8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 60dc: 91000017 add x23, x0, #0x0 60e0: 39402ee0 ldrb w0, [x23, #11] 60e4: 350001e0 cbnz w0, 6120 <show_workqueue_state+0x1c8> 60e8: 94000000 bl 0 <rcu_read_lock_held> 60ec: 350001a0 cbnz w0, 6120 <show_workqueue_state+0x1c8> 60f0: 12800001 mov w1, #0xffffffff // #-1 60f4: 910362c0 add x0, x22, #0xd8 60f8: 94000000 bl 0 <lock_is_held_type> 60fc: 35000120 cbnz w0, 6120 <show_workqueue_state+0x1c8> 6100: 52800023 mov w3, #0x1 // #1 6104: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 6108: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 610c: 91000042 add x2, x2, #0x0 6110: 91000000 add x0, x0, #0x0 6114: 52823d61 mov w1, #0x11eb // #4587 6118: 39002ee3 strb w3, [x23, #11] 611c: 94000000 bl 0 <lockdep_rcu_suspicious> spin_lock_irqsave(&pwq->pool->lock, flags); 6120: aa1503fa mov x26, x21 6124: f8460740 ldr x0, [x26], #96 6128: 94000000 bl 0 <rt_spin_lock> if (pwq->nr_active || !list_empty(&pwq->delayed_works)) 612c: b9405aa0 ldr w0, [x21, #88] 6130: 34001ae0 cbz w0, 648c <show_workqueue_state+0x534> struct worker_pool *pool = pwq->pool; 6134: f94002b7 ldr x23, [x21] pr_info(" pwq %d:", pool->id); 6138: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 613c: 91000000 add x0, x0, #0x0 6140: b940fae1 ldr w1, [x23, #248] 6144: 94000000 bl 0 <printk> pr_cont_pool_info(pool); 6148: aa1703e0 mov x0, x23 614c: 94000000 bl 0 <pwq_activate_delayed_work> 6150: aa1503e5 mov x5, x21 pr_cont(" active=%d/%d refcnt=%d%s\n", 6154: b9401aa3 ldr w3, [x21, #24] 6158: 294b0aa1 ldp w1, w2, [x21, #88] 615c: 90000004 adrp x4, 0 <pwq_activate_delayed_work> 6160: f8480ca6 ldr x6, [x5, #128]! 
6164: 91000084 add x4, x4, #0x0 6168: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 616c: 91000000 add x0, x0, #0x0 6170: eb0600bf cmp x5, x6 6174: 9a991084 csel x4, x4, x25, ne // ne = any 6178: 94000000 bl 0 <printk> hash_for_each(pool->busy_hash, bkt, worker, hentry) { 617c: 9107a2e2 add x2, x23, #0x1e8 pr_cont(" active=%d/%d refcnt=%d%s\n", 6180: d2800021 mov x1, #0x1 // #1 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 6184: f8617840 ldr x0, [x2, x1, lsl #3] 6188: b4001980 cbz x0, 64b8 <show_workqueue_state+0x560> if (worker->current_pwq == pwq) { 618c: f9401003 ldr x3, [x0, #32] 6190: eb0302bf cmp x21, x3 6194: 540018e1 b.ne 64b0 <show_workqueue_state+0x558> // b.any pr_info(" in-flight:"); 6198: 910fc2e1 add x1, x23, #0x3f0 619c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 61a0: 91000000 add x0, x0, #0x0 61a4: f90003e1 str x1, [sp] 61a8: 9107c2fb add x27, x23, #0x1f0 61ac: 94000000 bl 0 <printk> pr_cont("%s %d%s:%pf", comma ? "," : "", 61b0: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 61b4: 91000000 add x0, x0, #0x0 61b8: f90007e0 str x0, [sp, #8] bool comma = false; 61bc: 52800000 mov w0, #0x0 // #0 hash_for_each(pool->busy_hash, bkt, worker, hentry) { 61c0: f940037c ldr x28, [x27] 61c4: b40000dc cbz x28, 61dc <show_workqueue_state+0x284> if (worker->current_pwq != pwq) 61c8: f9401381 ldr x1, [x28, #32] 61cc: eb0102bf cmp x21, x1 61d0: 54001b00 b.eq 6530 <show_workqueue_state+0x5d8> // b.none hash_for_each(pool->busy_hash, bkt, worker, hentry) { 61d4: f940039c ldr x28, [x28] 61d8: b5ffff9c cbnz x28, 61c8 <show_workqueue_state+0x270> 61dc: f94003e1 ldr x1, [sp] 61e0: 9100237b add x27, x27, #0x8 61e4: eb1b003f cmp x1, x27 61e8: 54fffec1 b.ne 61c0 <show_workqueue_state+0x268> // b.any pr_cont("\n"); 61ec: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 61f0: 91000000 add x0, x0, #0x0 61f4: 94000000 bl 0 <printk> list_for_each_entry(work, &pool->worklist, entry) { 61f8: f94086e0 ldr x0, [x23, #264] 61fc: 910422fb add x27, x23, #0x108 6200: d1002000 sub x0, x0, #0x8 6204: 91002001 add x1, x0, #0x8 6208: eb01037f cmp x27, x1 620c: 54001640 b.eq 64d4 <show_workqueue_state+0x57c> // b.none 6210: f9400002 ldr x2, [x0] return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); 6214: 9278dc41 and x1, x2, #0xffffffffffffff00 6218: f27e005f tst x2, #0x4 621c: 9a9f1021 csel x1, x1, xzr, ne // ne = any if (get_work_pwq(work) == pwq) { 6220: eb0102bf cmp x21, x1 6224: 54001801 b.ne 6524 <show_workqueue_state+0x5cc> // b.any pr_info(" pending:"); 6228: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 622c: 91000000 add x0, x0, #0x0 6230: 94000000 bl 0 <printk> list_for_each_entry(work, &pool->worklist, entry) { 6234: f94086f7 ldr x23, [x23, #264] bool comma = false; 6238: 52800000 mov w0, #0x0 // #0 list_for_each_entry(work, &pool->worklist, entry) { 623c: d10022f7 sub x23, x23, #0x8 6240: 910022e1 add x1, x23, #0x8 6244: eb01037f cmp x27, x1 6248: 54001400 b.eq 64c8 <show_workqueue_state+0x570> // b.none 624c: f94002e2 ldr x2, [x23] return (void *)(data & WORK_STRUCT_WQ_DATA_MASK); 6250: 9278dc41 and x1, x2, #0xffffffffffffff00 6254: f27e005f tst x2, #0x4 6258: 9a9f1021 csel x1, x1, xzr, ne // ne = any if (get_work_pwq(work) != pwq) 625c: eb0102bf cmp x21, x1 6260: 54001b00 b.eq 65c0 <show_workqueue_state+0x668> // b.none list_for_each_entry(work, &pool->worklist, entry) { 6264: f94006f7 ldr x23, [x23, #8] 6268: d10022f7 sub x23, x23, #0x8 626c: 17fffff5 b 6240 <show_workqueue_state+0x2e8> for_each_pool(pool, pi) { 6270: 90000016 adrp x22, 0 <pwq_activate_delayed_work> pr_cont(" %s%d", 
first ? "idle: " : "", 6274: 90000019 adrp x25, 0 <pwq_activate_delayed_work> for_each_pool(pool, pi) { 6278: 910002d6 add x22, x22, #0x0 pr_cont(" %s%d", first ? "idle: " : "", 627c: 91000339 add x25, x25, #0x0 for_each_pool(pool, pi) { 6280: b90027ff str wzr, [sp, #36] 6284: 910093e1 add x1, sp, #0x24 6288: 9103a300 add x0, x24, #0xe8 628c: 94000000 bl 0 <idr_get_next> 6290: aa0003f5 mov x21, x0 6294: b4000480 cbz x0, 6324 <show_workqueue_state+0x3cc> 6298: 94000000 bl 0 <debug_lockdep_rcu_enabled> 629c: 34000220 cbz w0, 62e0 <show_workqueue_state+0x388> 62a0: 394032c0 ldrb w0, [x22, #12] 62a4: 350001e0 cbnz w0, 62e0 <show_workqueue_state+0x388> 62a8: 94000000 bl 0 <rcu_read_lock_held> 62ac: 350001a0 cbnz w0, 62e0 <show_workqueue_state+0x388> 62b0: 12800001 mov w1, #0xffffffff // #-1 62b4: 9102e300 add x0, x24, #0xb8 62b8: 94000000 bl 0 <lock_is_held_type> 62bc: 35000120 cbnz w0, 62e0 <show_workqueue_state+0x388> 62c0: 52800023 mov w3, #0x1 // #1 62c4: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 62c8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 62cc: 91000042 add x2, x2, #0x0 62d0: 91000000 add x0, x0, #0x0 62d4: 52823f21 mov w1, #0x11f9 // #4601 62d8: 390032c3 strb w3, [x22, #12] 62dc: 94000000 bl 0 <lockdep_rcu_suspicious> spin_lock_irqsave(&pool->lock, flags); 62e0: aa1503e0 mov x0, x21 62e4: 94000000 bl 0 <rt_spin_lock> if (pool->nr_workers == pool->nr_idle) 62e8: b9411aa1 ldr w1, [x21, #280] 62ec: b9411ea0 ldr w0, [x21, #284] 62f0: 6b00003f cmp w1, w0 62f4: 54000601 b.ne 63b4 <show_workqueue_state+0x45c> // b.any spin_unlock_irqrestore(&pool->lock, flags); 62f8: aa1503e0 mov x0, x21 62fc: 94000000 bl 0 <rt_spin_unlock> * disables interrupts for a long time. This call is stateless. */ static inline void touch_nmi_watchdog(void) { arch_touch_nmi_watchdog(); touch_softlockup_watchdog(); 6300: 94000000 bl 0 <touch_softlockup_watchdog> for_each_pool(pool, pi) { 6304: b94027e0 ldr w0, [sp, #36] 6308: 11000400 add w0, w0, #0x1 630c: b90027e0 str w0, [sp, #36] 6310: 17ffffdd b 6284 <show_workqueue_state+0x32c> 6314: f9400ad6 ldr x22, [x22, #16] list_for_each_entry_rcu(wq, &workqueues, list) { 6318: a94873fb ldp x27, x28, [sp, #128] 631c: d10042d6 sub x22, x22, #0x10 6320: 17ffff40 b 6020 <show_workqueue_state+0xc8> RCU_LOCKDEP_WARN(!rcu_is_watching(), 6324: 94000000 bl 0 <debug_lockdep_rcu_enabled> 6328: 34000200 cbz w0, 6368 <show_workqueue_state+0x410> 632c: 90000015 adrp x21, 0 <pwq_activate_delayed_work> 6330: 910002b5 add x21, x21, #0x0 6334: 39401ea0 ldrb w0, [x21, #7] 6338: 35000180 cbnz w0, 6368 <show_workqueue_state+0x410> 633c: 94000000 bl 0 <rcu_is_watching> 6340: 72001c1f tst w0, #0xff 6344: 54000121 b.ne 6368 <show_workqueue_state+0x410> // b.any 6348: 52800023 mov w3, #0x1 // #1 634c: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 6350: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 6354: 91000042 add x2, x2, #0x0 6358: 91000000 add x0, x0, #0x0 635c: 528051a1 mov w1, #0x28d // #653 6360: 39001ea3 strb w3, [x21, #7] 6364: 94000000 bl 0 <lockdep_rcu_suspicious> __rcu_read_unlock(); 6368: 94000000 bl 0 <__rcu_read_unlock> lock_release(map, 1, _THIS_IP_); 636c: aa1403e0 mov x0, x20 6370: 52800021 mov w1, #0x1 // #1 6374: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 6378: 91000042 add x2, x2, #0x0 637c: 94000000 bl 0 <lock_release> } 6380: f94017e1 ldr x1, [sp, #40] 6384: f9400260 ldr x0, [x19] 6388: ca000020 eor x0, x1, x0 638c: b5000100 cbnz x0, 63ac <show_workqueue_state+0x454> 6390: a9437bfd ldp x29, x30, [sp, #48] 6394: a94453f3 ldp x19, x20, [sp, #64] 6398: 
a9455bf5 ldp x21, x22, [sp, #80] 639c: a94663f7 ldp x23, x24, [sp, #96] 63a0: a9476bf9 ldp x25, x26, [sp, #112] 63a4: 910243ff add sp, sp, #0x90 63a8: d65f03c0 ret 63ac: a90873fb stp x27, x28, [sp, #128] 63b0: 94000000 bl 0 <__stack_chk_fail> pr_info("pool %d:", pool->id); 63b4: a90873fb stp x27, x28, [sp, #128] 63b8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 63bc: 91000000 add x0, x0, #0x0 63c0: b940faa1 ldr w1, [x21, #248] 63c4: 94000000 bl 0 <printk> pr_cont_pool_info(pool); 63c8: aa1503e0 mov x0, x21 63cc: 94000000 bl 0 <pwq_activate_delayed_work> pr_cont(" hung=%us workers=%d", 63d0: 90000001 adrp x1, 0 <jiffies> 63d4: f94082a0 ldr x0, [x21, #256] 63d8: f9400021 ldr x1, [x1] 63dc: cb000020 sub x0, x1, x0 63e0: 94000000 bl 0 <jiffies_to_msecs> 63e4: 52807d01 mov w1, #0x3e8 // #1000 63e8: b9411aa2 ldr w2, [x21, #280] 63ec: 90000003 adrp x3, 0 <pwq_activate_delayed_work> 63f0: 1ac10801 udiv w1, w0, w1 63f4: 91000060 add x0, x3, #0x0 63f8: 94000000 bl 0 <printk> if (pool->manager) 63fc: f941faa0 ldr x0, [x21, #1008] 6400: b40000c0 cbz x0, 6418 <show_workqueue_state+0x4c0> return tsk->pid; 6404: f9402001 ldr x1, [x0, #64] pr_cont(" manager: %d", 6408: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 640c: 91000000 add x0, x0, #0x0 6410: b945a821 ldr w1, [x1, #1448] 6414: 94000000 bl 0 <printk> list_for_each_entry(worker, &pool->idle_list, entry) { 6418: f94092b7 ldr x23, [x21, #288] pr_cont(" %s%d", first ? "idle: " : "", 641c: 9000001a adrp x26, 0 <pwq_activate_delayed_work> 6420: 910482bc add x28, x21, #0x120 6424: 9100035a add x26, x26, #0x0 bool first = true; 6428: 52800020 mov w0, #0x1 // #1 pr_cont(" %s%d", first ? "idle: " : "", 642c: 9000001b adrp x27, 0 <pwq_activate_delayed_work> list_for_each_entry(worker, &pool->idle_list, entry) { 6430: eb1c02ff cmp x23, x28 6434: 54000140 b.eq 645c <show_workqueue_state+0x504> // b.none 6438: f94022e2 ldr x2, [x23, #64] pr_cont(" %s%d", first ? "idle: " : "", 643c: 7100001f cmp w0, #0x0 6440: 9a991341 csel x1, x26, x25, ne // ne = any 6444: 91000360 add x0, x27, #0x0 6448: b945a842 ldr w2, [x2, #1448] 644c: 94000000 bl 0 <printk> first = false; 6450: 52800000 mov w0, #0x0 // #0 list_for_each_entry(worker, &pool->idle_list, entry) { 6454: f94002f7 ldr x23, [x23] 6458: 17fffff6 b 6430 <show_workqueue_state+0x4d8> pr_cont("\n"); 645c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 6460: 91000000 add x0, x0, #0x0 6464: 94000000 bl 0 <printk> 6468: a94873fb ldp x27, x28, [sp, #128] 646c: 17ffffa3 b 62f8 <show_workqueue_state+0x3a0> 6470: aa1503e0 mov x0, x21 6474: f8460c01 ldr x1, [x0, #96]! 
if (pwq->nr_active || !list_empty(&pwq->delayed_works)) { 6478: eb01001f cmp x0, x1 647c: 54ffe161 b.ne 60a8 <show_workqueue_state+0x150> // b.any 6480: f9403ab5 ldr x21, [x21, #112] for_each_pwq(pwq, wq) { 6484: d101c2b5 sub x21, x21, #0x70 6488: 17fffef2 b 6050 <show_workqueue_state+0xf8> 648c: f94032a0 ldr x0, [x21, #96] if (pwq->nr_active || !list_empty(&pwq->delayed_works)) 6490: eb00035f cmp x26, x0 6494: 54ffe501 b.ne 6134 <show_workqueue_state+0x1dc> // b.any spin_unlock_irqrestore(&pwq->pool->lock, flags); 6498: f94002a0 ldr x0, [x21] 649c: 94000000 bl 0 <rt_spin_unlock> 64a0: 94000000 bl 0 <touch_softlockup_watchdog> 64a4: f9403ab5 ldr x21, [x21, #112] for_each_pwq(pwq, wq) { 64a8: d101c2b5 sub x21, x21, #0x70 64ac: 17ffff06 b 60c4 <show_workqueue_state+0x16c> hash_for_each(pool->busy_hash, bkt, worker, hentry) { 64b0: f9400000 ldr x0, [x0] 64b4: b5ffe6c0 cbnz x0, 618c <show_workqueue_state+0x234> 64b8: 7100fc3f cmp w1, #0x3f 64bc: 91000421 add x1, x1, #0x1 64c0: 54ffe629 b.ls 6184 <show_workqueue_state+0x22c> // b.plast 64c4: 17ffff4d b 61f8 <show_workqueue_state+0x2a0> pr_cont("\n"); 64c8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 64cc: 91000000 add x0, x0, #0x0 64d0: 94000000 bl 0 <printk> 64d4: f94032a0 ldr x0, [x21, #96] if (!list_empty(&pwq->delayed_works)) { 64d8: eb00035f cmp x26, x0 64dc: 54fffde0 b.eq 6498 <show_workqueue_state+0x540> // b.none pr_info(" delayed:"); 64e0: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 64e4: 91000000 add x0, x0, #0x0 64e8: 94000000 bl 0 <printk> list_for_each_entry(work, &pwq->delayed_works, entry) { 64ec: f94032b7 ldr x23, [x21, #96] bool comma = false; 64f0: 52800000 mov w0, #0x0 // #0 list_for_each_entry(work, &pwq->delayed_works, entry) { 64f4: d10022f7 sub x23, x23, #0x8 64f8: 910022e1 add x1, x23, #0x8 64fc: eb01035f cmp x26, x1 6500: 54000540 b.eq 65a8 <show_workqueue_state+0x650> // b.none pr_cont_work(comma, work); 6504: aa1703e1 mov x1, x23 6508: 94000000 bl 0 <pwq_activate_delayed_work> list_for_each_entry(work, &pwq->delayed_works, entry) { 650c: a9405ee0 ldp x0, x23, [x23] comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 6510: d343fc00 lsr x0, x0, #3 list_for_each_entry(work, &pwq->delayed_works, entry) { 6514: d10022f7 sub x23, x23, #0x8 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 6518: d2400000 eor x0, x0, #0x1 651c: 12000000 and w0, w0, #0x1 list_for_each_entry(work, &pwq->delayed_works, entry) { 6520: 17fffff6 b 64f8 <show_workqueue_state+0x5a0> list_for_each_entry(work, &pool->worklist, entry) { 6524: f9400400 ldr x0, [x0, #8] 6528: d1002000 sub x0, x0, #0x8 652c: 17ffff36 b 6204 <show_workqueue_state+0x2ac> pr_cont("%s %d%s:%pf", comma ? "," : "", 6530: f94006a2 ldr x2, [x21, #8] 6534: 7100001f cmp w0, #0x0 6538: f9402380 ldr x0, [x28, #64] list_for_each_entry(work, &worker->scheduled, entry) 653c: f9000bfc str x28, [sp, #16] pr_cont("%s %d%s:%pf", comma ? "," : "", 6540: f94007e1 ldr x1, [sp, #8] 6544: f940a842 ldr x2, [x2, #336] 6548: 9a991021 csel x1, x1, x25, ne // ne = any 654c: f9400f84 ldr x4, [x28, #24] 6550: eb1c005f cmp x2, x28 6554: b945a802 ldr w2, [x0, #1448] 6558: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 655c: 91000003 add x3, x0, #0x0 6560: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 6564: 9a990063 csel x3, x3, x25, eq // eq = none 6568: 91000000 add x0, x0, #0x0 656c: 94000000 bl 0 <printk> list_for_each_entry(work, &worker->scheduled, entry) 6570: f9400be5 ldr x5, [sp, #16] 6574: f8430ca2 ldr x2, [x5, #48]! 
6578: d1002042 sub x2, x2, #0x8 657c: 91002040 add x0, x2, #0x8 6580: eb05001f cmp x0, x5 6584: 540001a0 b.eq 65b8 <show_workqueue_state+0x660> // b.none pr_cont_work(false, work); 6588: aa0203e1 mov x1, x2 658c: 52800000 mov w0, #0x0 // #0 6590: a90117e2 stp x2, x5, [sp, #16] 6594: 94000000 bl 0 <pwq_activate_delayed_work> list_for_each_entry(work, &worker->scheduled, entry) 6598: a94117e2 ldp x2, x5, [sp, #16] 659c: f9400442 ldr x2, [x2, #8] 65a0: d1002042 sub x2, x2, #0x8 65a4: 17fffff6 b 657c <show_workqueue_state+0x624> pr_cont("\n"); 65a8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 65ac: 91000000 add x0, x0, #0x0 65b0: 94000000 bl 0 <printk> 65b4: 17ffffb9 b 6498 <show_workqueue_state+0x540> comma = true; 65b8: 52800020 mov w0, #0x1 // #1 65bc: 17ffff06 b 61d4 <show_workqueue_state+0x27c> pr_cont_work(comma, work); 65c0: aa1703e1 mov x1, x23 65c4: 94000000 bl 0 <pwq_activate_delayed_work> comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); 65c8: f94002e0 ldr x0, [x23] 65cc: d343fc00 lsr x0, x0, #3 65d0: d2400000 eor x0, x0, #0x1 65d4: 12000000 and w0, w0, #0x1 65d8: 17ffff23 b 6264 <show_workqueue_state+0x30c> 65dc: d503201f nop 00000000000065e0 <destroy_workqueue>: { 65e0: a9bc7bfd stp x29, x30, [sp, #-64]! 65e4: 910003fd mov x29, sp 65e8: a90153f3 stp x19, x20, [sp, #16] 65ec: aa0003f4 mov x20, x0 65f0: a9025bf5 stp x21, x22, [sp, #32] * * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. */ static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { struct wq_device *wq_dev = wq->wq_dev; 65f4: f940b800 ldr x0, [x0, #368] if (!wq->wq_dev) 65f8: b4000080 cbz x0, 6608 <destroy_workqueue+0x28> return; wq->wq_dev = NULL; 65fc: f900ba9f str xzr, [x20, #368] device_unregister(&wq_dev->dev); 6600: 91002000 add x0, x0, #0x8 6604: 94000000 bl 0 <device_unregister> drain_workqueue(wq); 6608: aa1403e0 mov x0, x20 660c: 94000000 bl 18c0 <drain_workqueue> if (wq->rescuer) { 6610: f940aa95 ldr x21, [x20, #336] 6614: b40001b5 cbz x21, 6648 <destroy_workqueue+0x68> spin_lock_irq(&wq_mayday_lock); 6618: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 661c: 91000273 add x19, x19, #0x0 6620: 910b4273 add x19, x19, #0x2d0 6624: aa1303e0 mov x0, x19 6628: 94000000 bl 0 <rt_spin_lock> wq->rescuer = NULL; 662c: f900aa9f str xzr, [x20, #336] spin_unlock_irq(&wq_mayday_lock); 6630: aa1303e0 mov x0, x19 6634: 94000000 bl 0 <rt_spin_unlock> kthread_stop(rescuer->task); 6638: f94022a0 ldr x0, [x21, #64] 663c: 94000000 bl 0 <kthread_stop> kfree(rescuer); 6640: aa1503e0 mov x0, x21 6644: 94000000 bl 0 <kfree> mutex_lock(&wq->mutex); 6648: 91008295 add x21, x20, #0x20 664c: aa1503e0 mov x0, x21 6650: 94000000 bl 0 <_mutex_lock> 6654: f9400293 ldr x19, [x20] for_each_pwq(pwq, wq) { 6658: eb13029f cmp x20, x19 665c: d101c273 sub x19, x19, #0x70 6660: 540005a0 b.eq 6714 <destroy_workqueue+0x134> // b.none 6664: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 6668: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 666c: 91000016 add x22, x0, #0x0 6670: a90363f7 stp x23, x24, [sp, #48] 6674: 91000037 add x23, x1, #0x0 6678: 91036298 add x24, x20, #0xd8 667c: 94000000 bl 0 <debug_lockdep_rcu_enabled> 6680: 34000060 cbz w0, 668c <destroy_workqueue+0xac> 6684: 394036e0 ldrb w0, [x23, #13] 6688: 34000780 cbz w0, 6778 <destroy_workqueue+0x198> 668c: 91007260 add x0, x19, #0x1c 6690: 91016262 add x2, x19, #0x58 6694: 14000004 b 66a4 <destroy_workqueue+0xc4> if (WARN_ON(pwq->nr_in_flight[i])) { 6698: 91001000 add x0, x0, #0x4 for (i = 0; i < WORK_NR_COLORS; i++) { 669c: eb00005f cmp 
x2, x0 66a0: 54000180 b.eq 66d0 <destroy_workqueue+0xf0> // b.none if (WARN_ON(pwq->nr_in_flight[i])) { 66a4: b9400001 ldr w1, [x0] 66a8: 34ffff81 cbz w1, 6698 <destroy_workqueue+0xb8> WARN_ON(!list_empty(&pwq->delayed_works))) { 66ac: d4210000 brk #0x800 mutex_unlock(&wq->mutex); 66b0: aa1503e0 mov x0, x21 66b4: 94000000 bl 0 <_mutex_unlock> show_workqueue_state(); 66b8: 94000000 bl 5f58 <show_workqueue_state> return; 66bc: a94363f7 ldp x23, x24, [sp, #48] } 66c0: a94153f3 ldp x19, x20, [sp, #16] 66c4: a9425bf5 ldp x21, x22, [sp, #32] 66c8: a8c47bfd ldp x29, x30, [sp], #64 66cc: d65f03c0 ret if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) || 66d0: f940b680 ldr x0, [x20, #360] 66d4: eb13001f cmp x0, x19 66d8: 54000080 b.eq 66e8 <destroy_workqueue+0x108> // b.none 66dc: b9401a60 ldr w0, [x19, #24] 66e0: 7100041f cmp w0, #0x1 66e4: 54fffe4c b.gt 66ac <destroy_workqueue+0xcc> WARN_ON(pwq->nr_active) || 66e8: b9405a60 ldr w0, [x19, #88] 66ec: 35fffe00 cbnz w0, 66ac <destroy_workqueue+0xcc> 66f0: aa1303e0 mov x0, x19 66f4: f8460c01 ldr x1, [x0, #96]! WARN_ON(!list_empty(&pwq->delayed_works))) { 66f8: eb01001f cmp x0, x1 66fc: 54fffd81 b.ne 66ac <destroy_workqueue+0xcc> // b.any 6700: f9403a73 ldr x19, [x19, #112] for_each_pwq(pwq, wq) { 6704: eb13029f cmp x20, x19 6708: d101c273 sub x19, x19, #0x70 670c: 54fffb81 b.ne 667c <destroy_workqueue+0x9c> // b.any 6710: a94363f7 ldp x23, x24, [sp, #48] mutex_unlock(&wq->mutex); 6714: aa1503e0 mov x0, x21 mutex_lock(&wq_pool_mutex); 6718: 90000013 adrp x19, 0 <pwq_activate_delayed_work> mutex_unlock(&wq->mutex); 671c: 94000000 bl 0 <_mutex_unlock> mutex_lock(&wq_pool_mutex); 6720: 91000273 add x19, x19, #0x0 6724: aa1303e0 mov x0, x19 6728: 94000000 bl 0 <_mutex_lock> __list_del(entry->prev, entry->next); 672c: a9410682 ldp x2, x1, [x20, #16] next->prev = prev; 6730: f9000441 str x1, [x2, #8] 6734: d2804003 mov x3, #0x200 // #512 mutex_unlock(&wq_pool_mutex); 6738: aa1303e0 mov x0, x19 673c: f2fbd5a3 movk x3, #0xdead, lsl #48 case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 6740: f9000022 str x2, [x1] 6744: f9000e83 str x3, [x20, #24] 6748: 94000000 bl 0 <_mutex_unlock> if (!(wq->flags & WQ_UNBOUND)) { 674c: b9420280 ldr w0, [x20, #512] 6750: 36080300 tbz w0, #1, 67b0 <destroy_workqueue+0x1d0> __READ_ONCE_SIZE; 6754: f9410a80 ldr x0, [x20, #528] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 6758: f9010a9f str xzr, [x20, #528] if (pwq) { 675c: b4000040 cbz x0, 6764 <destroy_workqueue+0x184> 6760: 97fff37a bl 3548 <put_pwq_unlocked.part.9> pwq = wq->dfl_pwq; 6764: f940b680 ldr x0, [x20, #360] wq->dfl_pwq = NULL; 6768: f900b69f str xzr, [x20, #360] if (pwq) { 676c: b4fffaa0 cbz x0, 66c0 <destroy_workqueue+0xe0> 6770: 97fff376 bl 3548 <put_pwq_unlocked.part.9> 6774: 17ffffd3 b 66c0 <destroy_workqueue+0xe0> for_each_pwq(pwq, wq) { 6778: 94000000 bl 0 <rcu_read_lock_held> 677c: 35fff880 cbnz w0, 668c <destroy_workqueue+0xac> 6780: 12800001 mov w1, #0xffffffff // #-1 6784: aa1803e0 mov x0, x24 6788: 94000000 bl 0 <lock_is_held_type> 678c: 35fff800 cbnz w0, 668c <destroy_workqueue+0xac> 6790: 52800023 mov w3, #0x1 // #1 6794: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 6798: aa1603e2 mov x2, x22 679c: 91000000 add x0, x0, #0x0 67a0: 52820b61 mov w1, #0x105b // #4187 67a4: 390036e3 strb w3, [x23, #13] 67a8: 94000000 bl 0 <lockdep_rcu_suspicious> 67ac: 17ffffb8 b 668c <destroy_workqueue+0xac> call_rcu(&wq->rcu, rcu_free_wq); 67b0: 91070280 add x0, x20, #0x1c0 67b4: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 67b8: 91000021 add x1, x1, 
#0x0 67bc: 94000000 bl 0 <call_rcu> 67c0: 17ffffc0 b 66c0 <destroy_workqueue+0xe0> 67c4: d503201f nop 00000000000067c8 <workqueue_prepare_cpu>: { 67c8: a9bd7bfd stp x29, x30, [sp, #-48]! 67cc: 910003fd mov x29, sp 67d0: a90153f3 stp x19, x20, [sp, #16] for_each_cpu_worker_pool(pool, cpu) { 67d4: 90000014 adrp x20, 0 <__per_cpu_offset> 67d8: 91000294 add x20, x20, #0x0 { 67dc: a9025bf5 stp x21, x22, [sp, #32] for_each_cpu_worker_pool(pool, cpu) { 67e0: 2a0003f6 mov w22, w0 67e4: 90000015 adrp x21, 0 <pwq_activate_delayed_work> 67e8: 910002b5 add x21, x21, #0x0 67ec: f8767a93 ldr x19, [x20, x22, lsl #3] 67f0: aa1503e0 mov x0, x21 67f4: 8b000273 add x19, x19, x0 67f8: 91300260 add x0, x19, #0xc00 67fc: eb00027f cmp x19, x0 6800: 540001c2 b.cs 6838 <workqueue_prepare_cpu+0x70> // b.hs, b.nlast 6804: d503201f nop if (pool->nr_workers) 6808: b9411a61 ldr w1, [x19, #280] if (!create_worker(pool)) 680c: aa1303e0 mov x0, x19 for_each_cpu_worker_pool(pool, cpu) { 6810: 91180273 add x19, x19, #0x600 if (pool->nr_workers) 6814: 35000061 cbnz w1, 6820 <workqueue_prepare_cpu+0x58> if (!create_worker(pool)) 6818: 97ffedea bl 1fc0 <create_worker> 681c: b4000180 cbz x0, 684c <workqueue_prepare_cpu+0x84> for_each_cpu_worker_pool(pool, cpu) { 6820: f8767a81 ldr x1, [x20, x22, lsl #3] 6824: aa1503e0 mov x0, x21 6828: 8b010000 add x0, x0, x1 682c: 91300000 add x0, x0, #0xc00 6830: eb00027f cmp x19, x0 6834: 54fffea3 b.cc 6808 <workqueue_prepare_cpu+0x40> // b.lo, b.ul, b.last return 0; 6838: 52800000 mov w0, #0x0 // #0 } 683c: a94153f3 ldp x19, x20, [sp, #16] 6840: a9425bf5 ldp x21, x22, [sp, #32] 6844: a8c37bfd ldp x29, x30, [sp], #48 6848: d65f03c0 ret return -ENOMEM; 684c: 12800160 mov w0, #0xfffffff4 // #-12 } 6850: a94153f3 ldp x19, x20, [sp, #16] 6854: a9425bf5 ldp x21, x22, [sp, #32] 6858: a8c37bfd ldp x29, x30, [sp], #48 685c: d65f03c0 ret 0000000000006860 <workqueue_online_cpu>: { 6860: d10283ff sub sp, sp, #0xa0 6864: 90000001 adrp x1, 0 <__stack_chk_guard> 6868: 91000021 add x1, x1, #0x0 686c: a9047bfd stp x29, x30, [sp, #64] 6870: 910103fd add x29, sp, #0x40 6874: a90553f3 stp x19, x20, [sp, #80] 6878: a9065bf5 stp x21, x22, [sp, #96] 687c: a90763f7 stp x23, x24, [sp, #112] 6880: 2a0003f7 mov w23, w0 mutex_lock(&wq_pool_mutex); 6884: 90000018 adrp x24, 0 <pwq_activate_delayed_work> { 6888: a9086bf9 stp x25, x26, [sp, #128] mutex_lock(&wq_pool_mutex); 688c: 91000318 add x24, x24, #0x0 for_each_pool(pool, pi) { 6890: 9000001a adrp x26, 0 <pwq_activate_delayed_work> { 6894: a90973fb stp x27, x28, [sp, #144] return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0; 6898: 9000001c adrp x28, 0 <pwq_activate_delayed_work> for_each_pool(pool, pi) { 689c: 9100035a add x26, x26, #0x0 { 68a0: f9400020 ldr x0, [x1] 68a4: f9001fe0 str x0, [sp, #56] 68a8: d2800000 mov x0, #0x0 // #0 68ac: f90017e1 str x1, [sp, #40] mutex_lock(&wq_pool_mutex); 68b0: aa1803e0 mov x0, x24 68b4: 94000000 bl 0 <_mutex_lock> 68b8: 710002ff cmp w23, #0x0 68bc: 1100fee0 add w0, w23, #0x3f 68c0: 1a97b000 csel w0, w0, w23, lt // lt = tstop 68c4: 91000382 add x2, x28, #0x0 68c8: d2800021 mov x1, #0x1 // #1 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 68cc: 910a2059 add x25, x2, #0x288 68d0: 13067c00 asr w0, w0, #6 68d4: 9ad72021 lsl x1, x1, x23 68d8: f90013e2 str x2, [sp, #32] 68dc: 937d7c00 sbfiz x0, x0, #3, #32 68e0: a90103e1 stp x1, x0, [sp, #16] for_each_pool(pool, pi) { 68e4: b90037ff str wzr, [sp, #52] 68e8: 14000006 b 6900 <workqueue_online_cpu+0xa0> mutex_unlock(&pool->attach_mutex); 68ec: aa1503e0 mov 
x0, x21 68f0: 94000000 bl 0 <_mutex_unlock> for_each_pool(pool, pi) { 68f4: b94037e0 ldr w0, [sp, #52] 68f8: 11000400 add w0, w0, #0x1 68fc: b90037e0 str w0, [sp, #52] 6900: 9100d3e1 add x1, sp, #0x34 6904: 9103a300 add x0, x24, #0xe8 6908: 94000000 bl 0 <idr_get_next> 690c: aa0003f3 mov x19, x0 6910: b40012a0 cbz x0, 6b64 <workqueue_online_cpu+0x304> 6914: 94000000 bl 0 <debug_lockdep_rcu_enabled> 6918: 34000060 cbz w0, 6924 <workqueue_online_cpu+0xc4> 691c: 39403b40 ldrb w0, [x26, #14] 6920: 34001040 cbz w0, 6b28 <workqueue_online_cpu+0x2c8> mutex_lock(&pool->attach_mutex); 6924: 910fe275 add x21, x19, #0x3f8 6928: aa1503e0 mov x0, x21 692c: 94000000 bl 0 <_mutex_lock> if (pool->cpu == cpu) 6930: b940f260 ldr w0, [x19, #240] 6934: 6b17001f cmp w0, w23 6938: 54000600 b.eq 69f8 <workqueue_online_cpu+0x198> // b.none else if (pool->cpu < 0) 693c: 36fffd80 tbz w0, #31, 68ec <workqueue_online_cpu+0x8c> lockdep_assert_held(&pool->attach_mutex); 6940: 90000000 adrp x0, 0 <debug_locks> 6944: 91000014 add x20, x0, #0x0 6948: b9400000 ldr w0, [x0] 694c: 35001460 cbnz w0, 6bd8 <workqueue_online_cpu+0x378> if (!cpumask_test_cpu(cpu, pool->attrs->cpumask)) 6950: f9428660 ldr x0, [x19, #1288] 6954: f9400fe2 ldr x2, [sp, #24] return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); 6958: 91002001 add x1, x0, #0x8 695c: f8626821 ldr x1, [x1, x2] 6960: f9400be2 ldr x2, [sp, #16] 6964: ea01005f tst x2, x1 6968: 54fffc20 b.eq 68ec <workqueue_online_cpu+0x8c> // b.none 696c: 90000001 adrp x1, 0 <__cpu_online_mask> for_each_pool_worker(worker, pool) 6970: 91138276 add x22, x19, #0x4e0 6974: f9400400 ldr x0, [x0, #8] 6978: f9400022 ldr x2, [x1] 697c: f9427261 ldr x1, [x19, #1248] 6980: 8a020000 and x0, x0, x2 6984: f94013e2 ldr x2, [sp, #32] 6988: d101403b sub x27, x1, #0x50 698c: eb0102df cmp x22, x1 6990: f9014440 str x0, [x2, #648] 6994: 54fffac0 b.eq 68ec <workqueue_online_cpu+0x8c> // b.none 6998: 9112c273 add x19, x19, #0x4b0 699c: 14000009 b 69c0 <workqueue_online_cpu+0x160> WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 69a0: f9402360 ldr x0, [x27, #64] 69a4: aa1903e1 mov x1, x25 69a8: 94000000 bl 0 <set_cpus_allowed_ptr> 69ac: 37f80220 tbnz w0, #31, 69f0 <workqueue_online_cpu+0x190> for_each_pool_worker(worker, pool) 69b0: f9402b7b ldr x27, [x27, #80] 69b4: eb1b02df cmp x22, x27 69b8: d101437b sub x27, x27, #0x50 69bc: 54fff980 b.eq 68ec <workqueue_online_cpu+0x8c> // b.none 69c0: b9400280 ldr w0, [x20] 69c4: 34fffee0 cbz w0, 69a0 <workqueue_online_cpu+0x140> 69c8: 12800001 mov w1, #0xffffffff // #-1 69cc: aa1303e0 mov x0, x19 69d0: 94000000 bl 0 <lock_is_held_type> 69d4: 35fffe60 cbnz w0, 69a0 <workqueue_online_cpu+0x140> 69d8: d4210000 brk #0x800 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0); 69dc: f9402360 ldr x0, [x27, #64] 69e0: aa1903e1 mov x1, x25 69e4: 94000000 bl 0 <set_cpus_allowed_ptr> 69e8: 36fffe40 tbz w0, #31, 69b0 <workqueue_online_cpu+0x150> 69ec: d503201f nop 69f0: d4210000 brk #0x800 69f4: 17ffffef b 69b0 <workqueue_online_cpu+0x150> lockdep_assert_held(&pool->attach_mutex); 69f8: 90000000 adrp x0, 0 <debug_locks> 69fc: 91000014 add x20, x0, #0x0 6a00: b9400000 ldr w0, [x0] 6a04: 35000f60 cbnz w0, 6bf0 <workqueue_online_cpu+0x390> for_each_pool_worker(worker, pool) 6a08: f9427260 ldr x0, [x19, #1248] 6a0c: 91138276 add x22, x19, #0x4e0 6a10: 9112c27b add x27, x19, #0x4b0 6a14: d101401c sub x28, x0, #0x50 6a18: eb0002df cmp x22, x0 6a1c: 54000161 b.ne 6a48 <workqueue_online_cpu+0x1e8> // b.any 6a20: 1400001c b 6a90 <workqueue_online_cpu+0x230> 
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 6a24: f9402380 ldr x0, [x28, #64] 6a28: f9428661 ldr x1, [x19, #1288] 6a2c: 91002021 add x1, x1, #0x8 6a30: 94000000 bl 0 <set_cpus_allowed_ptr> 6a34: 37f80220 tbnz w0, #31, 6a78 <workqueue_online_cpu+0x218> for_each_pool_worker(worker, pool) 6a38: f9402b82 ldr x2, [x28, #80] 6a3c: eb0202df cmp x22, x2 6a40: d101405c sub x28, x2, #0x50 6a44: 54000260 b.eq 6a90 <workqueue_online_cpu+0x230> // b.none 6a48: b9400280 ldr w0, [x20] 6a4c: 34fffec0 cbz w0, 6a24 <workqueue_online_cpu+0x1c4> 6a50: 12800001 mov w1, #0xffffffff // #-1 6a54: aa1b03e0 mov x0, x27 6a58: 94000000 bl 0 <lock_is_held_type> 6a5c: 35fffe40 cbnz w0, 6a24 <workqueue_online_cpu+0x1c4> 6a60: d4210000 brk #0x800 WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, 6a64: f9402380 ldr x0, [x28, #64] 6a68: f9428661 ldr x1, [x19, #1288] 6a6c: 91002021 add x1, x1, #0x8 6a70: 94000000 bl 0 <set_cpus_allowed_ptr> 6a74: 36fffe20 tbz w0, #31, 6a38 <workqueue_online_cpu+0x1d8> 6a78: d4210000 brk #0x800 for_each_pool_worker(worker, pool) 6a7c: f9402b82 ldr x2, [x28, #80] 6a80: eb0202df cmp x22, x2 6a84: d101405c sub x28, x2, #0x50 6a88: 54fffe01 b.ne 6a48 <workqueue_online_cpu+0x1e8> // b.any 6a8c: d503201f nop spin_lock_irq(&pool->lock); 6a90: aa1303e0 mov x0, x19 6a94: 94000000 bl 0 <rt_spin_lock> if (!(pool->flags & POOL_DISASSOCIATED)) { 6a98: b940fe60 ldr w0, [x19, #252] 6a9c: 361002a0 tbz w0, #2, 6af0 <workqueue_online_cpu+0x290> pool->flags &= ~POOL_DISASSOCIATED; 6aa0: 121d7800 and w0, w0, #0xfffffffb 6aa4: b900fe60 str w0, [x19, #252] for_each_pool_worker(worker, pool) { 6aa8: f9427262 ldr x2, [x19, #1248] 6aac: 9112c260 add x0, x19, #0x4b0 6ab0: f90007e0 str x0, [sp, #8] 6ab4: eb0202df cmp x22, x2 6ab8: d101405c sub x28, x2, #0x50 6abc: 540001a0 b.eq 6af0 <workqueue_online_cpu+0x290> // b.none 6ac0: b9400280 ldr w0, [x20] 6ac4: 35000260 cbnz w0, 6b10 <workqueue_online_cpu+0x2b0> unsigned int worker_flags = worker->flags; 6ac8: b9406b9b ldr w27, [x28, #104] if (worker_flags & WORKER_IDLE) 6acc: 3710019b tbnz w27, #2, 6afc <workqueue_online_cpu+0x29c> WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 6ad0: 363801db tbz w27, #7, 6b08 <workqueue_online_cpu+0x2a8> worker_flags &= ~WORKER_UNBOUND; 6ad4: 12187b61 and w1, w27, #0xffffff7f for_each_pool_worker(worker, pool) { 6ad8: f9402b80 ldr x0, [x28, #80] worker_flags &= ~WORKER_UNBOUND; 6adc: 32180021 orr w1, w1, #0x100 ACCESS_ONCE(worker->flags) = worker_flags; 6ae0: b9006b81 str w1, [x28, #104] for_each_pool_worker(worker, pool) { 6ae4: eb0002df cmp x22, x0 6ae8: d101401c sub x28, x0, #0x50 6aec: 54fffea1 b.ne 6ac0 <workqueue_online_cpu+0x260> // b.any spin_unlock_irq(&pool->lock); 6af0: aa1303e0 mov x0, x19 6af4: 94000000 bl 0 <rt_spin_unlock> 6af8: 17ffff7d b 68ec <workqueue_online_cpu+0x8c> wake_up_process(worker->task); 6afc: f9402380 ldr x0, [x28, #64] 6b00: 94000000 bl 0 <wake_up_process> WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); 6b04: 373ffe9b tbnz w27, #7, 6ad4 <workqueue_online_cpu+0x274> 6b08: d4210000 brk #0x800 6b0c: 17fffff2 b 6ad4 <workqueue_online_cpu+0x274> 6b10: f94007e0 ldr x0, [sp, #8] 6b14: 12800001 mov w1, #0xffffffff // #-1 6b18: 94000000 bl 0 <lock_is_held_type> for_each_pool_worker(worker, pool) { 6b1c: 35fffd60 cbnz w0, 6ac8 <workqueue_online_cpu+0x268> 6b20: d4210000 brk #0x800 6b24: 17ffffe9 b 6ac8 <workqueue_online_cpu+0x268> for_each_pool(pool, pi) { 6b28: 94000000 bl 0 <rcu_read_lock_held> 6b2c: 35ffefc0 cbnz w0, 6924 <workqueue_online_cpu+0xc4> 6b30: 12800001 mov w1, #0xffffffff // #-1 6b34: 9102e300 add 
x0, x24, #0xb8 6b38: 94000000 bl 0 <lock_is_held_type> 6b3c: 35ffef40 cbnz w0, 6924 <workqueue_online_cpu+0xc4> 6b40: 52800023 mov w3, #0x1 // #1 6b44: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 6b48: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 6b4c: 91000042 add x2, x2, #0x0 6b50: 91000000 add x0, x0, #0x0 6b54: 52825ba1 mov w1, #0x12dd // #4829 6b58: 39003b43 strb w3, [x26, #14] 6b5c: 94000000 bl 0 <lockdep_rcu_suspicious> 6b60: 17ffff71 b 6924 <workqueue_online_cpu+0xc4> list_for_each_entry(wq, &workqueues, list) 6b64: f9426300 ldr x0, [x24, #1216] 6b68: 91130314 add x20, x24, #0x4c0 6b6c: d1004013 sub x19, x0, #0x10 6b70: eb14001f cmp x0, x20 6b74: 54000120 b.eq 6b98 <workqueue_online_cpu+0x338> // b.none wq_update_unbound_numa(wq, cpu, true); 6b78: aa1303e0 mov x0, x19 6b7c: 52800022 mov w2, #0x1 // #1 6b80: 2a1703e1 mov w1, w23 6b84: 97fff457 bl 3ce0 <wq_update_unbound_numa> list_for_each_entry(wq, &workqueues, list) 6b88: f9400a73 ldr x19, [x19, #16] 6b8c: eb14027f cmp x19, x20 6b90: d1004273 sub x19, x19, #0x10 6b94: 54ffff21 b.ne 6b78 <workqueue_online_cpu+0x318> // b.any mutex_unlock(&wq_pool_mutex); 6b98: aa1803e0 mov x0, x24 6b9c: 94000000 bl 0 <_mutex_unlock> } 6ba0: f94017e1 ldr x1, [sp, #40] 6ba4: 52800000 mov w0, #0x0 // #0 6ba8: f9401fe2 ldr x2, [sp, #56] 6bac: f9400021 ldr x1, [x1] 6bb0: ca010041 eor x1, x2, x1 6bb4: b50002a1 cbnz x1, 6c08 <workqueue_online_cpu+0x3a8> 6bb8: a9447bfd ldp x29, x30, [sp, #64] 6bbc: a94553f3 ldp x19, x20, [sp, #80] 6bc0: a9465bf5 ldp x21, x22, [sp, #96] 6bc4: a94763f7 ldp x23, x24, [sp, #112] 6bc8: a9486bf9 ldp x25, x26, [sp, #128] 6bcc: a94973fb ldp x27, x28, [sp, #144] 6bd0: 910283ff add sp, sp, #0xa0 6bd4: d65f03c0 ret 6bd8: 12800001 mov w1, #0xffffffff // #-1 6bdc: 9112c260 add x0, x19, #0x4b0 6be0: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&pool->attach_mutex); 6be4: 35ffeb60 cbnz w0, 6950 <workqueue_online_cpu+0xf0> 6be8: d4210000 brk #0x800 6bec: 17ffff59 b 6950 <workqueue_online_cpu+0xf0> 6bf0: 12800001 mov w1, #0xffffffff // #-1 6bf4: 9112c260 add x0, x19, #0x4b0 6bf8: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&pool->attach_mutex); 6bfc: 35fff060 cbnz w0, 6a08 <workqueue_online_cpu+0x1a8> 6c00: d4210000 brk #0x800 6c04: 17ffff81 b 6a08 <workqueue_online_cpu+0x1a8> } 6c08: 94000000 bl 0 <__stack_chk_fail> 6c0c: d503201f nop 0000000000006c10 <workqueue_offline_cpu>: { 6c10: d10283ff sub sp, sp, #0xa0 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 6c14: b27b7be4 mov x4, #0xfffffffe0 // #68719476704 6c18: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 6c1c: 91000042 add x2, x2, #0x0 6c20: 910a4042 add x2, x2, #0x290 6c24: 52800003 mov w3, #0x0 // #0 { 6c28: a9067bfd stp x29, x30, [sp, #96] 6c2c: 910183fd add x29, sp, #0x60 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 6c30: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 6c34: 91000021 add x1, x1, #0x0 { 6c38: f9004bf7 str x23, [sp, #144] 6c3c: 90000017 adrp x23, 0 <__stack_chk_guard> 6c40: 910002f7 add x23, x23, #0x0 6c44: a9085bf5 stp x21, x22, [sp, #128] 6c48: 2a0003f5 mov w21, w0 6c4c: f94002e0 ldr x0, [x23] 6c50: f9002fe0 str x0, [sp, #88] 6c54: d2800000 mov x0, #0x0 // #0 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 6c58: f90007e4 str x4, [sp, #8] 6c5c: 9100a3e0 add x0, sp, #0x28 { 6c60: a90753f3 stp x19, x20, [sp, #112] INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 6c64: 94000000 bl 0 <lockdep_init_map> queue_work_on(cpu, system_highpri_wq, &unbind_work); 6c68: 90000000 adrp x0, 0 <pwq_activate_delayed_work> WRITE_ONCE(list->next, list); 6c6c: 
910043e4 add x4, sp, #0x10 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 6c70: 90000003 adrp x3, 0 <pwq_activate_delayed_work> 6c74: 91000063 add x3, x3, #0x0 queue_work_on(cpu, system_highpri_wq, &unbind_work); 6c78: f9400001 ldr x1, [x0] 6c7c: 910023e2 add x2, sp, #0x8 mutex_lock(&wq_pool_mutex); 6c80: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 6c84: 910002d6 add x22, x22, #0x0 queue_work_on(cpu, system_highpri_wq, &unbind_work); 6c88: 2a1503e0 mov w0, w21 6c8c: f9000be4 str x4, [sp, #16] INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 6c90: a9018fe4 stp x4, x3, [sp, #24] queue_work_on(cpu, system_highpri_wq, &unbind_work); 6c94: 94000000 bl 3330 <queue_work_on> mutex_lock(&wq_pool_mutex); 6c98: aa1603e0 mov x0, x22 6c9c: 94000000 bl 0 <_mutex_lock> list_for_each_entry(wq, &workqueues, list) 6ca0: 911302d4 add x20, x22, #0x4c0 6ca4: f94262c0 ldr x0, [x22, #1216] 6ca8: eb14001f cmp x0, x20 6cac: 54000160 b.eq 6cd8 <workqueue_offline_cpu+0xc8> // b.none 6cb0: d1004013 sub x19, x0, #0x10 6cb4: d503201f nop wq_update_unbound_numa(wq, cpu, false); 6cb8: aa1303e0 mov x0, x19 6cbc: 52800002 mov w2, #0x0 // #0 6cc0: 2a1503e1 mov w1, w21 6cc4: 97fff407 bl 3ce0 <wq_update_unbound_numa> list_for_each_entry(wq, &workqueues, list) 6cc8: f9400a73 ldr x19, [x19, #16] 6ccc: eb14027f cmp x19, x20 6cd0: d1004273 sub x19, x19, #0x10 6cd4: 54ffff21 b.ne 6cb8 <workqueue_offline_cpu+0xa8> // b.any mutex_unlock(&wq_pool_mutex); 6cd8: aa1603e0 mov x0, x22 6cdc: 94000000 bl 0 <_mutex_unlock> flush_work(&unbind_work); 6ce0: 910023e0 add x0, sp, #0x8 6ce4: 94000000 bl 23e0 <flush_work> } 6ce8: 52800000 mov w0, #0x0 // #0 6cec: f9402fe2 ldr x2, [sp, #88] 6cf0: f94002e1 ldr x1, [x23] 6cf4: ca010041 eor x1, x2, x1 6cf8: b50000e1 cbnz x1, 6d14 <workqueue_offline_cpu+0x104> 6cfc: a9467bfd ldp x29, x30, [sp, #96] 6d00: a94753f3 ldp x19, x20, [sp, #112] 6d04: a9485bf5 ldp x21, x22, [sp, #128] 6d08: f9404bf7 ldr x23, [sp, #144] 6d0c: 910283ff add sp, sp, #0xa0 6d10: d65f03c0 ret 6d14: 94000000 bl 0 <__stack_chk_fail> 0000000000006d18 <freeze_workqueues_begin>: { 6d18: a9bb7bfd stp x29, x30, [sp, #-80]! 
6d1c: 910003fd mov x29, sp 6d20: a9025bf5 stp x21, x22, [sp, #32] mutex_lock(&wq_pool_mutex); 6d24: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 6d28: 910002d6 add x22, x22, #0x0 6d2c: aa1603e0 mov x0, x22 { 6d30: a90153f3 stp x19, x20, [sp, #16] mutex_lock(&wq_pool_mutex); 6d34: 94000000 bl 0 <_mutex_lock> WARN_ON_ONCE(workqueue_freezing); 6d38: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 6d3c: 39400001 ldrb w1, [x0] 6d40: 35000861 cbnz w1, 6e4c <freeze_workqueues_begin+0x134> list_for_each_entry(wq, &workqueues, list) { 6d44: f94262d4 ldr x20, [x22, #1216] workqueue_freezing = true; 6d48: 52800021 mov w1, #0x1 // #1 6d4c: 39000001 strb w1, [x0] list_for_each_entry(wq, &workqueues, list) { 6d50: 911302c0 add x0, x22, #0x4c0 6d54: eb00029f cmp x20, x0 6d58: d1004294 sub x20, x20, #0x10 6d5c: 540006c0 b.eq 6e34 <freeze_workqueues_begin+0x11c> // b.none for_each_pwq(pwq, wq) 6d60: 90000015 adrp x21, 0 <pwq_activate_delayed_work> 6d64: 910002b5 add x21, x21, #0x0 6d68: a90363f7 stp x23, x24, [sp, #48] 6d6c: 90000017 adrp x23, 0 <pwq_activate_delayed_work> 6d70: 910002f7 add x23, x23, #0x0 6d74: f90023f9 str x25, [sp, #64] mutex_lock(&wq->mutex); 6d78: 91008299 add x25, x20, #0x20 6d7c: aa1903e0 mov x0, x25 6d80: 94000000 bl 0 <_mutex_lock> __READ_ONCE_SIZE; 6d84: f9400293 ldr x19, [x20] for_each_pwq(pwq, wq) 6d88: eb13029f cmp x20, x19 6d8c: d101c273 sub x19, x19, #0x70 6d90: 54000400 b.eq 6e10 <freeze_workqueues_begin+0xf8> // b.none 6d94: 91036298 add x24, x20, #0xd8 6d98: 14000007 b 6db4 <freeze_workqueues_begin+0x9c> pwq_adjust_max_active(pwq); 6d9c: aa1303e0 mov x0, x19 6da0: 97ffe5c0 bl 4a0 <pwq_adjust_max_active> 6da4: f9403a73 ldr x19, [x19, #112] for_each_pwq(pwq, wq) 6da8: eb13029f cmp x20, x19 6dac: d101c273 sub x19, x19, #0x70 6db0: 54000300 b.eq 6e10 <freeze_workqueues_begin+0xf8> // b.none 6db4: 94000000 bl 0 <debug_lockdep_rcu_enabled> 6db8: 34ffff20 cbz w0, 6d9c <freeze_workqueues_begin+0x84> 6dbc: 39403ea0 ldrb w0, [x21, #15] 6dc0: 35fffee0 cbnz w0, 6d9c <freeze_workqueues_begin+0x84> 6dc4: 94000000 bl 0 <rcu_read_lock_held> 6dc8: 35fffea0 cbnz w0, 6d9c <freeze_workqueues_begin+0x84> 6dcc: 12800001 mov w1, #0xffffffff // #-1 6dd0: aa1803e0 mov x0, x24 6dd4: 94000000 bl 0 <lock_is_held_type> 6dd8: 35fffe20 cbnz w0, 6d9c <freeze_workqueues_begin+0x84> 6ddc: 52800023 mov w3, #0x1 // #1 6de0: aa1703e2 mov x2, x23 6de4: 52826b81 mov w1, #0x135c // #4956 6de8: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 6dec: 91000000 add x0, x0, #0x0 6df0: 39003ea3 strb w3, [x21, #15] 6df4: 94000000 bl 0 <lockdep_rcu_suspicious> pwq_adjust_max_active(pwq); 6df8: aa1303e0 mov x0, x19 6dfc: 97ffe5a9 bl 4a0 <pwq_adjust_max_active> 6e00: f9403a73 ldr x19, [x19, #112] for_each_pwq(pwq, wq) 6e04: eb13029f cmp x20, x19 6e08: d101c273 sub x19, x19, #0x70 6e0c: 54fffd41 b.ne 6db4 <freeze_workqueues_begin+0x9c> // b.any mutex_unlock(&wq->mutex); 6e10: aa1903e0 mov x0, x25 6e14: 94000000 bl 0 <_mutex_unlock> list_for_each_entry(wq, &workqueues, list) { 6e18: f9400a94 ldr x20, [x20, #16] 6e1c: 911302c0 add x0, x22, #0x4c0 6e20: eb00029f cmp x20, x0 6e24: d1004294 sub x20, x20, #0x10 6e28: 54fffa81 b.ne 6d78 <freeze_workqueues_begin+0x60> // b.any 6e2c: a94363f7 ldp x23, x24, [sp, #48] 6e30: f94023f9 ldr x25, [sp, #64] mutex_unlock(&wq_pool_mutex); 6e34: aa1603e0 mov x0, x22 6e38: 94000000 bl 0 <_mutex_unlock> } 6e3c: a94153f3 ldp x19, x20, [sp, #16] 6e40: a9425bf5 ldp x21, x22, [sp, #32] 6e44: a8c57bfd ldp x29, x30, [sp], #80 6e48: d65f03c0 ret WARN_ON_ONCE(workqueue_freezing); 6e4c: d4210000 brk 
#0x800 6e50: 17ffffbd b 6d44 <freeze_workqueues_begin+0x2c> 6e54: d503201f nop 0000000000006e58 <freeze_workqueues_busy>: { 6e58: a9bb7bfd stp x29, x30, [sp, #-80]! 6e5c: 910003fd mov x29, sp 6e60: a9025bf5 stp x21, x22, [sp, #32] mutex_lock(&wq_pool_mutex); 6e64: 90000015 adrp x21, 0 <pwq_activate_delayed_work> 6e68: 910002b5 add x21, x21, #0x0 6e6c: aa1503e0 mov x0, x21 { 6e70: a90153f3 stp x19, x20, [sp, #16] mutex_lock(&wq_pool_mutex); 6e74: 94000000 bl 0 <_mutex_lock> WARN_ON_ONCE(!workqueue_freezing); 6e78: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 6e7c: 39400000 ldrb w0, [x0] 6e80: 34001340 cbz w0, 70e8 <freeze_workqueues_busy+0x290> list_for_each_entry(wq, &workqueues, list) { 6e84: f94262b4 ldr x20, [x21, #1216] 6e88: 911302a0 add x0, x21, #0x4c0 6e8c: eb00029f cmp x20, x0 6e90: d1004294 sub x20, x20, #0x10 6e94: 54000820 b.eq 6f98 <freeze_workqueues_busy+0x140> // b.none for_each_pwq(pwq, wq) { 6e98: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 6e9c: 910002d6 add x22, x22, #0x0 6ea0: a90363f7 stp x23, x24, [sp, #48] lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 6ea4: 90000017 adrp x23, 0 <rcu_lock_map> 6ea8: 910002f7 add x23, x23, #0x0 6eac: f90023f9 str x25, [sp, #64] 6eb0: 14000006 b 6ec8 <freeze_workqueues_busy+0x70> list_for_each_entry(wq, &workqueues, list) { 6eb4: f9400a94 ldr x20, [x20, #16] 6eb8: 911302a0 add x0, x21, #0x4c0 6ebc: eb00029f cmp x20, x0 6ec0: d1004294 sub x20, x20, #0x10 6ec4: 54000660 b.eq 6f90 <freeze_workqueues_busy+0x138> // b.none if (!(wq->flags & WQ_FREEZABLE)) 6ec8: b9420280 ldr w0, [x20, #512] 6ecc: 3617ff40 tbz w0, #2, 6eb4 <freeze_workqueues_busy+0x5c> __rcu_read_lock(); 6ed0: 94000000 bl 0 <__rcu_read_lock> lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); 6ed4: 90000006 adrp x6, 0 <pwq_activate_delayed_work> 6ed8: aa1703e0 mov x0, x23 6edc: 910000c6 add x6, x6, #0x0 6ee0: d2800005 mov x5, #0x0 // #0 6ee4: 52800004 mov w4, #0x0 // #0 6ee8: 52800043 mov w3, #0x2 // #2 6eec: 52800002 mov w2, #0x0 // #0 6ef0: 52800001 mov w1, #0x0 // #0 6ef4: 94000000 bl 0 <lock_acquire> RCU_LOCKDEP_WARN(!rcu_is_watching(), 6ef8: 94000000 bl 0 <debug_lockdep_rcu_enabled> 6efc: 34000060 cbz w0, 6f08 <freeze_workqueues_busy+0xb0> 6f00: 39401ac0 ldrb w0, [x22, #6] 6f04: 34000c20 cbz w0, 7088 <freeze_workqueues_busy+0x230> 6f08: f9400293 ldr x19, [x20] for_each_pwq(pwq, wq) { 6f0c: 90000018 adrp x24, 0 <pwq_activate_delayed_work> 6f10: 91036299 add x25, x20, #0xd8 6f14: 91000318 add x24, x24, #0x0 6f18: eb13029f cmp x20, x19 6f1c: d101c273 sub x19, x19, #0x70 6f20: 540001a0 b.eq 6f54 <freeze_workqueues_busy+0xfc> // b.none 6f24: d503201f nop 6f28: 94000000 bl 0 <debug_lockdep_rcu_enabled> 6f2c: 34000060 cbz w0, 6f38 <freeze_workqueues_busy+0xe0> 6f30: 394042c0 ldrb w0, [x22, #16] 6f34: 340006e0 cbz w0, 7010 <freeze_workqueues_busy+0x1b8> WARN_ON_ONCE(pwq->nr_active < 0); 6f38: b9405a60 ldr w0, [x19, #88] 6f3c: 37f803e0 tbnz w0, #31, 6fb8 <freeze_workqueues_busy+0x160> if (pwq->nr_active) { 6f40: 35000440 cbnz w0, 6fc8 <freeze_workqueues_busy+0x170> 6f44: f9403a73 ldr x19, [x19, #112] for_each_pwq(pwq, wq) { 6f48: eb13029f cmp x20, x19 6f4c: d101c273 sub x19, x19, #0x70 6f50: 54fffec1 b.ne 6f28 <freeze_workqueues_busy+0xd0> // b.any RCU_LOCKDEP_WARN(!rcu_is_watching(), 6f54: 94000000 bl 0 <debug_lockdep_rcu_enabled> 6f58: 34000060 cbz w0, 6f64 <freeze_workqueues_busy+0x10c> 6f5c: 39401ec0 ldrb w0, [x22, #7] 6f60: 34000ac0 cbz w0, 70b8 <freeze_workqueues_busy+0x260> __rcu_read_unlock(); 6f64: 94000000 bl 0 <__rcu_read_unlock> lock_release(map, 1, _THIS_IP_); 
6f68: aa1703e0 mov x0, x23 6f6c: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 6f70: 52800021 mov w1, #0x1 // #1 6f74: 91000042 add x2, x2, #0x0 6f78: 94000000 bl 0 <lock_release> list_for_each_entry(wq, &workqueues, list) { 6f7c: f9400a94 ldr x20, [x20, #16] 6f80: 911302a0 add x0, x21, #0x4c0 6f84: eb00029f cmp x20, x0 6f88: d1004294 sub x20, x20, #0x10 6f8c: 54fff9e1 b.ne 6ec8 <freeze_workqueues_busy+0x70> // b.any out_unlock: 6f90: a94363f7 ldp x23, x24, [sp, #48] 6f94: f94023f9 ldr x25, [sp, #64] mutex_unlock(&wq_pool_mutex); 6f98: aa1503e0 mov x0, x21 bool busy = false; 6f9c: 52800013 mov w19, #0x0 // #0 mutex_unlock(&wq_pool_mutex); 6fa0: 94000000 bl 0 <_mutex_unlock> } 6fa4: 2a1303e0 mov w0, w19 6fa8: a94153f3 ldp x19, x20, [sp, #16] 6fac: a9425bf5 ldp x21, x22, [sp, #32] 6fb0: a8c57bfd ldp x29, x30, [sp], #80 6fb4: d65f03c0 ret WARN_ON_ONCE(pwq->nr_active < 0); 6fb8: d4210000 brk #0x800 6fbc: b9405a60 ldr w0, [x19, #88] if (pwq->nr_active) { 6fc0: 34fffc20 cbz w0, 6f44 <freeze_workqueues_busy+0xec> 6fc4: d503201f nop RCU_LOCKDEP_WARN(!rcu_is_watching(), 6fc8: 94000000 bl 0 <debug_lockdep_rcu_enabled> 6fcc: 350003e0 cbnz w0, 7048 <freeze_workqueues_busy+0x1f0> __rcu_read_unlock(); 6fd0: 94000000 bl 0 <__rcu_read_unlock> busy = true; 6fd4: 52800033 mov w19, #0x1 // #1 lock_release(map, 1, _THIS_IP_); 6fd8: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 6fdc: 2a1303e1 mov w1, w19 6fe0: 91000042 add x2, x2, #0x0 6fe4: aa1703e0 mov x0, x23 6fe8: 94000000 bl 0 <lock_release> mutex_unlock(&wq_pool_mutex); 6fec: aa1503e0 mov x0, x21 goto out_unlock; 6ff0: a94363f7 ldp x23, x24, [sp, #48] 6ff4: f94023f9 ldr x25, [sp, #64] mutex_unlock(&wq_pool_mutex); 6ff8: 94000000 bl 0 <_mutex_unlock> } 6ffc: 2a1303e0 mov w0, w19 7000: a94153f3 ldp x19, x20, [sp, #16] 7004: a9425bf5 ldp x21, x22, [sp, #32] 7008: a8c57bfd ldp x29, x30, [sp], #80 700c: d65f03c0 ret for_each_pwq(pwq, wq) { 7010: 94000000 bl 0 <rcu_read_lock_held> 7014: 35fff920 cbnz w0, 6f38 <freeze_workqueues_busy+0xe0> 7018: 12800001 mov w1, #0xffffffff // #-1 701c: aa1903e0 mov x0, x25 7020: 94000000 bl 0 <lock_is_held_type> 7024: 35fff8a0 cbnz w0, 6f38 <freeze_workqueues_busy+0xe0> 7028: 52800023 mov w3, #0x1 // #1 702c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 7030: aa1803e2 mov x2, x24 7034: 91000000 add x0, x0, #0x0 7038: 52827061 mov w1, #0x1383 // #4995 703c: 390042c3 strb w3, [x22, #16] 7040: 94000000 bl 0 <lockdep_rcu_suspicious> 7044: 17ffffbd b 6f38 <freeze_workqueues_busy+0xe0> RCU_LOCKDEP_WARN(!rcu_is_watching(), 7048: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 704c: 91000273 add x19, x19, #0x0 7050: 39401e60 ldrb w0, [x19, #7] 7054: 35fffbe0 cbnz w0, 6fd0 <freeze_workqueues_busy+0x178> 7058: 94000000 bl 0 <rcu_is_watching> 705c: 72001c1f tst w0, #0xff 7060: 54fffb81 b.ne 6fd0 <freeze_workqueues_busy+0x178> // b.any 7064: 52800023 mov w3, #0x1 // #1 7068: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 706c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 7070: 91000042 add x2, x2, #0x0 7074: 91000000 add x0, x0, #0x0 7078: 528051a1 mov w1, #0x28d // #653 707c: 39001e63 strb w3, [x19, #7] 7080: 94000000 bl 0 <lockdep_rcu_suspicious> 7084: 17ffffd3 b 6fd0 <freeze_workqueues_busy+0x178> RCU_LOCKDEP_WARN(!rcu_is_watching(), 7088: 94000000 bl 0 <rcu_is_watching> 708c: 72001c1f tst w0, #0xff 7090: 54fff3c1 b.ne 6f08 <freeze_workqueues_busy+0xb0> // b.any 7094: 52800023 mov w3, #0x1 // #1 7098: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 709c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 70a0: 91000042 add x2, 
x2, #0x0 70a4: 91000000 add x0, x0, #0x0 70a8: 52804b41 mov w1, #0x25a // #602 70ac: 39001ac3 strb w3, [x22, #6] 70b0: 94000000 bl 0 <lockdep_rcu_suspicious> 70b4: 17ffff95 b 6f08 <freeze_workqueues_busy+0xb0> RCU_LOCKDEP_WARN(!rcu_is_watching(), 70b8: 94000000 bl 0 <rcu_is_watching> 70bc: 72001c1f tst w0, #0xff 70c0: 54fff521 b.ne 6f64 <freeze_workqueues_busy+0x10c> // b.any 70c4: 52800023 mov w3, #0x1 // #1 70c8: 90000002 adrp x2, 0 <pwq_activate_delayed_work> 70cc: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 70d0: 91000042 add x2, x2, #0x0 70d4: 91000000 add x0, x0, #0x0 70d8: 528051a1 mov w1, #0x28d // #653 70dc: 39001ec3 strb w3, [x22, #7] 70e0: 94000000 bl 0 <lockdep_rcu_suspicious> 70e4: 17ffffa0 b 6f64 <freeze_workqueues_busy+0x10c> WARN_ON_ONCE(!workqueue_freezing); 70e8: d4210000 brk #0x800 70ec: 17ffff66 b 6e84 <freeze_workqueues_busy+0x2c> 00000000000070f0 <thaw_workqueues>: { 70f0: a9bb7bfd stp x29, x30, [sp, #-80]! 70f4: 910003fd mov x29, sp 70f8: a9025bf5 stp x21, x22, [sp, #32] mutex_lock(&wq_pool_mutex); 70fc: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 7100: 910002d6 add x22, x22, #0x0 7104: aa1603e0 mov x0, x22 7108: 94000000 bl 0 <_mutex_lock> if (!workqueue_freezing) 710c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 7110: 39400001 ldrb w1, [x0] 7114: 340007e1 cbz w1, 7210 <thaw_workqueues+0x120> workqueue_freezing = false; 7118: a90153f3 stp x19, x20, [sp, #16] list_for_each_entry(wq, &workqueues, list) { 711c: f94262d4 ldr x20, [x22, #1216] workqueue_freezing = false; 7120: 3900001f strb wzr, [x0] list_for_each_entry(wq, &workqueues, list) { 7124: 911302c0 add x0, x22, #0x4c0 7128: eb00029f cmp x20, x0 712c: d1004294 sub x20, x20, #0x10 7130: 540007a0 b.eq 7224 <thaw_workqueues+0x134> // b.none for_each_pwq(pwq, wq) 7134: 90000015 adrp x21, 0 <pwq_activate_delayed_work> 7138: 910002b5 add x21, x21, #0x0 713c: a90363f7 stp x23, x24, [sp, #48] 7140: 90000017 adrp x23, 0 <pwq_activate_delayed_work> 7144: 910002f7 add x23, x23, #0x0 7148: f90023f9 str x25, [sp, #64] 714c: d503201f nop mutex_lock(&wq->mutex); 7150: 91008299 add x25, x20, #0x20 7154: aa1903e0 mov x0, x25 7158: 94000000 bl 0 <_mutex_lock> 715c: f9400293 ldr x19, [x20] for_each_pwq(pwq, wq) 7160: eb13029f cmp x20, x19 7164: d101c273 sub x19, x19, #0x70 7168: 54000400 b.eq 71e8 <thaw_workqueues+0xf8> // b.none 716c: 91036298 add x24, x20, #0xd8 7170: 14000007 b 718c <thaw_workqueues+0x9c> pwq_adjust_max_active(pwq); 7174: aa1303e0 mov x0, x19 7178: 97ffe4ca bl 4a0 <pwq_adjust_max_active> 717c: f9403a73 ldr x19, [x19, #112] for_each_pwq(pwq, wq) 7180: eb13029f cmp x20, x19 7184: d101c273 sub x19, x19, #0x70 7188: 54000300 b.eq 71e8 <thaw_workqueues+0xf8> // b.none 718c: 94000000 bl 0 <debug_lockdep_rcu_enabled> 7190: 34ffff20 cbz w0, 7174 <thaw_workqueues+0x84> 7194: 394046a0 ldrb w0, [x21, #17] 7198: 35fffee0 cbnz w0, 7174 <thaw_workqueues+0x84> 719c: 94000000 bl 0 <rcu_read_lock_held> 71a0: 35fffea0 cbnz w0, 7174 <thaw_workqueues+0x84> 71a4: 12800001 mov w1, #0xffffffff // #-1 71a8: aa1803e0 mov x0, x24 71ac: 94000000 bl 0 <lock_is_held_type> 71b0: 35fffe20 cbnz w0, 7174 <thaw_workqueues+0x84> 71b4: 52800023 mov w3, #0x1 // #1 71b8: aa1703e2 mov x2, x23 71bc: 52827541 mov w1, #0x13aa // #5034 71c0: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 71c4: 91000000 add x0, x0, #0x0 71c8: 390046a3 strb w3, [x21, #17] 71cc: 94000000 bl 0 <lockdep_rcu_suspicious> pwq_adjust_max_active(pwq); 71d0: aa1303e0 mov x0, x19 71d4: 97ffe4b3 bl 4a0 <pwq_adjust_max_active> 71d8: f9403a73 ldr x19, [x19, #112] 
for_each_pwq(pwq, wq) 71dc: eb13029f cmp x20, x19 71e0: d101c273 sub x19, x19, #0x70 71e4: 54fffd41 b.ne 718c <thaw_workqueues+0x9c> // b.any mutex_unlock(&wq->mutex); 71e8: aa1903e0 mov x0, x25 71ec: 94000000 bl 0 <_mutex_unlock> list_for_each_entry(wq, &workqueues, list) { 71f0: f9400a94 ldr x20, [x20, #16] 71f4: 911302c0 add x0, x22, #0x4c0 71f8: eb00029f cmp x20, x0 71fc: d1004294 sub x20, x20, #0x10 7200: 54fffa81 b.ne 7150 <thaw_workqueues+0x60> // b.any 7204: a94153f3 ldp x19, x20, [sp, #16] 7208: a94363f7 ldp x23, x24, [sp, #48] 720c: f94023f9 ldr x25, [sp, #64] mutex_unlock(&wq_pool_mutex); 7210: aa1603e0 mov x0, x22 7214: 94000000 bl 0 <_mutex_unlock> } 7218: a9425bf5 ldp x21, x22, [sp, #32] 721c: a8c57bfd ldp x29, x30, [sp], #80 7220: d65f03c0 ret 7224: a94153f3 ldp x19, x20, [sp, #16] 7228: 17fffffa b 7210 <thaw_workqueues+0x120> 722c: d503201f nop 0000000000007230 <workqueue_set_unbound_cpumask>: { 7230: d101c3ff sub sp, sp, #0x70 7234: a9027bfd stp x29, x30, [sp, #32] 7238: 910083fd add x29, sp, #0x20 723c: a90353f3 stp x19, x20, [sp, #48] 7240: aa0003f3 mov x19, x0 7244: 90000000 adrp x0, 0 <__cpu_possible_mask> 7248: a90563f7 stp x23, x24, [sp, #80] 724c: 90000017 adrp x23, 0 <__stack_chk_guard> 7250: 910002f7 add x23, x23, #0x0 7254: a9066bf9 stp x25, x26, [sp, #96] 7258: f9400001 ldr x1, [x0] 725c: f94002e0 ldr x0, [x23] 7260: f9000fe0 str x0, [sp, #24] 7264: d2800000 mov x0, #0x0 // #0 7268: f9400260 ldr x0, [x19] 726c: 8a010000 and x0, x0, x1 7270: f9000260 str x0, [x19] if (!cpumask_empty(cpumask)) { 7274: b4000b20 cbz x0, 73d8 <workqueue_set_unbound_cpumask+0x1a8> mutex_lock(&wq_pool_mutex); 7278: 90000014 adrp x20, 0 <pwq_activate_delayed_work> 727c: 91000294 add x20, x20, #0x0 7280: a9045bf5 stp x21, x22, [sp, #64] static inline void get_online_cpus(void) { cpus_read_lock(); } 7284: 94000000 bl 0 <cpus_read_lock> 7288: aa1403e0 mov x0, x20 LIST_HEAD(ctxs); 728c: 910023f5 add x21, sp, #0x8 mutex_lock(&wq_pool_mutex); 7290: 94000000 bl 0 <_mutex_lock> lockdep_assert_held(&wq_pool_mutex); 7294: 90000000 adrp x0, 0 <debug_locks> *dst = *src; 7298: 90000016 adrp x22, 0 <pwq_activate_delayed_work> 729c: 910002d6 add x22, x22, #0x0 LIST_HEAD(ctxs); 72a0: a900d7f5 stp x21, x21, [sp, #8] lockdep_assert_held(&wq_pool_mutex); 72a4: b9400000 ldr w0, [x0] 72a8: f9400261 ldr x1, [x19] 72ac: f9402ad8 ldr x24, [x22, #80] 72b0: f9002ac1 str x1, [x22, #80] 72b4: 35000820 cbnz w0, 73b8 <workqueue_set_unbound_cpumask+0x188> list_for_each_entry(wq, &workqueues, list) { 72b8: f9426280 ldr x0, [x20, #1216] 72bc: 91130299 add x25, x20, #0x4c0 72c0: d1004013 sub x19, x0, #0x10 72c4: eb19001f cmp x0, x25 72c8: 54000240 b.eq 7310 <workqueue_set_unbound_cpumask+0xe0> // b.none 72cc: d503201f nop if (!(wq->flags & WQ_UNBOUND)) 72d0: b9420260 ldr w0, [x19, #512] 72d4: 36080160 tbz w0, #1, 7300 <workqueue_set_unbound_cpumask+0xd0> if (wq->flags & __WQ_ORDERED) 72d8: 37880140 tbnz w0, #17, 7300 <workqueue_set_unbound_cpumask+0xd0> ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs); 72dc: f940b261 ldr x1, [x19, #352] 72e0: aa1303e0 mov x0, x19 72e4: 97fff121 bl 3768 <apply_wqattrs_prepare> if (!ctx) { 72e8: b4000580 cbz x0, 7398 <workqueue_set_unbound_cpumask+0x168> __list_add(new, head->prev, head); 72ec: f9400be1 ldr x1, [sp, #16] list_add_tail(&ctx->list, &ctxs); 72f0: 91004002 add x2, x0, #0x10 new->prev = prev; 72f4: a9010415 stp x21, x1, [x0, #16] next->prev = new; 72f8: f9000be2 str x2, [sp, #16] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 72fc: f9000022 str x2, [x1] 
list_for_each_entry(wq, &workqueues, list) { 7300: f9400a73 ldr x19, [x19, #16] 7304: eb19027f cmp x19, x25 7308: d1004273 sub x19, x19, #0x10 730c: 54fffe21 b.ne 72d0 <workqueue_set_unbound_cpumask+0xa0> // b.any list_for_each_entry_safe(ctx, n, &ctxs, list) { 7310: f94007f3 ldr x19, [sp, #8] int ret = 0; 7314: 52800019 mov w25, #0x0 // #0 list_for_each_entry_safe(ctx, n, &ctxs, list) { 7318: eb1302bf cmp x21, x19 731c: f85f067a ldr x26, [x19], #-16 7320: d100435a sub x26, x26, #0x10 7324: 54000560 b.eq 73d0 <workqueue_set_unbound_cpumask+0x1a0> // b.none if (!ret) 7328: 34000339 cbz w25, 738c <workqueue_set_unbound_cpumask+0x15c> apply_wqattrs_cleanup(ctx); 732c: aa1303e0 mov x0, x19 7330: 97fff0fa bl 3718 <apply_wqattrs_cleanup> list_for_each_entry_safe(ctx, n, &ctxs, list) { 7334: aa1a03e0 mov x0, x26 7338: aa1a03f3 mov x19, x26 733c: f8410c1a ldr x26, [x0, #16]! 7340: d100435a sub x26, x26, #0x10 7344: eb15001f cmp x0, x21 7348: 54ffff01 b.ne 7328 <workqueue_set_unbound_cpumask+0xf8> // b.any if (ret < 0) 734c: 35000339 cbnz w25, 73b0 <workqueue_set_unbound_cpumask+0x180> mutex_unlock(&wq_pool_mutex); 7350: aa1403e0 mov x0, x20 7354: 94000000 bl 0 <_mutex_unlock> static inline void put_online_cpus(void) { cpus_read_unlock(); } 7358: 94000000 bl 0 <cpus_read_unlock> 735c: a9445bf5 ldp x21, x22, [sp, #64] } 7360: 2a1903e0 mov w0, w25 7364: f9400fe2 ldr x2, [sp, #24] 7368: f94002e1 ldr x1, [x23] 736c: ca010041 eor x1, x2, x1 7370: b5000381 cbnz x1, 73e0 <workqueue_set_unbound_cpumask+0x1b0> 7374: a9427bfd ldp x29, x30, [sp, #32] 7378: a94353f3 ldp x19, x20, [sp, #48] 737c: a94563f7 ldp x23, x24, [sp, #80] 7380: a9466bf9 ldp x25, x26, [sp, #96] 7384: 9101c3ff add sp, sp, #0x70 7388: d65f03c0 ret apply_wqattrs_commit(ctx); 738c: aa1303e0 mov x0, x19 7390: 97ffee2e bl 2c48 <apply_wqattrs_commit> 7394: 17ffffe6 b 732c <workqueue_set_unbound_cpumask+0xfc> list_for_each_entry_safe(ctx, n, &ctxs, list) { 7398: f94007f3 ldr x19, [sp, #8] ret = -ENOMEM; 739c: 12800179 mov w25, #0xfffffff4 // #-12 list_for_each_entry_safe(ctx, n, &ctxs, list) { 73a0: eb1302bf cmp x21, x19 73a4: f85f067a ldr x26, [x19], #-16 73a8: d100435a sub x26, x26, #0x10 73ac: 54fffbe1 b.ne 7328 <workqueue_set_unbound_cpumask+0xf8> // b.any 73b0: f9002ad8 str x24, [x22, #80] 73b4: 17ffffe7 b 7350 <workqueue_set_unbound_cpumask+0x120> 73b8: 12800001 mov w1, #0xffffffff // #-1 73bc: 9102e280 add x0, x20, #0xb8 73c0: 94000000 bl 0 <lock_is_held_type> lockdep_assert_held(&wq_pool_mutex); 73c4: 35fff7a0 cbnz w0, 72b8 <workqueue_set_unbound_cpumask+0x88> 73c8: d4210000 brk #0x800 73cc: 17ffffbb b 72b8 <workqueue_set_unbound_cpumask+0x88> int ret = 0; 73d0: 52800019 mov w25, #0x0 // #0 73d4: 17ffffdf b 7350 <workqueue_set_unbound_cpumask+0x120> int ret = -EINVAL; 73d8: 128002b9 mov w25, #0xffffffea // #-22 return ret; 73dc: 17ffffe1 b 7360 <workqueue_set_unbound_cpumask+0x130> 73e0: a9045bf5 stp x21, x22, [sp, #64] } 73e4: 94000000 bl 0 <__stack_chk_fail> 00000000000073e8 <wq_unbound_cpumask_store>: { 73e8: d10103ff sub sp, sp, #0x40 char *nl = strchr(buf, '\n'); 73ec: 52800141 mov w1, #0xa // #10 73f0: a9017bfd stp x29, x30, [sp, #16] 73f4: 910043fd add x29, sp, #0x10 73f8: a90253f3 stp x19, x20, [sp, #32] 73fc: 90000013 adrp x19, 0 <__stack_chk_guard> 7400: 91000273 add x19, x19, #0x0 7404: f9400260 ldr x0, [x19] 7408: f90007e0 str x0, [sp, #8] 740c: d2800000 mov x0, #0x0 // #0 7410: aa0203f4 mov x20, x2 7414: aa0203e0 mov x0, x2 *dst = 0UL; 7418: f90003ff str xzr, [sp] 741c: f9001bf5 str x21, [sp, #48] 7420: aa0303f5 mov x21, x3 
7424: 94000000 bl 0 <strchr> unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); 7428: 4b140001 sub w1, w0, w20 742c: b40002c0 cbz x0, 7484 <wq_unbound_cpumask_store+0x9c> return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); 7430: aa1403e0 mov x0, x20 7434: 910003e3 mov x3, sp 7438: 52800804 mov w4, #0x40 // #64 743c: 52800002 mov w2, #0x0 // #0 7440: 94000000 bl 0 <__bitmap_parse> if (!ret) 7444: 34000180 cbz w0, 7474 <wq_unbound_cpumask_store+0x8c> return ret ? ret : count; 7448: 93407c15 sxtw x21, w0 } 744c: aa1503e0 mov x0, x21 7450: f94007e2 ldr x2, [sp, #8] 7454: f9400261 ldr x1, [x19] 7458: ca010041 eor x1, x2, x1 745c: b50001c1 cbnz x1, 7494 <wq_unbound_cpumask_store+0xac> 7460: a9417bfd ldp x29, x30, [sp, #16] 7464: a94253f3 ldp x19, x20, [sp, #32] 7468: f9401bf5 ldr x21, [sp, #48] 746c: 910103ff add sp, sp, #0x40 7470: d65f03c0 ret ret = workqueue_set_unbound_cpumask(cpumask); 7474: 910003e0 mov x0, sp 7478: 94000000 bl 7230 <workqueue_set_unbound_cpumask> return ret ? ret : count; 747c: 34fffe80 cbz w0, 744c <wq_unbound_cpumask_store+0x64> 7480: 17fffff2 b 7448 <wq_unbound_cpumask_store+0x60> 7484: aa1403e0 mov x0, x20 7488: 94000000 bl 0 <strlen> 748c: 2a0003e1 mov w1, w0 7490: 17ffffe8 b 7430 <wq_unbound_cpumask_store+0x48> } 7494: 94000000 bl 0 <__stack_chk_fail> 0000000000007498 <workqueue_sysfs_register>: { 7498: a9bc7bfd stp x29, x30, [sp, #-64]! 749c: 910003fd mov x29, sp 74a0: f9001bf7 str x23, [sp, #48] if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 74a4: b9420001 ldr w1, [x0, #512] 74a8: 379809a1 tbnz w1, #19, 75dc <workqueue_sysfs_register+0x144> void *ret = kmem_cache_alloc(s, flags); 74ac: a9025bf5 stp x21, x22, [sp, #32] 74b0: aa0003f5 mov x21, x0 return kmem_cache_alloc_trace( 74b4: 90000000 adrp x0, 0 <kmalloc_caches> void *ret = kmem_cache_alloc(s, flags); 74b8: 52901801 mov w1, #0x80c0 // #32960 74bc: f9400000 ldr x0, [x0] 74c0: 72a02801 movk w1, #0x140, lsl #16 74c4: 94000000 bl 0 <kmem_cache_alloc> wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); 74c8: f900baa0 str x0, [x21, #368] 74cc: aa0003f6 mov x22, x0 if (!wq_dev) 74d0: b4000920 cbz x0, 75f4 <workqueue_sysfs_register+0x15c> wq_dev->wq = wq; 74d4: a90153f3 stp x19, x20, [sp, #16] wq_dev->dev.bus = &wq_subsys; 74d8: 90000013 adrp x19, 0 <pwq_activate_delayed_work> 74dc: 91000273 add x19, x19, #0x0 74e0: 91080264 add x4, x19, #0x200 wq_dev->dev.release = wq_device_release; 74e4: 90000003 adrp x3, 0 <pwq_activate_delayed_work> 74e8: 91000063 add x3, x3, #0x0 dev_set_name(&wq_dev->dev, "%s", wq->name); 74ec: 910022d4 add x20, x22, #0x8 74f0: 9106a2a2 add x2, x21, #0x1a8 74f4: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 74f8: aa1403e0 mov x0, x20 74fc: 91000021 add x1, x1, #0x0 wq_dev->wq = wq; 7500: f90002d5 str x21, [x22] wq_dev->dev.bus = &wq_subsys; 7504: f900aac4 str x4, [x22, #336] wq_dev->dev.release = wq_device_release; 7508: f9036ac3 str x3, [x22, #1744] dev_set_name(&wq_dev->dev, "%s", wq->name); 750c: 94000000 bl 0 <dev_set_name> return dev->kobj.uevent_suppress; } static inline void dev_set_uevent_suppress(struct device *dev, int val) { dev->kobj.uevent_suppress = val; 7510: 39413281 ldrb w1, [x20, #76] ret = device_register(&wq_dev->dev); 7514: aa1403e0 mov x0, x20 7518: 321c0021 orr w1, w1, #0x10 751c: 39013281 strb w1, [x20, #76] 7520: 94000000 bl 0 <device_register> 7524: 2a0003f7 mov w23, w0 if (ret) { 7528: 35000480 cbnz w0, 75b8 <workqueue_sysfs_register+0x120> if (wq->flags & WQ_UNBOUND) { 752c: b94202a0 ldr w0, [x21, #512] 7530: 360802c0 tbz w0, #1, 7588 
<workqueue_sysfs_register+0xf0> for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) { 7534: f9426a60 ldr x0, [x19, #1232] 7538: 91134273 add x19, x19, #0x4d0 753c: b5000080 cbnz x0, 754c <workqueue_sysfs_register+0xb4> 7540: 14000012 b 7588 <workqueue_sysfs_register+0xf0> 7544: f8430e61 ldr x1, [x19, #48]! 7548: b4000201 cbz x1, 7588 <workqueue_sysfs_register+0xf0> ret = device_create_file(&wq_dev->dev, attr); 754c: aa1303e1 mov x1, x19 7550: aa1403e0 mov x0, x20 7554: 94000000 bl 0 <device_create_file> 7558: 2a0003e1 mov w1, w0 if (ret) { 755c: 34ffff40 cbz w0, 7544 <workqueue_sysfs_register+0xac> ret = device_create_file(&wq_dev->dev, attr); 7560: 2a0103f7 mov w23, w1 device_unregister(&wq_dev->dev); 7564: aa1403e0 mov x0, x20 7568: 94000000 bl 0 <device_unregister> wq->wq_dev = NULL; 756c: f900babf str xzr, [x21, #368] } 7570: 2a1703e0 mov w0, w23 return ret; 7574: a94153f3 ldp x19, x20, [sp, #16] 7578: a9425bf5 ldp x21, x22, [sp, #32] } 757c: f9401bf7 ldr x23, [sp, #48] 7580: a8c47bfd ldp x29, x30, [sp], #64 7584: d65f03c0 ret 7588: 39413282 ldrb w2, [x20, #76] kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 758c: 910062c0 add x0, x22, #0x18 7590: 52800001 mov w1, #0x0 // #0 7594: 121b7842 and w2, w2, #0xffffffef 7598: 39013282 strb w2, [x20, #76] 759c: 94000000 bl 0 <kobject_uevent> return 0; 75a0: a94153f3 ldp x19, x20, [sp, #16] 75a4: a9425bf5 ldp x21, x22, [sp, #32] } 75a8: 2a1703e0 mov w0, w23 75ac: f9401bf7 ldr x23, [sp, #48] 75b0: a8c47bfd ldp x29, x30, [sp], #64 75b4: d65f03c0 ret put_device(&wq_dev->dev); 75b8: aa1403e0 mov x0, x20 75bc: 94000000 bl 0 <put_device> wq->wq_dev = NULL; 75c0: f900babf str xzr, [x21, #368] } 75c4: 2a1703e0 mov w0, w23 return ret; 75c8: a94153f3 ldp x19, x20, [sp, #16] 75cc: a9425bf5 ldp x21, x22, [sp, #32] } 75d0: f9401bf7 ldr x23, [sp, #48] 75d4: a8c47bfd ldp x29, x30, [sp], #64 75d8: d65f03c0 ret if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) 75dc: d4210000 brk #0x800 return -EINVAL; 75e0: 128002b7 mov w23, #0xffffffea // #-22 } 75e4: 2a1703e0 mov w0, w23 75e8: f9401bf7 ldr x23, [sp, #48] 75ec: a8c47bfd ldp x29, x30, [sp], #64 75f0: d65f03c0 ret return -ENOMEM; 75f4: 12800177 mov w23, #0xfffffff4 // #-12 75f8: a9425bf5 ldp x21, x22, [sp, #32] 75fc: 17ffffeb b 75a8 <workqueue_sysfs_register+0x110> 0000000000007600 <__alloc_workqueue_key>: { 7600: d103c3ff sub sp, sp, #0xf0 7604: a9077bfd stp x29, x30, [sp, #112] 7608: 9101c3fd add x29, sp, #0x70 760c: a90853f3 stp x19, x20, [sp, #128] 7610: 2a0103f4 mov w20, w1 if ((flags & WQ_UNBOUND) && max_active == 1) 7614: f27f029f tst x20, #0x2 { 7618: a9095bf5 stp x21, x22, [sp, #144] 761c: 90000016 adrp x22, 0 <__stack_chk_guard> 7620: 910002d6 add x22, x22, #0x0 flags |= __WQ_ORDERED; 7624: 7a411840 ccmp w2, #0x1, #0x0, ne // ne = any { 7628: f94002c1 ldr x1, [x22] 762c: f90037e1 str x1, [sp, #104] 7630: d2800001 mov x1, #0x0 // #0 7634: a90a63f7 stp x23, x24, [sp, #160] flags |= __WQ_ORDERED; 7638: 320f0281 orr w1, w20, #0x20000 { 763c: a90b6bf9 stp x25, x26, [sp, #176] 7640: 2a0203f8 mov w24, w2 7644: aa0003f5 mov x21, x0 7648: a90d9be5 stp x5, x6, [sp, #216] 764c: aa0303f9 mov x25, x3 7650: aa0403fa mov x26, x4 7654: f90077e7 str x7, [sp, #232] flags |= __WQ_ORDERED; 7658: 1a940034 csel w20, w1, w20, eq // eq = none if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) 765c: 363802d4 tbz w20, #7, 76b4 <__alloc_workqueue_key+0xb4> 7660: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 7664: 39400000 ldrb w0, [x0] 7668: 34000260 cbz w0, 76b4 <__alloc_workqueue_key+0xb4> flags |= WQ_UNBOUND; 766c: 
321f0294 orr w20, w20, #0x2 return __kmalloc(size, flags); 7670: 52901801 mov w1, #0x80c0 // #32960 7674: d2805100 mov x0, #0x288 // #648 7678: 72a02801 movk w1, #0x140, lsl #16 767c: 94000000 bl 0 <__kmalloc> 7680: aa0003f3 mov x19, x0 if (!wq) 7684: b40013a0 cbz x0, 78f8 <__alloc_workqueue_key+0x2f8> return kmem_cache_alloc_trace( 7688: 90000000 adrp x0, 0 <kmalloc_caches> void *ret = kmem_cache_alloc(s, flags); 768c: 52901801 mov w1, #0x80c0 // #32960 7690: 72a02801 movk w1, #0x140, lsl #16 7694: f9400000 ldr x0, [x0] 7698: 94000000 bl 0 <kmem_cache_alloc> if (!attrs) 769c: b4001fc0 cbz x0, 7a94 <__alloc_workqueue_key+0x494> *dst = *src; 76a0: 90000017 adrp x23, 0 <__cpu_possible_mask> wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); 76a4: f900b260 str x0, [x19, #352] 76a8: f94002e1 ldr x1, [x23] 76ac: f9000401 str x1, [x0, #8] if (!wq->unbound_attrs) 76b0: 14000008 b 76d0 <__alloc_workqueue_key+0xd0> if (flags & WQ_UNBOUND) 76b4: 370ffdf4 tbnz w20, #1, 7670 <__alloc_workqueue_key+0x70> return __kmalloc(size, flags); 76b8: 52901801 mov w1, #0x80c0 // #32960 76bc: d2805000 mov x0, #0x280 // #640 76c0: 72a02801 movk w1, #0x140, lsl #16 76c4: 94000000 bl 0 <__kmalloc> 76c8: aa0003f3 mov x19, x0 if (!wq) 76cc: b4001160 cbz x0, 78f8 <__alloc_workqueue_key+0x2f8> va_start(args, lock_name); 76d0: 9103c3e2 add x2, sp, #0xf0 76d4: a9048be2 stp x2, x2, [sp, #72] 76d8: 910343e0 add x0, sp, #0xd0 76dc: 128002e1 mov w1, #0xffffffe8 // #-24 76e0: f9002fe0 str x0, [sp, #88] 76e4: 290c7fe1 stp w1, wzr, [sp, #96] vsnprintf(wq->name, sizeof(wq->name), fmt, args); 76e8: 9106a277 add x23, x19, #0x1a8 76ec: a94497e4 ldp x4, x5, [sp, #72] 76f0: a90217e4 stp x4, x5, [sp, #32] 76f4: 910083e3 add x3, sp, #0x20 76f8: a94597e4 ldp x4, x5, [sp, #88] 76fc: aa1503e2 mov x2, x21 7700: aa1703e0 mov x0, x23 7704: d2800301 mov x1, #0x18 // #24 7708: f9000ff7 str x23, [sp, #24] mutex_init(&wq->mutex); 770c: 91008275 add x21, x19, #0x20 vsnprintf(wq->name, sizeof(wq->name), fmt, args); 7710: a90317e4 stp x4, x5, [sp, #48] 7714: 94000000 bl 0 <vsnprintf> max_active = max_active ?: WQ_DFL_ACTIVE; 7718: 7100031f cmp w24, #0x0 max_active = wq_clamp_max_active(max_active, flags, wq->name); 771c: aa1703e2 mov x2, x23 7720: 2a1403e1 mov w1, w20 max_active = max_active ?: WQ_DFL_ACTIVE; 7724: 52802000 mov w0, #0x100 // #256 max_active = wq_clamp_max_active(max_active, flags, wq->name); 7728: 1a801300 csel w0, w24, w0, ne // ne = any mutex_init(&wq->mutex); 772c: 90000017 adrp x23, 0 <pwq_activate_delayed_work> max_active = wq_clamp_max_active(max_active, flags, wq->name); 7730: 97ffe3ee bl 6e8 <wq_clamp_max_active> wq->saved_max_active = max_active; 7734: b9015e60 str w0, [x19, #348] wq->flags = flags; 7738: b9020274 str w20, [x19, #512] mutex_init(&wq->mutex); 773c: 910002f7 add x23, x23, #0x0 7740: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 7744: 91000021 add x1, x1, #0x0 7748: 910a82e2 add x2, x23, #0x2a0 774c: 91006021 add x1, x1, #0x18 7750: aa1503e0 mov x0, x21 7754: 94000000 bl 0 <__rt_mutex_init> 7758: 910aa2e2 add x2, x23, #0x2a8 775c: aa1503e0 mov x0, x21 7760: 90000001 adrp x1, 0 <pwq_activate_delayed_work> 7764: 91000021 add x1, x1, #0x0 7768: 94000000 bl 0 <__mutex_do_init> case 4: *(volatile __u32 *)p = *(__u32 *)res; break; 776c: b901127f str wzr, [x19, #272] case 8: *(volatile __u64 *)p = *(__u64 *)res; break; 7770: f9000273 str x19, [x19] INIT_LIST_HEAD(&wq->flusher_queue); 7774: 91048263 add x3, x19, #0x120 7778: f9009263 str x3, [x19, #288] INIT_LIST_HEAD(&wq->flusher_overflow); 777c: 9104c260 add 
x0, x19, #0x130 INIT_LIST_HEAD(&wq->maydays); 7780: 91050264 add x4, x19, #0x140 7784: f9009a60 str x0, [x19, #304] list->prev = list; 7788: f9000673 str x19, [x19, #8] lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 778c: aa1903e2 mov x2, x25 7790: f9009663 str x3, [x19, #296] 7794: aa1a03e1 mov x1, x26 7798: f9009e60 str x0, [x19, #312] 779c: 52800003 mov w3, #0x0 // #0 77a0: f900a264 str x4, [x19, #320] 77a4: 9105e260 add x0, x19, #0x178 77a8: f900a664 str x4, [x19, #328] INIT_LIST_HEAD(&wq->list); 77ac: 91004278 add x24, x19, #0x10 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); 77b0: 94000000 bl 0 <lockdep_init_map> 77b4: f9000a78 str x24, [x19, #16] bool highpri = wq->flags & WQ_HIGHPRI; 77b8: b9420260 ldr w0, [x19, #512] 77bc: f9000e78 str x24, [x19, #24] 77c0: d3441019 ubfx x25, x0, #4, #1 if (!(wq->flags & WQ_UNBOUND)) { 77c4: 36080cc0 tbz w0, #1, 795c <__alloc_workqueue_key+0x35c> ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 77c8: 93407f39 sxtw x25, w25 } else if (wq->flags & __WQ_ORDERED) { 77cc: 36880800 tbz w0, #17, 78cc <__alloc_workqueue_key+0x2cc> ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 77d0: 8b190ef7 add x23, x23, x25, lsl #3 77d4: aa1303e0 mov x0, x19 77d8: f9415ae1 ldr x1, [x23, #688] 77dc: 94000000 bl 5c60 <apply_workqueue_attrs> WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 77e0: 350007e0 cbnz w0, 78dc <__alloc_workqueue_key+0x2dc> 77e4: f940b660 ldr x0, [x19, #360] 77e8: f9400261 ldr x1, [x19] 77ec: 9101c000 add x0, x0, #0x70 77f0: eb00003f cmp x1, x0 77f4: 54001480 b.eq 7a84 <__alloc_workqueue_key+0x484> // b.none 77f8: f9400fe1 ldr x1, [sp, #24] 77fc: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 7800: 91000000 add x0, x0, #0x0 7804: 94000000 bl 0 <printk> 7808: d4210000 brk #0x800 780c: d503201f nop if (flags & WQ_MEM_RECLAIM) { 7810: 37181014 tbnz w20, #3, 7a10 <__alloc_workqueue_key+0x410> if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) 7814: b9420260 ldr w0, [x19, #512] 7818: 36300080 tbz w0, #6, 7828 <__alloc_workqueue_key+0x228> 781c: aa1303e0 mov x0, x19 7820: 94000000 bl 7498 <workqueue_sysfs_register> 7824: 35001280 cbnz w0, 7a74 <__alloc_workqueue_key+0x474> mutex_lock(&wq_pool_mutex); 7828: 90000017 adrp x23, 0 <pwq_activate_delayed_work> 782c: 910002f7 add x23, x23, #0x0 7830: aa1703e0 mov x0, x23 7834: 94000000 bl 0 <_mutex_lock> mutex_lock(&wq->mutex); 7838: aa1503e0 mov x0, x21 783c: 94000000 bl 0 <_mutex_lock> __READ_ONCE_SIZE; 7840: f9400274 ldr x20, [x19] for_each_pwq(pwq, wq) 7844: eb14027f cmp x19, x20 7848: d101c294 sub x20, x20, #0x70 784c: 54000700 b.eq 792c <__alloc_workqueue_key+0x32c> // b.none 7850: 90000019 adrp x25, 0 <pwq_activate_delayed_work> 7854: 9000001a adrp x26, 0 <pwq_activate_delayed_work> 7858: 91000339 add x25, x25, #0x0 785c: 9100035a add x26, x26, #0x0 7860: a90c73fb stp x27, x28, [sp, #192] 7864: 9103627b add x27, x19, #0xd8 7868: 14000007 b 7884 <__alloc_workqueue_key+0x284> pwq_adjust_max_active(pwq); 786c: aa1403e0 mov x0, x20 7870: 97ffe30c bl 4a0 <pwq_adjust_max_active> 7874: f9403a94 ldr x20, [x20, #112] for_each_pwq(pwq, wq) 7878: eb14027f cmp x19, x20 787c: d101c294 sub x20, x20, #0x70 7880: 54000540 b.eq 7928 <__alloc_workqueue_key+0x328> // b.none 7884: 94000000 bl 0 <debug_lockdep_rcu_enabled> 7888: 34ffff20 cbz w0, 786c <__alloc_workqueue_key+0x26c> 788c: 39404b20 ldrb w0, [x25, #18] 7890: 35fffee0 cbnz w0, 786c <__alloc_workqueue_key+0x26c> 7894: 94000000 bl 0 <rcu_read_lock_held> 7898: 35fffea0 cbnz w0, 786c 
<__alloc_workqueue_key+0x26c> 789c: 12800001 mov w1, #0xffffffff // #-1 78a0: aa1b03e0 mov x0, x27 78a4: 94000000 bl 0 <lock_is_held_type> 78a8: 35fffe20 cbnz w0, 786c <__alloc_workqueue_key+0x26c> 78ac: 52800023 mov w3, #0x1 // #1 78b0: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 78b4: aa1a03e2 mov x2, x26 78b8: 91000000 add x0, x0, #0x0 78bc: 52820461 mov w1, #0x1023 // #4131 78c0: 39004b23 strb w3, [x25, #18] 78c4: 94000000 bl 0 <lockdep_rcu_suspicious> 78c8: 17ffffe9 b 786c <__alloc_workqueue_key+0x26c> return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 78cc: 8b190ef7 add x23, x23, x25, lsl #3 78d0: aa1303e0 mov x0, x19 78d4: f94162e1 ldr x1, [x23, #704] 78d8: 94000000 bl 5c60 <apply_workqueue_attrs> if (alloc_and_link_pwqs(wq) < 0) 78dc: 36fff9a0 tbz w0, #31, 7810 <__alloc_workqueue_key+0x210> free_workqueue_attrs(wq->unbound_attrs); 78e0: f940b260 ldr x0, [x19, #352] if (attrs) { 78e4: b4000040 cbz x0, 78ec <__alloc_workqueue_key+0x2ec> kfree(attrs); 78e8: 94000000 bl 0 <kfree> kfree(wq); 78ec: aa1303e0 mov x0, x19 return NULL; 78f0: d2800013 mov x19, #0x0 // #0 kfree(wq); 78f4: 94000000 bl 0 <kfree> } 78f8: aa1303e0 mov x0, x19 78fc: f94037e2 ldr x2, [sp, #104] 7900: f94002c1 ldr x1, [x22] 7904: ca010041 eor x1, x2, x1 7908: b5000ca1 cbnz x1, 7a9c <__alloc_workqueue_key+0x49c> 790c: a9477bfd ldp x29, x30, [sp, #112] 7910: a94853f3 ldp x19, x20, [sp, #128] 7914: a9495bf5 ldp x21, x22, [sp, #144] 7918: a94a63f7 ldp x23, x24, [sp, #160] 791c: a94b6bf9 ldp x25, x26, [sp, #176] 7920: 9103c3ff add sp, sp, #0xf0 7924: d65f03c0 ret 7928: a94c73fb ldp x27, x28, [sp, #192] mutex_unlock(&wq->mutex); 792c: aa1503e0 mov x0, x21 7930: 94000000 bl 0 <_mutex_unlock> __list_add_rcu(new, head->prev, head); 7934: 911002e1 add x1, x23, #0x400 new->next = next; 7938: 911302e0 add x0, x23, #0x4c0 793c: f9000a60 str x0, [x19, #16] __list_add_rcu(new, head->prev, head); 7940: f9406420 ldr x0, [x1, #200] new->prev = prev; 7944: f9000e60 str x0, [x19, #24] rcu_assign_pointer(list_next_rcu(prev), new); 7948: c89ffc18 stlr x24, [x0] mutex_unlock(&wq_pool_mutex); 794c: aa1703e0 mov x0, x23 next->prev = new; 7950: f9006438 str x24, [x1, #200] 7954: 94000000 bl 0 <_mutex_unlock> return wq; 7958: 17ffffe8 b 78f8 <__alloc_workqueue_key+0x2f8> wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); 795c: d2802001 mov x1, #0x100 // #256 7960: aa0103e0 mov x0, x1 7964: 94000000 bl 0 <__alloc_percpu> 7968: f9010660 str x0, [x19, #520] if (!wq->cpu_pwqs) 796c: b4fffba0 cbz x0, 78e0 <__alloc_workqueue_key+0x2e0> init_pwq(pwq, wq, &cpu_pools[highpri]); 7970: 92401f39 and x25, x25, #0xff 7974: 90000017 adrp x23, 0 <__cpu_possible_mask> 7978: 9000001a adrp x26, 0 <nr_cpu_ids> 797c: 910002f7 add x23, x23, #0x0 7980: d37ffb20 lsl x0, x25, #1 7984: 9100035a add x26, x26, #0x0 7988: 8b190019 add x25, x0, x25 per_cpu(cpu_worker_pools, cpu); 798c: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 7990: 91000000 add x0, x0, #0x0 7994: a90c73fb stp x27, x28, [sp, #192] init_pwq(pwq, wq, &cpu_pools[highpri]); 7998: d377db21 lsl x1, x25, #9 per_cpu_ptr(wq->cpu_pwqs, cpu); 799c: 9000001b adrp x27, 0 <__per_cpu_offset> for_each_possible_cpu(cpu) { 79a0: 12800019 mov w25, #0xffffffff // #-1 per_cpu_ptr(wq->cpu_pwqs, cpu); 79a4: 9100037b add x27, x27, #0x0 per_cpu(cpu_worker_pools, cpu); 79a8: a90083e1 stp x1, x0, [sp, #8] 79ac: 14000010 b 79ec <__alloc_workqueue_key+0x3ec> 79b0: a9408be4 ldp x4, x2, [sp, #8] per_cpu_ptr(wq->cpu_pwqs, cpu); 79b4: f879db63 ldr x3, [x27, w25, sxtw #3] init_pwq(pwq, wq, &cpu_pools[highpri]); 
79b8: aa1303e1 mov x1, x19 per_cpu_ptr(wq->cpu_pwqs, cpu); 79bc: f9410660 ldr x0, [x19, #520] per_cpu(cpu_worker_pools, cpu); 79c0: 8b020062 add x2, x3, x2 init_pwq(pwq, wq, &cpu_pools[highpri]); 79c4: 8b040042 add x2, x2, x4 per_cpu_ptr(wq->cpu_pwqs, cpu); 79c8: 8b03001c add x28, x0, x3 init_pwq(pwq, wq, &cpu_pools[highpri]); 79cc: aa1c03e0 mov x0, x28 79d0: 97ffeb8c bl 2800 <init_pwq> mutex_lock(&wq->mutex); 79d4: aa1503e0 mov x0, x21 79d8: 94000000 bl 0 <_mutex_lock> link_pwq(pwq); 79dc: aa1c03e0 mov x0, x28 79e0: 97ffe2ec bl 590 <link_pwq> mutex_unlock(&wq->mutex); 79e4: aa1503e0 mov x0, x21 79e8: 94000000 bl 0 <_mutex_unlock> for_each_possible_cpu(cpu) { 79ec: 2a1903e0 mov w0, w25 79f0: aa1703e1 mov x1, x23 79f4: 94000000 bl 0 <cpumask_next> 79f8: 2a0003f9 mov w25, w0 79fc: b9400340 ldr w0, [x26] 7a00: 6b00033f cmp w25, w0 7a04: 54fffd63 b.cc 79b0 <__alloc_workqueue_key+0x3b0> // b.lo, b.ul, b.last 7a08: a94c73fb ldp x27, x28, [sp, #192] if (flags & WQ_MEM_RECLAIM) { 7a0c: 361ff054 tbz w20, #3, 7814 <__alloc_workqueue_key+0x214> rescuer = alloc_worker(NUMA_NO_NODE); 7a10: 97ffe958 bl 1f70 <alloc_worker.isra.1> 7a14: aa0003f4 mov x20, x0 if (!rescuer) 7a18: b40002e0 cbz x0, 7a74 <__alloc_workqueue_key+0x474> rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", 7a1c: f9400fe4 ldr x4, [sp, #24] rescuer->rescue_wq = wq; 7a20: f9004a93 str x19, [x20, #144] rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", 7a24: 90000003 adrp x3, 0 <pwq_activate_delayed_work> 7a28: 90000000 adrp x0, 0 <pwq_activate_delayed_work> 7a2c: 91000063 add x3, x3, #0x0 7a30: 91000000 add x0, x0, #0x0 7a34: 12800002 mov w2, #0xffffffff // #-1 7a38: aa1403e1 mov x1, x20 7a3c: 94000000 bl 0 <kthread_create_on_node> 7a40: f9002280 str x0, [x20, #64] if (IS_ERR(rescuer->task)) { 7a44: b140041f cmn x0, #0x1, lsl #12 7a48: 54000128 b.hi 7a6c <__alloc_workqueue_key+0x46c> // b.pmore wq->rescuer = rescuer; 7a4c: f900aa74 str x20, [x19, #336] kthread_bind_mask(rescuer->task, cpu_possible_mask); 7a50: 90000001 adrp x1, 0 <__cpu_possible_mask> 7a54: 91000021 add x1, x1, #0x0 7a58: f9402280 ldr x0, [x20, #64] 7a5c: 94000000 bl 0 <kthread_bind_mask> wake_up_process(rescuer->task); 7a60: f9402280 ldr x0, [x20, #64] 7a64: 94000000 bl 0 <wake_up_process> 7a68: 17ffff6b b 7814 <__alloc_workqueue_key+0x214> kfree(rescuer); 7a6c: aa1403e0 mov x0, x20 7a70: 94000000 bl 0 <kfree> destroy_workqueue(wq); 7a74: aa1303e0 mov x0, x19 return NULL; 7a78: d2800013 mov x19, #0x0 // #0 destroy_workqueue(wq); 7a7c: 94000000 bl 65e0 <destroy_workqueue> return NULL; 7a80: 17ffff9e b 78f8 <__alloc_workqueue_key+0x2f8> WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 7a84: f9400660 ldr x0, [x19, #8] 7a88: eb00003f cmp x1, x0 7a8c: 54ffeb61 b.ne 77f8 <__alloc_workqueue_key+0x1f8> // b.any 7a90: 17ffff60 b 7810 <__alloc_workqueue_key+0x210> wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); 7a94: f900b27f str xzr, [x19, #352] if (attrs) { 7a98: 17ffff95 b 78ec <__alloc_workqueue_key+0x2ec> 7a9c: a90c73fb stp x27, x28, [sp, #192] } 7aa0: 94000000 bl 0 <__stack_chk_fail> Disassembly of section .text.unlikely: 0000000000000000 <pr_cont_pool_info>: { 0: a9be7bfd stp x29, x30, [sp, #-32]! 
pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 4: 52800801 mov w1, #0x40 // #64 { 8: 910003fd mov x29, sp c: f9000bf3 str x19, [sp, #16] 10: aa0003f3 mov x19, x0 pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask); 14: 90000000 adrp x0, 0 <pr_cont_pool_info> 18: 91000000 add x0, x0, #0x0 1c: f9428662 ldr x2, [x19, #1288] 20: 91002042 add x2, x2, #0x8 24: 94000000 bl 0 <printk> if (pool->node != NUMA_NO_NODE) 28: b940f661 ldr w1, [x19, #244] 2c: 3100043f cmn w1, #0x1 30: 54000080 b.eq 40 <pr_cont_pool_info+0x40> // b.none pr_cont(" node=%d", pool->node); 34: 90000000 adrp x0, 0 <pr_cont_pool_info> 38: 91000000 add x0, x0, #0x0 3c: 94000000 bl 0 <printk> pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice); 40: f9428662 ldr x2, [x19, #1288] 44: 90000000 adrp x0, 0 <pr_cont_pool_info> 48: b940fe61 ldr w1, [x19, #252] 4c: 91000000 add x0, x0, #0x0 50: b9400042 ldr w2, [x2] 54: 94000000 bl 0 <printk> } 58: f9400bf3 ldr x19, [sp, #16] 5c: a8c27bfd ldp x29, x30, [sp], #32 60: d65f03c0 ret 0000000000000064 <pr_cont_work>: { 64: a9bf7bfd stp x29, x30, [sp, #-16]! if (work->func == wq_barrier_func) { 68: 90000003 adrp x3, 0 <pr_cont_pool_info> 6c: 91000063 add x3, x3, #0x0 { 70: 910003fd mov x29, sp if (work->func == wq_barrier_func) { 74: f9400c22 ldr x2, [x1, #24] { 78: 12001c00 and w0, w0, #0xff if (work->func == wq_barrier_func) { 7c: eb03005f cmp x2, x3 80: 540001a1 b.ne b4 <pr_cont_work+0x50> // b.any 84: f9405822 ldr x2, [x1, #176] pr_cont("%s BAR(%d)", comma ? "," : "", 88: 7100001f cmp w0, #0x0 8c: 90000001 adrp x1, 0 <pr_cont_pool_info> 90: 90000000 adrp x0, 0 <pr_cont_pool_info> 94: 91000021 add x1, x1, #0x0 98: 91000000 add x0, x0, #0x0 9c: b945a842 ldr w2, [x2, #1448] a0: 9a801021 csel x1, x1, x0, ne // ne = any a4: 90000000 adrp x0, 0 <pr_cont_pool_info> a8: 91000000 add x0, x0, #0x0 ac: 94000000 bl 0 <printk> b0: 1400000a b d8 <pr_cont_work+0x74> pr_cont("%s %pf", comma ? "," : "", work->func); b4: 7100001f cmp w0, #0x0 b8: 90000001 adrp x1, 0 <pr_cont_pool_info> bc: 90000000 adrp x0, 0 <pr_cont_pool_info> c0: 91000021 add x1, x1, #0x0 c4: 91000000 add x0, x0, #0x0 c8: 9a801021 csel x1, x1, x0, ne // ne = any cc: 90000000 adrp x0, 0 <pr_cont_pool_info> d0: 91000000 add x0, x0, #0x0 d4: 94000000 bl 0 <printk> } d8: a8c17bfd ldp x29, x30, [sp], #16 dc: d65f03c0 ret Disassembly of section .init.text: 0000000000000000 <wq_sysfs_init>: { 0: a9be7bfd stp x29, x30, [sp, #-32]! err = subsys_virtual_register(&wq_subsys, NULL); 4: d2800001 mov x1, #0x0 // #0 { 8: 910003fd mov x29, sp c: a90153f3 stp x19, x20, [sp, #16] err = subsys_virtual_register(&wq_subsys, NULL); 10: 90000013 adrp x19, 0 <wq_sysfs_init> 14: 91000273 add x19, x19, #0x0 18: 91080274 add x20, x19, #0x200 1c: aa1403e0 mov x0, x20 20: 94000000 bl 0 <subsys_virtual_register> if (err) 24: 35000080 cbnz w0, 34 <wq_sysfs_init+0x34> return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr); 28: f9410a60 ldr x0, [x19, #528] 2c: 910a8261 add x1, x19, #0x2a0 30: 94000000 bl 0 <device_create_file> } 34: a94153f3 ldp x19, x20, [sp, #16] 38: a8c27bfd ldp x29, x30, [sp], #32 3c: d65f03c0 ret 0000000000000040 <workqueue_init>: * Workqueues have been created and work items queued on them, but there * are no kworkers executing the work items yet. Populate the worker pools * with the initial workers and enable future kworker creations. */ int __init workqueue_init(void) { 40: a9bb7bfd stp x29, x30, [sp, #-80]! 
0000000000000040 <workqueue_init>:
 * Workqueues have been created and work items queued on them, but there
 * are no kworkers executing the work items yet. Populate the worker pools
 * with the initial workers and enable future kworker creations.
 */
int __init workqueue_init(void)
{
      40: a9bb7bfd  stp x29, x30, [sp, #-80]!
      44: 910003fd  mov x29, sp
      48: a9025bf5  stp x21, x22, [sp, #32]
	 * previously could be missing node hint and unbound pools NUMA
	 * affinity, fix them up.
	 */
	wq_numa_init();
	mutex_lock(&wq_pool_mutex);
      4c: 90000015  adrp x21, 0 <wq_sysfs_init>
      50: 910002b5  add x21, x21, #0x0
      54: aa1503e0  mov x0, x21
{
      58: a90153f3  stp x19, x20, [sp, #16]
	for_each_possible_cpu(cpu) {
      5c: 90000014  adrp x20, 0 <nr_cpu_ids>
	for_each_cpu_worker_pool(pool, cpu) {
      60: 90000013  adrp x19, 0 <wq_sysfs_init>
	for_each_possible_cpu(cpu) {
      64: 91000294  add x20, x20, #0x0
	for_each_cpu_worker_pool(pool, cpu) {
      68: 91000273  add x19, x19, #0x0
{
      6c: a90363f7  stp x23, x24, [sp, #48]
      70: a9046bf9  stp x25, x26, [sp, #64]
	mutex_lock(&wq_pool_mutex);
      74: 94000000  bl 0 <_mutex_lock>
	for_each_possible_cpu(cpu) {
      78: 12800000  mov w0, #0xffffffff  // #-1
      7c: 90000001  adrp x1, 0 <__cpu_possible_mask>
      80: 91000021  add x1, x1, #0x0
      84: 94000000  bl 0 <cpumask_next>
      88: b9400281  ldr w1, [x20]
      8c: 6b01001f  cmp w0, w1
      90: 54000202  b.cs d0 <workqueue_init+0x90>  // b.hs, b.nlast
	for_each_cpu_worker_pool(pool, cpu) {
      94: 90000003  adrp x3, 0 <__per_cpu_offset>
      98: 93407c04  sxtw x4, w0
      9c: 91000063  add x3, x3, #0x0
      a0: aa1303e1  mov x1, x19
      a4: f8647862  ldr x2, [x3, x4, lsl #3]
      a8: 8b020021  add x1, x1, x2
      ac: f8647865  ldr x5, [x3, x4, lsl #3]
      b0: aa1303e2  mov x2, x19
      b4: 8b050042  add x2, x2, x5
      b8: 91300042  add x2, x2, #0xc00
      bc: eb02003f  cmp x1, x2
      c0: 54fffde2  b.cs 7c <workqueue_init+0x3c>  // b.hs, b.nlast
	pool->node = cpu_to_node(cpu);
      c4: b900f43f  str wzr, [x1, #244]
	for_each_cpu_worker_pool(pool, cpu) {
      c8: 91180021  add x1, x1, #0x600
      cc: 17fffff8  b ac <workqueue_init+0x6c>
	}
	}
	list_for_each_entry(wq, &workqueues, list)
      d0: f94262b3  ldr x19, [x21, #1216]
      d4: 911302b6  add x22, x21, #0x4c0
      d8: d1004273  sub x19, x19, #0x10
      dc: 91004260  add x0, x19, #0x10
      e0: eb16001f  cmp x0, x22
      e4: 54000120  b.eq 108 <workqueue_init+0xc8>  // b.none
	wq_update_unbound_numa(wq, smp_processor_id(), true);
      e8: 94000000  bl 0 <debug_smp_processor_id>
      ec: 2a0003e1  mov w1, w0
      f0: 52800022  mov w2, #0x1  // #1
      f4: aa1303e0  mov x0, x19
      f8: 94000000  bl 0 <wq_sysfs_init>
	list_for_each_entry(wq, &workqueues, list)
      fc: f9400a73  ldr x19, [x19, #16]
     100: d1004273  sub x19, x19, #0x10
     104: 17fffff6  b dc <workqueue_init+0x9c>
	mutex_unlock(&wq_pool_mutex);
     108: aa1503e0  mov x0, x21
	/* create the initial workers */
	for_each_online_cpu(cpu) {
     10c: 90000017  adrp x23, 0 <__cpu_online_mask>
	for_each_cpu_worker_pool(pool, cpu) {
     110: 90000015  adrp x21, 0 <wq_sysfs_init>
	for_each_online_cpu(cpu) {
     114: 910002f7  add x23, x23, #0x0
	for_each_cpu_worker_pool(pool, cpu) {
     118: 910002b5  add x21, x21, #0x0
	for_each_online_cpu(cpu) {
     11c: 12800016  mov w22, #0xffffffff  // #-1
	for_each_cpu_worker_pool(pool, cpu) {
     120: 90000018  adrp x24, 0 <__per_cpu_offset>
	mutex_unlock(&wq_pool_mutex);
     124: 94000000  bl 0 <_mutex_unlock>
	for_each_online_cpu(cpu) {
     128: 2a1603e0  mov w0, w22
     12c: aa1703e1  mov x1, x23
     130: 94000000  bl 0 <cpumask_next>
     134: 2a0003f6  mov w22, w0
     138: b9400280  ldr w0, [x20]
     13c: 6b0002df  cmp w22, w0
     140: 54000282  b.cs 190 <workqueue_init+0x150>  // b.hs, b.nlast
	for_each_cpu_worker_pool(pool, cpu) {
     144: 91000319  add x25, x24, #0x0
     148: 93407eda  sxtw x26, w22
     14c: aa1503f3  mov x19, x21
     150: f87a7b20  ldr x0, [x25, x26, lsl #3]
     154: 8b000273  add x19, x19, x0
     158: f87a7b21  ldr x1, [x25, x26, lsl #3]
     15c: aa1503e0  mov x0, x21
     160: 8b010000  add x0, x0, x1
     164: 91300000  add x0, x0, #0xc00
     168: eb00027f  cmp x19, x0
     16c: 54fffde2  b.cs 128 <workqueue_init+0xe8>  // b.hs, b.nlast
	pool->flags &= ~POOL_DISASSOCIATED;
     170: b940fe61  ldr w1, [x19, #252]
	BUG_ON(!create_worker(pool));
     174: aa1303e0  mov x0, x19
	pool->flags &= ~POOL_DISASSOCIATED;
     178: 121d7821  and w1, w1, #0xfffffffb
     17c: b900fe61  str w1, [x19, #252]
	BUG_ON(!create_worker(pool));
     180: 94000000  bl 0 <wq_sysfs_init>
     184: b4000380  cbz x0, 1f4 <workqueue_init+0x1b4>
	for_each_cpu_worker_pool(pool, cpu) {
     188: 91180273  add x19, x19, #0x600
     18c: 17fffff3  b 158 <workqueue_init+0x118>
	}
	}
	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
     190: 90000014  adrp x20, 0 <wq_sysfs_init>
     194: 91000294  add x20, x20, #0x0
     198: 9101a296  add x22, x20, #0x68
     19c: d2800015  mov x21, #0x0  // #0
     1a0: f8757ad3  ldr x19, [x22, x21, lsl #3]
     1a4: b50001f3  cbnz x19, 1e0 <workqueue_init+0x1a0>
     1a8: 910006b5  add x21, x21, #0x1
     1ac: f10102bf  cmp x21, #0x40
     1b0: 54ffff81  b.ne 1a0 <workqueue_init+0x160>  // b.any
	BUG_ON(!create_worker(pool));
	wq_online = true;
     1b4: 52800021  mov w1, #0x1  // #1
     1b8: 39012281  strb w1, [x20, #72]
	wq_watchdog_init();
	return 0;
}
     1bc: 52800000  mov w0, #0x0  // #0
     1c0: a94153f3  ldp x19, x20, [sp, #16]
     1c4: a9425bf5  ldp x21, x22, [sp, #32]
     1c8: a94363f7  ldp x23, x24, [sp, #48]
     1cc: a9446bf9  ldp x25, x26, [sp, #64]
     1d0: a8c57bfd  ldp x29, x30, [sp], #80
     1d4: d65f03c0  ret
	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
     1d8: f9428a73  ldr x19, [x19, #1296]
     1dc: b4fffe73  cbz x19, 1a8 <workqueue_init+0x168>
     1e0: d1144273  sub x19, x19, #0x510
     1e4: b4fffe33  cbz x19, 1a8 <workqueue_init+0x168>
	BUG_ON(!create_worker(pool));
     1e8: aa1303e0  mov x0, x19
     1ec: 94000000  bl 0 <wq_sysfs_init>
     1f0: b5ffff40  cbnz x0, 1d8 <workqueue_init+0x198>
     1f4: d4210000  brk #0x800
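That is the end of workqueue_init(). Reconstructed from the interleaved source lines it is roughly the following; the local variable declarations and comment wording are taken from mainline 4.14 and may not match the vendor tree word for word. The BUG_ON(!create_worker(pool)) calls are where the initial per-cpu and unbound kworkers get spawned:

	int __init workqueue_init(void)
	{
		struct workqueue_struct *wq;
		struct worker_pool *pool;
		int cpu, bkt;

		/* CPU-to-node mapping was not available at early init; fix it up now */
		wq_numa_init();

		mutex_lock(&wq_pool_mutex);

		for_each_possible_cpu(cpu) {
			for_each_cpu_worker_pool(pool, cpu) {
				pool->node = cpu_to_node(cpu);
			}
		}

		list_for_each_entry(wq, &workqueues, list)
			wq_update_unbound_numa(wq, smp_processor_id(), true);

		mutex_unlock(&wq_pool_mutex);

		/* create the initial workers */
		for_each_online_cpu(cpu) {
			for_each_cpu_worker_pool(pool, cpu) {
				pool->flags &= ~POOL_DISASSOCIATED;
				BUG_ON(!create_worker(pool));
			}
		}

		hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
			BUG_ON(!create_worker(pool));

		wq_online = true;
		wq_watchdog_init();

		return 0;
	}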
00000000000001f8 <workqueue_init_early>:
{
     1f8: d10203ff  sub sp, sp, #0x80
	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
     1fc: d2dffd87  mov x7, #0xffec00000000  // #281389077364736
     200: f2ffffe7  movk x7, #0xffff, lsl #48
	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
     204: d2802002  mov x2, #0x100  // #256
     208: d2800004  mov x4, #0x0  // #0
     20c: aa0203e1  mov x1, x2
{
     210: a9027bfd  stp x29, x30, [sp, #32]
     214: 910083fd  add x29, sp, #0x20
	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
     218: d2a00083  mov x3, #0x40000  // #262144
{
     21c: a90563f7  stp x23, x24, [sp, #80]
     220: 90000017  adrp x23, 0 <__cpu_possible_mask>
     224: f94002e6  ldr x6, [x23]
     228: a90353f3  stp x19, x20, [sp, #48]
     22c: 90000013  adrp x19, 0 <wq_sysfs_init>
     230: 91000273  add x19, x19, #0x0
     234: a9045bf5  stp x21, x22, [sp, #64]
     238: 90000015  adrp x21, 0 <__stack_chk_guard>
     23c: 910002b5  add x21, x21, #0x0
	for_each_cpu_worker_pool(pool, cpu) {
     240: 90000016  adrp x22, 0 <wq_sysfs_init>
{
     244: f94002a0  ldr x0, [x21]
     248: f9000fe0  str x0, [sp, #24]
     24c: d2800000  mov x0, #0x0  // #0
	for_each_cpu_worker_pool(pool, cpu) {
     250: 910002d6  add x22, x22, #0x0
{
     254: a9066bf9  stp x25, x26, [sp, #96]
	for_each_possible_cpu(cpu) {
     258: 9000001a  adrp x26, 0 <nr_cpu_ids>
     25c: 9100035a  add x26, x26, #0x0
	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
     260: 90000000  adrp x0, 0 <wq_sysfs_init>
     264: 91000000  add x0, x0, #0x0
	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
     268: a9009ff7  stp x23, x7, [sp, #8]
	for_each_possible_cpu(cpu) {
     26c: 12800017  mov w23, #0xffffffff  // #-1
     270: f9002a66  str x6, [x19, #80]
{
     274: a90773fb  stp x27, x28, [sp, #112]
	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
     278: 94000000  bl 0 <kmem_cache_create>
     27c: f9002260  str x0, [x19, #64]
	for_each_possible_cpu(cpu) {
     280: f94007e0  ldr x0, [sp, #8]
     284: 91000018  add x24, x0, #0x0
     288: 2a1703e0  mov w0, w23
     28c: aa1803e1  mov x1, x24
     290: 94000000  bl 0 <cpumask_next>
     294: 2a0003f7  mov w23, w0
     298: b9400340  ldr w0, [x26]
     29c: 6b0002ff  cmp w23, w0
     2a0: 540005c2  b.cs 358 <workqueue_init_early+0x160>  // b.hs, b.nlast
	for_each_cpu_worker_pool(pool, cpu) {
     2a4: 90000019  adrp x25, 0 <__per_cpu_offset>
     2a8: 93407efc  sxtw x28, w23
     2ac: 91000339  add x25, x25, #0x0
extern const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
     2b0: 924016f4  and x20, x23, #0x3f
	p -= cpu / BITS_PER_LONG;
     2b4: 53067ee0  lsr w0, w23, #6
     2b8: 91000694  add x20, x20, #0x1
     2bc: cb000294  sub x20, x20, x0
     2c0: aa1603e1  mov x1, x22
     2c4: f87c7b22  ldr x2, [x25, x28, lsl #3]
     2c8: 90000000  adrp x0, 0 <cpu_bit_bitmap>
     2cc: 91000000  add x0, x0, #0x0
	mutex_lock(&wq_pool_mutex);
     2d0: 90000018  adrp x24, 0 <wq_sysfs_init>
	for_each_cpu_worker_pool(pool, cpu) {
     2d4: 8b02003b  add x27, x1, x2
	mutex_lock(&wq_pool_mutex);
     2d8: 91000318  add x24, x24, #0x0
     2dc: 8b140c14  add x20, x0, x20, lsl #3
     2e0: 910043e0  add x0, sp, #0x10
     2e4: f90003e0  str x0, [sp]
	for_each_cpu_worker_pool(pool, cpu) {
     2e8: f87c7b22  ldr x2, [x25, x28, lsl #3]
     2ec: aa1603e0  mov x0, x22
     2f0: 8b020000  add x0, x0, x2
     2f4: 91300000  add x0, x0, #0xc00
     2f8: eb00037f  cmp x27, x0
     2fc: 54fffc22  b.cs 280 <workqueue_init_early+0x88>  // b.hs, b.nlast
	BUG_ON(init_worker_pool(pool));
     300: aa1b03e0  mov x0, x27
     304: 94000000  bl 0 <wq_sysfs_init>
     308: 350010c0  cbnz w0, 520 <workqueue_init_early+0x328>
	cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
     30c: f9428762  ldr x2, [x27, #1288]
	pool->cpu = cpu;
     310: b900f377  str w23, [x27, #240]
	pool->attrs->nice = std_nice[i++];
     314: f94003e0  ldr x0, [sp]
     318: f9400284  ldr x4, [x20]
     31c: f9000444  str x4, [x2, #8]
     320: b8404403  ldr w3, [x0], #4
     324: f9428762  ldr x2, [x27, #1288]
     328: f90003e0  str x0, [sp]
	mutex_lock(&wq_pool_mutex);
     32c: aa1803e0  mov x0, x24
	pool->attrs->nice = std_nice[i++];
     330: b9000043  str w3, [x2]
	pool->node = cpu_to_node(cpu);
     334: b900f77f  str wzr, [x27, #244]
	mutex_lock(&wq_pool_mutex);
     338: 94000000  bl 0 <_mutex_lock>
	BUG_ON(worker_pool_assign_id(pool));
     33c: aa1b03e0  mov x0, x27
     340: 94000000  bl 0 <wq_sysfs_init>
     344: 35000ee0  cbnz w0, 520 <workqueue_init_early+0x328>
	for_each_cpu_worker_pool(pool, cpu) {
     348: 9118037b  add x27, x27, #0x600
	mutex_unlock(&wq_pool_mutex);
     34c: aa1803e0  mov x0, x24
     350: 94000000  bl 0 <_mutex_unlock>
     354: 17ffffe5  b 2e8 <workqueue_init_early+0xf0>
	return kmem_cache_alloc_trace(
     358: 90000016  adrp x22, 0 <kmalloc_caches>
	void *ret = kmem_cache_alloc(s, flags);
     35c: 52901817  mov w23, #0x80c0  // #32960
	return kmem_cache_alloc_trace(
     360: 910002d6  add x22, x22, #0x0
	unbound_std_wq_attrs[i] = attrs;
     364: 910b027b  add x27, x19, #0x2c0
	ordered_wq_attrs[i] = attrs;
     368: 910ac279  add x25, x19, #0x2b0
     36c: d2800014  mov x20, #0x0  // #0
	void *ret = kmem_cache_alloc(s, flags);
     370: 72a02817  movk w23, #0x140, lsl #16
	attrs->no_numa = true;
     374: 5280003a  mov w26, #0x1  // #1
     378: f9401ec0  ldr x0, [x22, #56]
     37c: 2a1703e1  mov w1, w23
     380: 94000000  bl 0 <kmem_cache_alloc>
     384: aa0003e2  mov x2, x0
	if (!attrs)
     388: b4000cc0  cbz x0, 520 <workqueue_init_early+0x328>
	attrs->nice = std_nice[i];
     38c: 910043e4  add x4, sp, #0x10
	unbound_std_wq_attrs[i] = attrs;
     390: f8347b60  str x0, [x27, x20, lsl #3]
     394: f9400303  ldr x3, [x24]
     398: f9000443  str x3, [x2, #8]
     39c: f9401ec0  ldr x0, [x22, #56]
     3a0: 2a1703e1  mov w1, w23
	attrs->nice = std_nice[i];
     3a4: b874789c  ldr w28, [x4, x20, lsl #2]
     3a8: b900005c  str w28, [x2]
     3ac: 94000000  bl 0 <kmem_cache_alloc>
	if (!attrs)
     3b0: b4000b80  cbz x0, 520 <workqueue_init_early+0x328>
     3b4: f9400301  ldr x1, [x24]
	ordered_wq_attrs[i] = attrs;
     3b8: f8347b20  str x0, [x25, x20, lsl #3]
	attrs->nice = std_nice[i];
     3bc: b900001c  str w28, [x0]
     3c0: f9000401  str x1, [x0, #8]
	attrs->no_numa = true;
     3c4: 3900401a  strb w26, [x0, #16]
	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
     3c8: b5000074  cbnz x20, 3d4 <workqueue_init_early+0x1dc>
     3cc: d2800034  mov x20, #0x1  // #1
     3d0: 17ffffea  b 378 <workqueue_init_early+0x180>
	system_wq = alloc_workqueue("events", 0, 0);
     3d4: 90000016  adrp x22, 0 <wq_sysfs_init>
     3d8: 910b4263  add x3, x19, #0x2d0
     3dc: 52800002  mov w2, #0x0  // #0
     3e0: 52800001  mov w1, #0x0  // #0
     3e4: 90000004  adrp x4, 0 <wq_sysfs_init>
     3e8: 90000000  adrp x0, 0 <wq_sysfs_init>
     3ec: 91000084  add x4, x4, #0x0
     3f0: 91000000  add x0, x0, #0x0
     3f4: 94000000  bl 7600 <__alloc_workqueue_key>
     3f8: 910002d4  add x20, x22, #0x0
	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
     3fc: 910b6263  add x3, x19, #0x2d8
	system_wq = alloc_workqueue("events", 0, 0);
     400: f90002c0  str x0, [x22]
	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
     404: 52800002  mov w2, #0x0  // #0
     408: 52800201  mov w1, #0x10  // #16
     40c: 90000004  adrp x4, 0 <wq_sysfs_init>
     410: 90000000  adrp x0, 0 <wq_sysfs_init>
     414: 91000084  add x4, x4, #0x0
     418: 91000000  add x0, x0, #0x0
     41c: 94000000  bl 7600 <__alloc_workqueue_key>
     420: f9000680  str x0, [x20, #8]
	system_long_wq = alloc_workqueue("events_long", 0, 0);
     424: 90000005  adrp x5, 0 <wq_sysfs_init>
     428: 910b8263  add x3, x19, #0x2e0
     42c: 910000a0  add x0, x5, #0x0
     430: 52800002  mov w2, #0x0  // #0
     434: 52800001  mov w1, #0x0  // #0
     438: 90000004  adrp x4, 0 <wq_sysfs_init>
     43c: 91000084  add x4, x4, #0x0
     440: 94000000  bl 7600 <__alloc_workqueue_key>
     444: f9000a80  str x0, [x20, #16]
     448: f9400300  ldr x0, [x24]
     44c: 94000000  bl 0 <__sw_hweight64>
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
     450: 531e7400  lsl w0, w0, #2
     454: 7108001f  cmp w0, #0x200
     458: 910ba263  add x3, x19, #0x2e8
     45c: 52800041  mov w1, #0x2  // #2
     460: 52804002  mov w2, #0x200  // #512
     464: 90000004  adrp x4, 0 <wq_sysfs_init>
     468: 1a82a002  csel w2, w0, w2, ge  // ge = tcont
     46c: 91000084  add x4, x4, #0x0
     470: 90000000  adrp x0, 0 <wq_sysfs_init>
     474: 91000000  add x0, x0, #0x0
     478: 94000000  bl 7600 <__alloc_workqueue_key>
	system_freezable_wq = alloc_workqueue("events_freezable",
     47c: 90000005  adrp x5, 0 <wq_sysfs_init>
     480: 910bc263  add x3, x19, #0x2f0
     484: 52800002  mov w2, #0x0  // #0
     488: 52800081  mov w1, #0x4  // #4
     48c: 90000004  adrp x4, 0 <wq_sysfs_init>
     490: 91000084  add x4, x4, #0x0
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
     494: f9000e80  str x0, [x20, #24]
	system_freezable_wq = alloc_workqueue("events_freezable",
     498: 910000a0  add x0, x5, #0x0
     49c: 94000000  bl 7600 <__alloc_workqueue_key>
     4a0: f9001280  str x0, [x20, #32]
	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
     4a4: 90000005  adrp x5, 0 <wq_sysfs_init>
     4a8: 910be263  add x3, x19, #0x2f8
     4ac: 52800002  mov w2, #0x0  // #0
     4b0: 52801001  mov w1, #0x80  // #128
     4b4: 910000a0  add x0, x5, #0x0
     4b8: 90000004  adrp x4, 0 <wq_sysfs_init>
     4bc: 91000084  add x4, x4, #0x0
     4c0: 94000000  bl 7600 <__alloc_workqueue_key>
	system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
     4c4: 90000005  adrp x5, 0 <wq_sysfs_init>
     4c8: 52801081  mov w1, #0x84  // #132
     4cc: 910c0263  add x3, x19, #0x300
     4d0: 90000004  adrp x4, 0 <wq_sysfs_init>
     4d4: 52800002  mov w2, #0x0  // #0
     4d8: 91000084  add x4, x4, #0x0
	system_power_efficient_wq = alloc_workqueue("events_power_efficient",
     4dc: f9001680  str x0, [x20, #40]
	system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
     4e0: 910000a0  add x0, x5, #0x0
     4e4: 94000000  bl 7600 <__alloc_workqueue_key>
     4e8: f9001a80  str x0, [x20, #48]
	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
     4ec: f94002c1  ldr x1, [x22]
     4f0: b4000181  cbz x1, 520 <workqueue_init_early+0x328>
     4f4: f9400681  ldr x1, [x20, #8]
     4f8: b4000141  cbz x1, 520 <workqueue_init_early+0x328>
     4fc: f9400a81  ldr x1, [x20, #16]
     500: b4000101  cbz x1, 520 <workqueue_init_early+0x328>
     504: f9400e81  ldr x1, [x20, #24]
     508: b40000c1  cbz x1, 520 <workqueue_init_early+0x328>
     50c: f9401281  ldr x1, [x20, #32]
     510: b4000081  cbz x1, 520 <workqueue_init_early+0x328>
     514: f9401681  ldr x1, [x20, #40]
     518: b4000041  cbz x1, 520 <workqueue_init_early+0x328>
     51c: b5000040  cbnz x0, 524 <workqueue_init_early+0x32c>
     520: d4210000  brk #0x800
}
     524: f9400fe0  ldr x0, [sp, #24]
     528: f94002a1  ldr x1, [x21]
     52c: ca010001  eor x1, x0, x1
     530: 52800000  mov w0, #0x0  // #0
     534: b4000041  cbz x1, 53c <workqueue_init_early+0x344>
     538: 94000000  bl 0 <__stack_chk_fail>
     53c: a9427bfd  ldp x29, x30, [sp, #32]
     540: a94353f3  ldp x19, x20, [sp, #48]
     544: a9445bf5  ldp x21, x22, [sp, #64]
     548: a94563f7  ldp x23, x24, [sp, #80]
     54c: a9466bf9  ldp x25, x26, [sp, #96]
     550: a94773fb  ldp x27, x28, [sp, #112]
     554: 910203ff  add sp, sp, #0x80
     558: d65f03c0  ret
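For completeness, workqueue_init_early(), the last function in the dump, reconstructed the same way. The wq_unbound_cpumask setup and the WQ_UNBOUND_MAX_ACTIVE expression are taken from mainline 4.14 (the disassembly only shows a single-word copy of __cpu_possible_mask and the max(512, 4 * num_possible_cpus()) clamp), so treat those lines as assumptions; everything else follows the interleaved source above:

	int __init workqueue_init_early(void)
	{
		int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
		int i, cpu;

		BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
		cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);

		pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);

		/* initialize the per-CPU worker pools */
		for_each_possible_cpu(cpu) {
			struct worker_pool *pool;

			i = 0;
			for_each_cpu_worker_pool(pool, cpu) {
				BUG_ON(init_worker_pool(pool));
				pool->cpu = cpu;
				cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
				pool->attrs->nice = std_nice[i++];
				pool->node = cpu_to_node(cpu);

				/* allocate the pool ID */
				mutex_lock(&wq_pool_mutex);
				BUG_ON(worker_pool_assign_id(pool));
				mutex_unlock(&wq_pool_mutex);
			}
		}

		/* default unbound and ordered wq attrs */
		for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
			struct workqueue_attrs *attrs;

			BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
			attrs->nice = std_nice[i];
			unbound_std_wq_attrs[i] = attrs;

			BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
			attrs->nice = std_nice[i];
			attrs->no_numa = true;
			ordered_wq_attrs[i] = attrs;
		}

		system_wq = alloc_workqueue("events", 0, 0);
		system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
		system_long_wq = alloc_workqueue("events_long", 0, 0);
		system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
						    WQ_UNBOUND_MAX_ACTIVE);
		system_freezable_wq = alloc_workqueue("events_freezable",
						      WQ_FREEZABLE, 0);
		system_power_efficient_wq = alloc_workqueue("events_power_efficient",
						      WQ_POWER_EFFICIENT, 0);
		system_freezable_power_efficient_wq =
			alloc_workqueue("events_freezable_power_efficient",
					WQ_FREEZABLE | WQ_POWER_EFFICIENT, 0);
		BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
		       !system_unbound_wq || !system_freezable_wq ||
		       !system_power_efficient_wq ||
		       !system_freezable_power_efficient_wq);

		return 0;
	}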