From: "Uladzislau Rezki (Sony)" <urezki@xxxxxxxxx> Update the kvfree_call_rcu() with head-less support, it means an object without any rcu_head structure can be reclaimed after GP. To store pointers there are two chain-arrays maintained one for SLAB and another one is for vmalloc. Both types of objects(head-less variant and regular one) are placed there based on the type. It can be that maintaining of arrays becomes impossible due to high memory pressure. For such reason there is an emergency path. In that case objects with rcu_head inside are just queued building one way list. Later on that list is drained. As for head-less variant. Such objects do not have any rcu_head helper inside. Thus it is dynamically attached. As a result an object consists of back-pointer and regular rcu_head. It implies that emergency path can detect such object type, therefore they are tagged. So a back-pointer could be freed as well as dynamically attached wrapper. Even though such approach requires dynamic memory it needs only sizeof(unsigned long *) + sizeof(struct rcu_head) bytes, thus SLAB is used to obtain it. Finally if attaching of the rcu_head and queuing get failed, the current context has to follow might_sleep() annotation, thus below steps could be applied: a) wait until a grace period has elapsed; b) direct inlining of the kvfree() call. Reviewed-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx> Signed-off-by: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx> Signed-off-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx> --- kernel/rcu/tree.c | 93 +++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 85 insertions(+), 8 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 3b94526f490cb..204292378101b 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2959,19 +2959,34 @@ static void kfree_rcu_work(struct work_struct *work) * when we could not allocate a bulk array. * * Under that condition an object is queued to the - * list instead. + * list instead. Please note that head-less objects + * have dynamically attached rcu_head, so they also + * contain a back-pointer that has to be freed. */ for (; head; head = next) { unsigned long offset = (unsigned long)head->func; - void *ptr = (void *)head - offset; + bool headless; + void *ptr; next = head->next; + + /* We tag the headless object, if so adjust offset. */ + headless = (((unsigned long) head - offset) & BIT(0)); + if (headless) + offset -= 1; + + ptr = (void *) head - offset; debug_rcu_head_unqueue((struct rcu_head *)ptr); + rcu_lock_acquire(&rcu_callback_map); trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset); - if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset))) + if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset))) { + if (headless) + kvfree((void *) *((unsigned long *) ptr)); + kvfree(ptr); + } rcu_lock_release(&rcu_callback_map); cond_resched_tasks_rcu_qs(); @@ -3120,6 +3135,24 @@ kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr) return true; } +static inline struct rcu_head *attach_rcu_head_to_object(void *obj) +{ + unsigned long *ptr; + + ptr = kmalloc(sizeof(unsigned long *) + + sizeof(struct rcu_head), GFP_NOWAIT | __GFP_NOWARN); + + if (!ptr) + ptr = kmalloc(sizeof(unsigned long *) + + sizeof(struct rcu_head), GFP_ATOMIC | __GFP_NOWARN); + + if (!ptr) + return NULL; + + ptr[0] = (unsigned long) obj; + return ((struct rcu_head *) ++ptr); +} + /* * Queue a request for lazy invocation of appropriate free routine after a * grace period. 
 kernel/rcu/tree.c | 93 +++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 85 insertions(+), 8 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3b94526f490cb..204292378101b 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2959,19 +2959,34 @@ static void kfree_rcu_work(struct work_struct *work)
	 * when we could not allocate a bulk array.
	 *
	 * Under that condition an object is queued to the
-	 * list instead.
+	 * list instead. Please note that head-less objects
+	 * have dynamically attached rcu_head, so they also
+	 * contain a back-pointer that has to be freed.
	 */
	for (; head; head = next) {
		unsigned long offset = (unsigned long)head->func;
-		void *ptr = (void *)head - offset;
+		bool headless;
+		void *ptr;

		next = head->next;
+
+		/* We tag the headless object, if so adjust offset. */
+		headless = (((unsigned long) head - offset) & BIT(0));
+		if (headless)
+			offset -= 1;
+
+		ptr = (void *) head - offset;
		debug_rcu_head_unqueue((struct rcu_head *)ptr);
+
		rcu_lock_acquire(&rcu_callback_map);
		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);

-		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
+		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset))) {
+			if (headless)
+				kvfree((void *) *((unsigned long *) ptr));
+
			kvfree(ptr);
+		}

		rcu_lock_release(&rcu_callback_map);
		cond_resched_tasks_rcu_qs();
@@ -3120,6 +3135,24 @@ kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
	return true;
 }

+static inline struct rcu_head *attach_rcu_head_to_object(void *obj)
+{
+	unsigned long *ptr;
+
+	ptr = kmalloc(sizeof(unsigned long *) +
+			sizeof(struct rcu_head), GFP_NOWAIT | __GFP_NOWARN);
+
+	if (!ptr)
+		ptr = kmalloc(sizeof(unsigned long *) +
+				sizeof(struct rcu_head), GFP_ATOMIC | __GFP_NOWARN);
+
+	if (!ptr)
+		return NULL;
+
+	ptr[0] = (unsigned long) obj;
+	return ((struct rcu_head *) ++ptr);
+}
+
 /*
  * Queue a request for lazy invocation of appropriate free routine after a
  * grace period. Please note there are three paths are maintained, two are the
@@ -3138,20 +3171,37 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
	unsigned long flags;
	struct kfree_rcu_cpu *krcp;
	bool expedited_drain = false;
+	bool success;
	void *ptr;

+	if (head) {
+		ptr = (void *) head - (unsigned long) func;
+	} else {
+		/*
+		 * Please note there is a limitation for the head-less
+		 * variant, that is why there is a clear rule for such
+		 * objects:
+		 *
+		 * use it from might_sleep() context only. For other
+		 * places please embed an rcu_head to your structures.
+		 */
+		might_sleep();
+		ptr = (unsigned long *) func;
+	}
+
	local_irq_save(flags);	// For safely calling this_cpu_ptr().
	krcp = this_cpu_ptr(&krc);
	if (krcp->initialized)
		spin_lock(&krcp->lock);

-	ptr = (void *)head - (unsigned long)func;
-
	// Queue the object but don't yet schedule the batch.
	if (debug_rcu_head_queue(ptr)) {
		// Probable double kfree_rcu(), just leak.
		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
			  __func__, head);
+
+		/* Mark as success and leave. */
+		success = true;
		goto unlock_return;
	}

@@ -3159,7 +3209,22 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
	 * Under high memory pressure GFP_NOWAIT can fail,
	 * in that case the emergency path is maintained.
	 */
-	if (!kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr)) {
+	success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
+	if (!success) {
+		/* Is headless object? */
+		if (head == NULL) {
+			head = attach_rcu_head_to_object(ptr);
+			if (head == NULL)
+				goto unlock_return;
+
+			/*
+			 * Tag the headless object. Such objects have a back-pointer
+			 * to the original allocated memory, that has to be freed as
+			 * well as dynamically attached wrapper/head.
+			 */
+			func = (rcu_callback_t) (sizeof(unsigned long *) + 1);
+		}
+
		head->func = func;
		head->next = krcp->head;
		krcp->head = head;
@@ -3171,15 +3236,15 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
		 * appropriate free calls.
		 */
		expedited_drain = true;
+		success = true;
	}

	WRITE_ONCE(krcp->count, krcp->count + 1);

	// Set timer to drain after KFREE_DRAIN_JIFFIES.
	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
-	    !krcp->monitor_todo) {
+			!krcp->monitor_todo) {
		krcp->monitor_todo = true;
-
		schedule_delayed_work(&krcp->monitor_work,
			expedited_drain ? 0 : KFREE_DRAIN_JIFFIES);
	}
@@ -3188,6 +3253,18 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
	if (krcp->initialized)
		spin_unlock(&krcp->lock);
	local_irq_restore(flags);
+
+	/*
+	 * High memory pressure, so inline kvfree() after
+	 * synchronize_rcu(). We can do it from might_sleep()
+	 * context only, so the current CPU can pass the QS
+	 * state.
+	 */
+	if (!success) {
+		debug_rcu_head_unqueue(ptr);
+		synchronize_rcu();
+		kvfree(ptr);
+	}
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);

-- 
2.26.0.rc2.310.g2932bb562d-goog