This prepares for speculative page faults looking up and copying vmas
under protection of an rcu read lock, instead of the usual mmap read lock.

Signed-off-by: Michel Lespinasse <michel@xxxxxxxxxxxxxx>
---
 include/linux/mm_types.h | 16 +++++++++++-----
 kernel/fork.c            | 11 ++++++++++-
 2 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 70882e628908..024970635921 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -304,12 +304,18 @@ struct vm_userfaultfd_ctx {};
 struct vm_area_struct {
 	/* The first cache line has the info for VMA tree walking. */
 
-	unsigned long vm_start;		/* Our start address within vm_mm. */
-	unsigned long vm_end;		/* The first byte after our end address
-					   within vm_mm. */
+	union {
+		struct {
+			/* VMA covers [vm_start; vm_end) addresses within mm */
+			unsigned long vm_start, vm_end;
 
-	/* linked list of VM areas per task, sorted by address */
-	struct vm_area_struct *vm_next, *vm_prev;
+			/* linked list of VMAs per task, sorted by address */
+			struct vm_area_struct *vm_next, *vm_prev;
+		};
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
+#endif
+	};
 
 	struct rb_node vm_rb;
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 426cd0c51f9e..b6078e546114 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -369,11 +369,20 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 	return new;
 }
 
-void vm_area_free(struct vm_area_struct *vma)
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+static void __vm_area_free(struct rcu_head *head)
 {
+	struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
+						  vm_rcu);
 	kmem_cache_free(vm_area_cachep, vma);
 }
 
+void vm_area_free(struct vm_area_struct *vma)
+{
+	call_rcu(&vma->vm_rcu, __vm_area_free);
+}
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
 	void *stack = task_stack_page(tsk);
-- 
2.20.1