[RFC v2 08/10] mm: Define mem range lock operations

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



This patch introduces new mm_down/up*() operations which will operate on
mmap_sem either as a semaphore or as a range lock, depending on
CONFIG_MEM_RANGE_LOCK.

When CONFIG_MEM_RANGE_LOCK is defined, the additional range parameter
is used; otherwise it is ignored, avoiding a useless additional stack
parameter.

Signed-off-by: Laurent Dufour <ldufour@xxxxxxxxxxxxxxxxxx>
---
 include/linux/mm.h       | 27 +++++++++++++++++++++++++++
 include/linux/mm_types.h |  5 +++++
 2 files changed, 32 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index b09048386152..d47b28eb0a53 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -15,6 +15,7 @@
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
+#include <linux/range_lock.h>
 #include <linux/pfn.h>
 #include <linux/percpu-refcount.h>
 #include <linux/bit_spinlock.h>
@@ -2588,5 +2589,31 @@ void __init setup_nr_node_ids(void);
 static inline void setup_nr_node_ids(void) {}
 #endif
 
+#ifdef CONFIG_MEM_RANGE_LOCK
+#define mm_range_define(r)						\
+	struct range_lock r = __RANGE_LOCK_INITIALIZER(0, RANGE_LOCK_FULL)
+#define mm_read_lock(m, r)	range_read_lock(&(m)->mmap_sem, r)
+#define mm_read_trylock(m, r)	range_read_trylock(&(m)->mmap_sem, r)
+#define mm_read_unlock(m, r)	range_read_unlock(&(m)->mmap_sem, r)
+#define mm_write_lock(m, r)	range_write_lock(&(m)->mmap_sem, r)
+#define mm_write_trylock(m, r)	range_write_trylock(&(m)->mmap_sem, r)
+#define mm_write_unlock(m, r)	range_write_unlock(&(m)->mmap_sem, r)
+#define mm_write_lock_killable(m, r) \
+	range_write_lock_interruptible(&(m)->mmap_sem, r)
+#define mm_downgrade_write(m, r) range_downgrade_write(&(m)->mmap_sem, r)
+
+#else /* CONFIG_MEM_RANGE_LOCK */
+#define mm_range_define(r)	do { } while (0)
+#define mm_read_lock(m, r)	down_read(&(m)->mmap_sem)
+#define mm_read_trylock(m, r)	down_read_trylock(&(m)->mmap_sem)
+#define mm_read_unlock(m, r)	up_read(&(m)->mmap_sem)
+#define mm_write_lock(m, r)	down_write(&(m)->mmap_sem)
+#define mm_write_trylock(m, r)	down_write_trylock(&(m)->mmap_sem)
+#define mm_write_unlock(m, r)	up_write(&(m)->mmap_sem)
+#define mm_write_lock_killable(m, r) down_write_killable(&(m)->mmap_sem)
+#define mm_downgrade_write(m, r) downgrade_write(&(m)->mmap_sem)
+
+#endif /* CONFIG_MEM_RANGE_LOCK */
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 45cdb27791a3..d40611490200 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -8,6 +8,7 @@
 #include <linux/spinlock.h>
 #include <linux/rbtree.h>
 #include <linux/rwsem.h>
+#include <linux/range_lock.h>
 #include <linux/completion.h>
 #include <linux/cpumask.h>
 #include <linux/uprobes.h>
@@ -403,7 +404,11 @@ struct mm_struct {
 	int map_count;				/* number of VMAs */
 
 	spinlock_t page_table_lock;		/* Protects page tables and some counters */
+#ifdef CONFIG_MEM_RANGE_LOCK
+	struct range_lock_tree mmap_sem;
+#else
 	struct rw_semaphore mmap_sem;
+#endif
 
 	struct list_head mmlist;		/* List of maybe swapped mm's.	These are globally strung
 						 * together off init_mm.mmlist, and are protected
-- 
2.7.4

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@xxxxxxxxx.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@xxxxxxxxx">email@xxxxxxxxx</a>



[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]
  Powered by Linux