Re: [RFC PATCH 2/6] zsmalloc: make zspage lock preemptible

On 27. 01. 25 08:59, Sergey Senozhatsky wrote:
Switch over from rwlock_t to an atomic_t variable that takes a
negative value when the page is under migration, or positive
values when the page is used by zsmalloc users (object map,
etc.).  Using a rwsem per-zspage is a little too memory heavy;
a simple atomic_t should suffice, after all we only need to
mark a zspage as either used-for-write or used-for-read.  This
is needed to make zsmalloc preemptible in the future.

Signed-off-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
---
  mm/zsmalloc.c | 112 +++++++++++++++++++++++++++++---------------------
  1 file changed, 66 insertions(+), 46 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 817626a351f8..28a75bfbeaa6 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -257,6 +257,9 @@ static inline void free_zpdesc(struct zpdesc *zpdesc)
  	__free_page(page);
  }
+#define ZS_PAGE_UNLOCKED 0
+#define ZS_PAGE_WRLOCKED	-1
+
  struct zspage {
  	struct {
  		unsigned int huge:HUGE_BITS;
@@ -269,7 +272,7 @@ struct zspage {
  	struct zpdesc *first_zpdesc;
  	struct list_head list; /* fullness list */
  	struct zs_pool *pool;
-	rwlock_t lock;
+	atomic_t lock;
  };
  struct mapping_area {
@@ -290,11 +293,53 @@ static bool ZsHugePage(struct zspage *zspage)
  	return zspage->huge;
  }
-static void migrate_lock_init(struct zspage *zspage);
-static void migrate_read_lock(struct zspage *zspage);
-static void migrate_read_unlock(struct zspage *zspage);
-static void migrate_write_lock(struct zspage *zspage);
-static void migrate_write_unlock(struct zspage *zspage);
+static void zspage_lock_init(struct zspage *zspage)
+{
+	atomic_set(&zspage->lock, ZS_PAGE_UNLOCKED);
+}
+
+static void zspage_read_lock(struct zspage *zspage)
+{
+	atomic_t *lock = &zspage->lock;
+	int old;
+
+	while (1) {
+		old = atomic_read(lock);
+		if (old == ZS_PAGE_WRLOCKED) {
+			cpu_relax();
+			continue;
+		}
+
+		if (atomic_cmpxchg(lock, old, old + 1) == old)
+			return;

You can use atomic_try_cmpxchg() here:

if (atomic_try_cmpxchg(lock, &old, old + 1))
        return;
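
With that, the whole read-lock loop could then collapse to something
like the following (untested sketch; on failure atomic_try_cmpxchg()
updates 'old' with the value it observed in memory, so only the
write-locked path needs an explicit re-read):

static void zspage_read_lock(struct zspage *zspage)
{
	atomic_t *lock = &zspage->lock;
	int old = atomic_read(lock);

	while (1) {
		if (old == ZS_PAGE_WRLOCKED) {
			cpu_relax();
			old = atomic_read(lock);
			continue;
		}

		/* on failure, 'old' is refreshed with the current value */
		if (atomic_try_cmpxchg(lock, &old, old + 1))
			return;

		cpu_relax();
	}
}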

+
+		cpu_relax();
+	}
+}
+
+static void zspage_read_unlock(struct zspage *zspage)
+{
+	atomic_dec(&zspage->lock);
+}
+
+static void zspage_write_lock(struct zspage *zspage)
+{
+	atomic_t *lock = &zspage->lock;
+	int old;
+
+	while (1) {
+		old = atomic_cmpxchg(lock, ZS_PAGE_UNLOCKED, ZS_PAGE_WRLOCKED);
+		if (old == ZS_PAGE_UNLOCKED)
+			return;

Also, the above code can be rewritten as:

while (1) {
        old = ZS_PAGE_UNLOCKED;
        if (atomic_try_cmpxchg(lock, &old, ZS_PAGE_WRLOCKED))
                return;
+
+		cpu_relax();
+	}
+}

The above change will result in a slightly better generated asm.
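
(atomic_try_cmpxchg() lets the compiler branch directly on the flags
set by the cmpxchg instruction on x86, and on failure the observed
value already lands in 'old', instead of a separate compare against
the returned value plus a reload at the top of the loop.)  Put
together, the write-lock path might then read as follows (untested
sketch; note that 'old' has to be reset to ZS_PAGE_UNLOCKED before
every retry, since a failed atomic_try_cmpxchg() overwrites it):

static void zspage_write_lock(struct zspage *zspage)
{
	atomic_t *lock = &zspage->lock;
	int old = ZS_PAGE_UNLOCKED;

	while (!atomic_try_cmpxchg(lock, &old, ZS_PAGE_WRLOCKED)) {
		/* a failed try_cmpxchg wrote the current value to 'old' */
		old = ZS_PAGE_UNLOCKED;
		cpu_relax();
	}
}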

Uros.