[PATCH 31/58] staging/lustre/ldlm: move procfs ldlm pool stats to sysfs

From: Oleg Drokin <green@xxxxxxxxxxxxxx>

The sysfs-suitable contents of /proc/fs/lustre/ldlm/namespaces/.../pool/
are moved to /sys/fs/lustre/ldlm/namespaces/.../pool/:
cancel_rate grant_plan grant_speed lock_volume_factor
server_lock_volume granted grant_rate limit recalc_period

Signed-off-by: Oleg Drokin <green@xxxxxxxxxxxxxx>
---
 drivers/staging/lustre/lustre/ldlm/ldlm_internal.h | 73 +++++++++++++++++-----
 drivers/staging/lustre/lustre/ldlm/ldlm_pool.c     | 70 +++++++++++----------
 drivers/staging/lustre/sysfs-fs-lustre             | 62 ++++++++++++++++++
 3 files changed, 159 insertions(+), 46 deletions(-)
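
As a quick orientation for reviewers, here is a minimal userspace sketch of
reading one of the relocated attributes from its new sysfs location. The
namespace name in the path is a made-up placeholder (real names depend on the
mounted targets), and the program itself is illustrative, not part of the
patch:

/* Illustration only: read the relocated "granted" attribute from sysfs.
 * "EXAMPLE-NAMESPACE" is a placeholder; actual namespace directory names
 * depend on the mounted filesystem and targets.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path =
		"/sys/fs/lustre/ldlm/namespaces/EXAMPLE-NAMESPACE/pool/granted";
	FILE *f = fopen(path, "r");
	long granted;

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%ld", &granted) == 1)
		printf("granted locks: %ld\n", granted);
	fclose(f);
	return EXIT_SUCCESS;
}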

diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 70b909f..636451d 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -238,40 +238,85 @@ enum ldlm_policy_res {
 
 typedef enum ldlm_policy_res ldlm_policy_res_t;
 
-#define LDLM_POOL_PROC_READER_SEQ_SHOW(var, type)			    \
-	static int lprocfs_##var##_seq_show(struct seq_file *m, void *v) \
+#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
+#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
+#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v)
+#define LDLM_POOL_SYSFS_SET_u64(a, b) { a = b; }
+#define LDLM_POOL_SYSFS_PRINT_atomic(v) sprintf(buf, "%d\n", atomic_read(&v))
+#define LDLM_POOL_SYSFS_SET_atomic(a, b) atomic_set(&a, b)
+
+#define LDLM_POOL_SYSFS_READER_SHOW(var, type)				    \
+	static ssize_t var##_show(struct kobject *kobj,			    \
+				  struct attribute *attr,		    \
+				  char *buf)				    \
 	{								    \
-		struct ldlm_pool *pl = m->private;			    \
+		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
+						    pl_kobj);		    \
 		type tmp;						    \
 									    \
 		spin_lock(&pl->pl_lock);				    \
 		tmp = pl->pl_##var;					    \
 		spin_unlock(&pl->pl_lock);				    \
 									    \
-		return lprocfs_rd_uint(m, &tmp);			    \
+		return LDLM_POOL_SYSFS_PRINT_##type(tmp);		    \
 	}								    \
 	struct __##var##__dummy_read {; } /* semicolon catcher */
 
-#define LDLM_POOL_PROC_WRITER(var, type)				    \
-	static int lprocfs_wr_##var(struct file *file,			    \
-				const char __user *buffer,		    \
-				unsigned long count, void *data)	    \
+#define LDLM_POOL_SYSFS_WRITER_STORE(var, type)				    \
+	static ssize_t var##_store(struct kobject *kobj,		    \
+				     struct attribute *attr,		    \
+				     const char *buffer,		    \
+				     unsigned long count)		    \
 	{								    \
-		struct ldlm_pool *pl = data;				    \
-		type tmp;						    \
+		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
+						    pl_kobj);		    \
+		unsigned long tmp;					    \
 		int rc;							    \
 									    \
-		rc = lprocfs_wr_uint(file, buffer, count, &tmp);	    \
+		rc = kstrtoul(buffer, 10, &tmp);			    \
 		if (rc < 0) {						    \
-			CERROR("Can't parse user input, rc = %d\n", rc);    \
 			return rc;					    \
 		}							    \
 									    \
 		spin_lock(&pl->pl_lock);				    \
-		pl->pl_##var = tmp;					    \
+		LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp);		    \
 		spin_unlock(&pl->pl_lock);				    \
 									    \
-		return rc;						    \
+		return count;						    \
+	}								    \
+	struct __##var##__dummy_write {; } /* semicolon catcher */
+
+#define LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(var, type)			    \
+	static ssize_t var##_show(struct kobject *kobj,		    \
+				    struct attribute *attr,		    \
+				    char *buf)				    \
+	{								    \
+		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
+						    pl_kobj);		    \
+									    \
+		return LDLM_POOL_SYSFS_PRINT_##type(pl->pl_##var);	    \
+	}								    \
+	struct __##var##__dummy_read {; } /* semicolon catcher */
+
+#define LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(var, type)			    \
+	static ssize_t var##_store(struct kobject *kobj,		    \
+				     struct attribute *attr,		    \
+				     const char *buffer,		    \
+				     unsigned long count)		    \
+	{								    \
+		struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, \
+						    pl_kobj);		    \
+		unsigned long tmp;					    \
+		int rc;							    \
+									    \
+		rc = kstrtoul(buffer, 10, &tmp);			    \
+		if (rc < 0) {						    \
+			return rc;					    \
+		}							    \
+									    \
+		LDLM_POOL_SYSFS_SET_##type(pl->pl_##var, tmp);		    \
+									    \
+		return count;						    \
 	}								    \
 	struct __##var##__dummy_write {; } /* semicolon catcher */
 
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 0968868..ed74735 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -696,9 +696,12 @@ static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
 }
 LPROC_SEQ_FOPS_RO(lprocfs_pool_state);
 
-static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
+static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
+				char *buf)
 {
-	struct ldlm_pool *pl = m->private;
+	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
+					    pl_kobj);
+
 	int	       grant_speed;
 
 	spin_lock(&pl->pl_lock);
@@ -706,29 +709,36 @@ static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
 	grant_speed = atomic_read(&pl->pl_grant_rate) -
 			atomic_read(&pl->pl_cancel_rate);
 	spin_unlock(&pl->pl_lock);
-	return lprocfs_rd_uint(m, &grant_speed);
+	return sprintf(buf, "%d\n", grant_speed);
 }
+LUSTRE_RO_ATTR(grant_speed);
 
-LDLM_POOL_PROC_READER_SEQ_SHOW(grant_plan, int);
-LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);
+LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
+LUSTRE_RO_ATTR(grant_plan);
 
-LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
-LDLM_POOL_PROC_WRITER(recalc_period, int);
-static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
-					       const char __user *buf,
-					       size_t len, loff_t *off)
-{
-	struct seq_file *seq = file->private_data;
+LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
+LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
+LUSTRE_RW_ATTR(recalc_period);
 
-	return lprocfs_wr_recalc_period(file, buf, len, seq->private);
-}
-LPROC_SEQ_FOPS(lprocfs_recalc_period);
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
+LUSTRE_RO_ATTR(server_lock_volume);
+
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
+LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
+LUSTRE_RW_ATTR(limit);
+
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
+LUSTRE_RO_ATTR(granted);
+
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
+LUSTRE_RO_ATTR(cancel_rate);
 
-LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, u64);
-LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, atomic);
-LPROC_SEQ_FOPS_RW_TYPE(ldlm_pool_rw, atomic);
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
+LUSTRE_RO_ATTR(grant_rate);
 
-LPROC_SEQ_FOPS_RO(lprocfs_grant_speed);
+LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic);
+LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic);
+LUSTRE_RW_ATTR(lock_volume_factor);
 
 #define LDLM_POOL_ADD_VAR(name, var, ops)			\
 	do {							\
@@ -740,6 +750,15 @@ LPROC_SEQ_FOPS_RO(lprocfs_grant_speed);
 
 /* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
 static struct attribute *ldlm_pl_attrs[] = {
+	&lustre_attr_grant_speed.attr,
+	&lustre_attr_grant_plan.attr,
+	&lustre_attr_recalc_period.attr,
+	&lustre_attr_server_lock_volume.attr,
+	&lustre_attr_limit.attr,
+	&lustre_attr_granted.attr,
+	&lustre_attr_cancel_rate.attr,
+	&lustre_attr_grant_rate.attr,
+	&lustre_attr_lock_volume_factor.attr,
 	NULL,
 };
 
@@ -800,19 +819,6 @@ static int ldlm_pool_proc_init(struct ldlm_pool *pl)
 	memset(pool_vars, 0, sizeof(pool_vars));
 	pool_vars[0].name = var_name;
 
-	LDLM_POOL_ADD_VAR("server_lock_volume", &pl->pl_server_lock_volume,
-			  &ldlm_pool_u64_fops);
-	LDLM_POOL_ADD_VAR("limit", &pl->pl_limit, &ldlm_pool_rw_atomic_fops);
-	LDLM_POOL_ADD_VAR("granted", &pl->pl_granted, &ldlm_pool_atomic_fops);
-	LDLM_POOL_ADD_VAR("grant_speed", pl, &lprocfs_grant_speed_fops);
-	LDLM_POOL_ADD_VAR("cancel_rate", &pl->pl_cancel_rate,
-			  &ldlm_pool_atomic_fops);
-	LDLM_POOL_ADD_VAR("grant_rate", &pl->pl_grant_rate,
-			  &ldlm_pool_atomic_fops);
-	LDLM_POOL_ADD_VAR("grant_plan", pl, &lprocfs_grant_plan_fops);
-	LDLM_POOL_ADD_VAR("recalc_period", pl, &lprocfs_recalc_period_fops);
-	LDLM_POOL_ADD_VAR("lock_volume_factor", &pl->pl_lock_volume_factor,
-			  &ldlm_pool_rw_atomic_fops);
 	LDLM_POOL_ADD_VAR("state", pl, &lprocfs_pool_state_fops);
 
 	pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre
index 3aaa5ae..39295e8 100644
--- a/drivers/staging/lustre/sysfs-fs-lustre
+++ b/drivers/staging/lustre/sysfs-fs-lustre
@@ -249,3 +249,65 @@ Description:
 		in significant speedups due to reduced lock-pingpong RPCs.
 		0 - disabled
 		1 - enabled (default)
+
+What:		/sys/fs/lustre/ldlm/namespaces/<name>/pool/granted
+Date:		May 2015
+Contact:	"Oleg Drokin" <oleg.drokin@xxxxxxxxx>
+Description:
+		Displays number of granted locks in this namespace
+
+What:		/sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_rate
+Date:		May 2015
+Contact:	"Oleg Drokin" <oleg.drokin@xxxxxxxxx>
+Description:
+		Number of granted locks in this namespace during last
+		time interval
+
+What:		/sys/fs/lustre/ldlm/namespaces/<name>/pool/cancel_rate
+Date:		May 2015
+Contact:	"Oleg Drokin" <oleg.drokin@xxxxxxxxx>
+Description:
+		Number of lock cancellations in this namespace during
+		last time interval
+
+What:		/sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_speed
+Date:		May 2015
+Contact:	"Oleg Drokin" <oleg.drokin@xxxxxxxxx>
+Description:
+		Calculated speed of lock granting (grant_rate - cancel_rate)
+		in this namespace
+
+What:		/sys/fs/lustre/ldlm/namespaces/<name>/pool/grant_plan
+Date:		May 2015
+Contact:	"Oleg Drokin" <oleg.drokin@xxxxxxxxx>
+Description:
+		Estimated number of locks to be granted in the next time
+		interval in this namespace
+
+What:		/sys/fs/lustre/ldlm/namespaces/<name>/pool/limit
+Date:		May 2015
+Contact:	"Oleg Drokin" <oleg.drokin@xxxxxxxxx>
+Description:
+		Controls the number of allowed locks in this pool.
+		When lru_size is 0, this is the actual limit.
+
+What:		/sys/fs/lustre/ldlm/namespaces/<name>/pool/lock_volume_factor
+Date:		May 2015
+Contact:	"Oleg Drokin" <oleg.drokin@xxxxxxxxx>
+Description:
+		Multiplier for all lock volume calculations above.
+		Default is 1. Increase to make the client clean its lock
+		LRU list for this namespace more aggressively.
+
+What:		/sys/fs/lustre/ldlm/namespaces/<name>/pool/server_lock_volume
+Date:		May 2015
+Contact:	"Oleg Drokin" <oleg.drokin@xxxxxxxxx>
+Description:
+		Calculated server lock volume.
+
+What:		/sys/fs/lustre/ldlm/namespaces/<name>/pool/recalc_period
+Date:		May 2015
+Contact:	"Oleg Drokin" <oleg.drokin@xxxxxxxxx>
+Description:
+		Controls length of time between recalculation of above
+		values (in seconds).
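
For readers tracing the new macros, this is roughly what one generated pair
looks like once expanded, shown here for recalc_period. It is hand-expanded
purely for illustration; the patch itself only adds the macros in
ldlm_internal.h and registers the result through LUSTRE_RW_ATTR(recalc_period):

static ssize_t recalc_period_show(struct kobject *kobj,
				  struct attribute *attr, char *buf)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, pl_kobj);
	int tmp;

	spin_lock(&pl->pl_lock);
	tmp = pl->pl_recalc_period;
	spin_unlock(&pl->pl_lock);

	/* LDLM_POOL_SYSFS_PRINT_int() */
	return sprintf(buf, "%d\n", tmp);
}

static ssize_t recalc_period_store(struct kobject *kobj,
				   struct attribute *attr,
				   const char *buffer, unsigned long count)
{
	struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool, pl_kobj);
	unsigned long tmp;
	int rc;

	rc = kstrtoul(buffer, 10, &tmp);
	if (rc < 0)
		return rc;

	spin_lock(&pl->pl_lock);
	pl->pl_recalc_period = tmp;	/* LDLM_POOL_SYSFS_SET_int() */
	spin_unlock(&pl->pl_lock);

	return count;
}
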
-- 
2.1.0




