From: Marc Eshel <eshel@xxxxxxxxxxxxxxx>

Add NFS lock support to GFS2.  (Untested.)

Signed-off-by: J. Bruce Fields <bfields@xxxxxxxxxxxxxx>
---
 fs/gfs2/locking/dlm/plock.c    |   76 +++++++++++++++++++++++++++++++++++++--
 fs/gfs2/ops_file.c             |    5 +++
 include/linux/lock_dlm_plock.h |    3 ++
 3 files changed, 80 insertions(+), 4 deletions(-)

diff --git a/fs/gfs2/locking/dlm/plock.c b/fs/gfs2/locking/dlm/plock.c
index 3799f19..da6c01c 100644
--- a/fs/gfs2/locking/dlm/plock.c
+++ b/fs/gfs2/locking/dlm/plock.c
@@ -59,6 +59,8 @@ static void send_op(struct plock_op *op)
 	wake_up(&send_wq);
 }
 
+/* XXX: unlock? cancel? ... */
+
 int gdlm_plock(void *lockspace, struct lm_lockname *name,
 	       struct file *file, int cmd, struct file_lock *fl)
 {
@@ -79,9 +81,20 @@ int gdlm_plock(void *lockspace, struct lm_lockname *name,
 	op->info.start = fl->fl_start;
 	op->info.end = fl->fl_end;
 	op->info.owner = (__u64)(long) fl->fl_owner;
+	if (fl->fl_lmops && fl->fl_lmops->fl_notify) {
+		op->info.callback = fl->fl_lmops->fl_notify;
+		/* might need to make a copy */
+		op->info.fl = fl;
+		op->info.file = file;
+	} else
+		op->info.callback = NULL;
 
 	send_op(op);
-	wait_event(recv_wq, (op->done != 0));
+
+	if (op->info.callback == NULL)
+		wait_event(recv_wq, (op->done != 0));
+	else
+		return -EINPROGRESS;
 
 	spin_lock(&ops_lock);
 	if (!list_empty(&op->list)) {
@@ -103,6 +116,58 @@ int gdlm_plock(void *lockspace, struct lm_lockname *name,
 	return rv;
 }
 
+/* Returns failure iff a successful lock operation should be canceled */
+static int gdlm_plock_callback(struct plock_op *op)
+{
+	struct file *file;
+	struct file_lock *fl;
+	int (*notify)(void *, void *, int) = NULL;
+	int rv = 0;
+
+	spin_lock(&ops_lock);
+	if (!list_empty(&op->list)) {
+		printk(KERN_INFO "plock op on list\n");
+		list_del(&op->list);
+	}
+	spin_unlock(&ops_lock);
+
+	/* check if the following 2 are still valid or make a copy */
+	file = op->info.file;
+	fl = op->info.fl;
+	notify = op->info.callback;
+
+	if (op->info.rv) {
+		notify(fl, NULL, op->info.rv);
+		goto out;
+	}
+
+	/* got fs lock; bookkeep locally as well: */
+	if (posix_lock_file(file, fl)) {
+		/*
+		 * This can only happen in the case of kmalloc() failure.
+		 * The filesystem's own lock is the authoritative lock,
+		 * so a failure to get the lock locally is not a disaster.
+		 * As long as GFS cannot reliably cancel locks (especially
+		 * in a low-memory situation), we're better off ignoring
+		 * this failure than trying to recover.
+		 */
+		log_error("gdlm_plock: vfs lock error file %p fl %p",
+			  file, fl);
+	}
+
+	rv = notify(fl, NULL, 0);
+	if (rv) {
+		/* XXX: We need to cancel the fs lock here: */
+		printk("gfs2 lock granted after lock request failed;"
+		       " dangling lock!\n");
+		goto out;
+	}
+
+out:
+	kfree(op);
+	return rv;
+}
+
 int gdlm_punlock(void *lockspace, struct lm_lockname *name,
 		 struct file *file, struct file_lock *fl)
 {
@@ -243,9 +308,12 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
 	}
 	spin_unlock(&ops_lock);
 
-	if (found)
-		wake_up(&recv_wq);
-	else
+	if (found) {
+		if (op->info.callback)
+			count = gdlm_plock_callback(op);
+		else
+			wake_up(&recv_wq);
+	} else
 		printk(KERN_INFO "gdlm dev_write no op %x %llx\n",
 		       info.fsid, (unsigned long long)info.number);
 	return count;
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index faa07e4..acf23b2 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -576,6 +576,11 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 		}
 	}
 
+	if (cmd == F_CANCELLK) {
+		/* Hack: */
+		cmd = F_SETLK;
+		fl->fl_type = F_UNLCK;
+	}
 	if (IS_GETLK(cmd))
 		return gfs2_lm_plock_get(sdp, &name, file, fl);
 	else if (fl->fl_type == F_UNLCK)
diff --git a/include/linux/lock_dlm_plock.h b/include/linux/lock_dlm_plock.h
index fc34151..809c5b7 100644
--- a/include/linux/lock_dlm_plock.h
+++ b/include/linux/lock_dlm_plock.h
@@ -35,6 +35,9 @@ struct gdlm_plock_info {
 	__u64 start;
 	__u64 end;
 	__u64 owner;
+	void *callback;
+	void *fl;
+	void *file;
 };
 
 #endif
-- 
1.5.0.rc1.g72fe
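
For anyone tracing the new control flow, here is a minimal, hypothetical
sketch of how a lock manager might drive the asynchronous path above.  It
is not part of the patch: grant_cb, my_lock_ops, and request_cluster_lock
are made-up names, and it assumes fl_notify has been widened to the
int (*)(void *, void *, int) signature that gdlm_plock() stores in
op->info.callback and gdlm_plock_callback() invokes.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>

/*
 * Hypothetical caller sketch -- not part of the patch.  All names are
 * illustrative; the fl_notify signature follows the notify() call made
 * from gdlm_plock_callback().
 */
static int grant_cb(void *fl, void *unused, int result)
{
	/*
	 * Runs from gdlm_plock_callback() once the userspace lock
	 * daemon has answered: result is 0 if the cluster lock was
	 * granted, or a negative errno if the request failed.
	 * Returning nonzero asks GFS2 to cancel a lock it has
	 * already granted.
	 */
	printk(KERN_INFO "plock result: %d\n", result);
	return 0;
}

static struct lock_manager_operations my_lock_ops = {
	.fl_notify = grant_cb,
};

static int request_cluster_lock(struct file *file, struct file_lock *fl)
{
	int err;

	/* A non-NULL fl_lmops->fl_notify makes gdlm_plock() go async. */
	fl->fl_lmops = &my_lock_ops;

	err = file->f_op->lock(file, F_SETLK, fl);
	if (err == -EINPROGRESS)
		return 0;	/* answer arrives later via grant_cb() */
	return err;
}

Cancelling a still-pending request would then go through the F_CANCELLK
hack in gfs2_lock() above, which rewrites the request as an unlock.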