Currently, rbd_wait_state_locked() will wait forever if the exclusive
lock cannot be acquired. Example:

    rbd map --exclusive test1                     --> /dev/rbd0
    rbd map test1                                 --> /dev/rbd1
    dd if=/dev/zero of=/dev/rbd1 bs=1M count=1    --> IO blocked

To avoid this problem, introduce a timeout in rbd_wait_state_locked()
and return -ETIMEDOUT when it expires. The default timeout is
MAX_SCHEDULE_TIMEOUT, which preserves the current wait-forever
behaviour until a shorter timeout is configured.

Signed-off-by: Dongsheng Yang <dongsheng.yang@xxxxxxxxxxxx>
---
 drivers/block/rbd.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 96657ad..199819d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -761,6 +761,7 @@ struct rbd_options {
 #define RBD_READ_ONLY_DEFAULT		false
 #define RBD_LOCK_ON_READ_DEFAULT	false
 #define RBD_EXCLUSIVE_DEFAULT		false
+#define RBD_WAIT_LOCK_TIMEOUT_DEFAULT	MAX_SCHEDULE_TIMEOUT
 
 static int parse_rbd_opts_token(char *c, void *private)
 {
@@ -3494,8 +3495,9 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
 /*
  * lock_rwsem must be held for read
  */
-static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
+static int rbd_wait_state_locked(struct rbd_device *rbd_dev)
 {
+	long timeo = RBD_WAIT_LOCK_TIMEOUT_DEFAULT;
 	DEFINE_WAIT(wait);
 
 	do {
@@ -3508,12 +3510,18 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
 		prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
 					  TASK_UNINTERRUPTIBLE);
 		up_read(&rbd_dev->lock_rwsem);
-		schedule();
+		timeo = schedule_timeout(timeo);
 		down_read(&rbd_dev->lock_rwsem);
+		if (!timeo) {
+			finish_wait(&rbd_dev->lock_waitq, &wait);
+			rbd_warn(rbd_dev, "timed out waiting for lock");
+			return -ETIMEDOUT;
+		}
 	} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
 		 !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
 
 	finish_wait(&rbd_dev->lock_waitq, &wait);
+	return 0;
 }
 
 static void rbd_queue_workfn(struct work_struct *work)
@@ -3606,7 +3614,9 @@ static void rbd_queue_workfn(struct work_struct *work)
 				result = -EROFS;
 				goto err_unlock;
 			}
-			rbd_wait_state_locked(rbd_dev);
+			result = rbd_wait_state_locked(rbd_dev);
+			if (result)
+				goto err_unlock;
 		}
 		if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
 			result = -EBLACKLISTED;
@@ -5261,6 +5271,8 @@ static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
 
 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
 {
+	int ret = 0;
+
 	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
 		rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
 		return -EINVAL;
@@ -5268,8 +5280,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
 
 	/* FIXME: "rbd map --exclusive" should be in interruptible */
 	down_read(&rbd_dev->lock_rwsem);
-	rbd_wait_state_locked(rbd_dev);
+	ret = rbd_wait_state_locked(rbd_dev);
 	up_read(&rbd_dev->lock_rwsem);
+	if (ret) {
+		return ret;
+	}
 	if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
 		rbd_warn(rbd_dev, "failed to acquire exclusive lock");
 		return -EROFS;
-- 
1.8.3.1
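
P.S. For reference, here is a minimal, self-contained sketch of the
wait-queue idiom the patch relies on: prepare_to_wait_exclusive() +
schedule_timeout() + finish_wait(). Everything in it (struct foo_dev,
foo_wait_ready(), the "ready" flag) is a hypothetical placeholder, not
rbd code; the real function re-checks its condition under lock_rwsem:

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/errno.h>

    struct foo_dev {
    	wait_queue_head_t waitq;	/* wake_up()'d when "ready" changes */
    	bool ready;			/* condition being waited for */
    };

    /* Wait for dev->ready to become true, giving up after "budget" jiffies. */
    static int foo_wait_ready(struct foo_dev *dev, long budget)
    {
    	DEFINE_WAIT(wait);

    	while (!dev->ready) {
    		/*
    		 * Queue the task before re-checking the condition so a
    		 * wake-up between the check and the sleep is not lost.
    		 */
    		prepare_to_wait_exclusive(&dev->waitq, &wait,
    					  TASK_UNINTERRUPTIBLE);
    		if (dev->ready)
    			break;
    		/* Returns the remaining budget; 0 means we timed out. */
    		budget = schedule_timeout(budget);
    		if (!budget) {
    			finish_wait(&dev->waitq, &wait);
    			return -ETIMEDOUT;
    		}
    	}
    	finish_wait(&dev->waitq, &wait);
    	return 0;
    }

The wake-up side would set dev->ready and call wake_up(&dev->waitq).
Note that schedule_timeout(MAX_SCHEDULE_TIMEOUT) sleeps indefinitely
and never returns 0, which is why the patch's default of
RBD_WAIT_LOCK_TIMEOUT_DEFAULT keeps today's wait-forever behaviour.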