From: Heinz Mauelshagen <heinzm@xxxxxxxxxx>

Signed-off-by: Heinz Mauelshagen <heinzm@xxxxxxxxxx>
Signed-off-by: Mike Snitzer <snitzer@xxxxxxxxxx>
---
 drivers/md/dm-init.c   | 2 +-
 drivers/md/dm-kcopyd.c | 4 ++--
 drivers/md/dm-snap.c   | 6 +++---
 drivers/md/dm.c        | 8 ++++----
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index de8d8d0049e0..52362453ab58 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -295,7 +295,7 @@ static int __init dm_init_init(void)
 		if (waitfor[i]) {
 			DMINFO("waiting for device %s ...", waitfor[i]);
 			while (!dm_get_dev_t(waitfor[i]))
-				msleep(5);
+				fsleep(5000);
 		}
 	}
 
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 837f2a6dd00e..132b50d06121 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -110,7 +110,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
  * The reason for this is unknown but possibly due to jiffies rounding errors
  * or read/write cache inside the disk.
  */
-#define SLEEP_MSEC 100
+#define SLEEP_USEC 100000
 
 /*
  * Maximum number of sleep events. There is a theoretical livelock if more
@@ -158,7 +158,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t)
 	if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
 		slept++;
 		spin_unlock_irq(&throttle_spinlock);
-		msleep(SLEEP_MSEC);
+		fsleep(SLEEP_USEC);
 		goto try_again;
 	}
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 99d5cf1130f0..c50368512bee 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -298,12 +298,12 @@ static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
 
 /*
  * This conflicting I/O is extremely improbable in the caller,
- * so msleep(1) is sufficient and there is no need for a wait queue.
+ * so fsleep(1000) is sufficient and there is no need for a wait queue.
  */
 static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
 {
 	while (__chunk_is_tracked(s, chunk))
-		msleep(1);
+		fsleep(1000);
 }
 
 /*
@@ -1494,7 +1494,7 @@ static void snapshot_dtr(struct dm_target *ti)
 	unregister_snapshot(s);
 
 	while (atomic_read(&s->pending_exceptions_count))
-		msleep(1);
+		fsleep(1000);
 	/*
 	 * Ensure instructions in mempool_exit aren't reordered
 	 * before atomic_read.
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c4a99bdb956c..2e4193ae064c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -436,7 +436,7 @@ static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
 	r = ti->type->prepare_ioctl(ti, bdev);
 	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
 		dm_put_live_table(md, *srcu_idx);
-		msleep(10);
+		fsleep(10000);
 		goto retry;
 	}
 
@@ -2442,7 +2442,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 		set_bit(DMF_POST_SUSPENDING, &md->flags);
 		dm_table_postsuspend_targets(map);
 	}
-	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+	/* dm_put_live_table must be before fsleep, otherwise deadlock is possible */
 	dm_put_live_table(md, srcu_idx);
 	mutex_unlock(&md->suspend_lock);
 
@@ -2454,7 +2454,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	 */
 	if (wait)
 		while (atomic_read(&md->holders))
-			msleep(1);
+			fsleep(1000);
 	else if (atomic_read(&md->holders))
 		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
 		       dm_device_name(md), atomic_read(&md->holders));
@@ -2531,7 +2531,7 @@ static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_st
 			break;
 		}
 
-		msleep(5);
+		fsleep(5000);
 	}
 
 	return r;
-- 
2.37.0 (Apple Git-136)

--
dm-devel mailing list
dm-devel@xxxxxxxxxx
https://listman.redhat.com/mailman/listinfo/dm-devel