Using a spinlock in pipe_read/write costs too much time; IMO pipe->{head,tail}
can be protected by __pipe_{lock,unlock} instead. Accordingly, also use
__pipe_lock/unlock to protect pipe->head/tail in pipe_resize_ring and
post_one_notification.

Signed-off-by: Hongchen Zhang <zhanghongchen@xxxxxxxxxxx>
---
 fs/pipe.c                 | 24 ++++--------------------
 include/linux/pipe_fs_i.h | 12 ++++++++++++
 kernel/watch_queue.c      |  8 ++++----
 3 files changed, 20 insertions(+), 24 deletions(-)

diff --git a/fs/pipe.c b/fs/pipe.c
index 42c7ff41c2db..cf449779bf71 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -98,16 +98,6 @@ void pipe_unlock(struct pipe_inode_info *pipe)
 }
 EXPORT_SYMBOL(pipe_unlock);
 
-static inline void __pipe_lock(struct pipe_inode_info *pipe)
-{
-	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
-}
-
-static inline void __pipe_unlock(struct pipe_inode_info *pipe)
-{
-	mutex_unlock(&pipe->mutex);
-}
-
 void pipe_double_lock(struct pipe_inode_info *pipe1,
 		      struct pipe_inode_info *pipe2)
 {
@@ -253,8 +243,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
 	 */
 	was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
 	for (;;) {
-		/* Read ->head with a barrier vs post_one_notification() */
-		unsigned int head = smp_load_acquire(&pipe->head);
+		unsigned int head = pipe->head;
 		unsigned int tail = pipe->tail;
 		unsigned int mask = pipe->ring_size - 1;
 
@@ -322,14 +311,12 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
 
 			if (!buf->len) {
 				pipe_buf_release(pipe, buf);
-				spin_lock_irq(&pipe->rd_wait.lock);
 #ifdef CONFIG_WATCH_QUEUE
 				if (buf->flags & PIPE_BUF_FLAG_LOSS)
 					pipe->note_loss = true;
 #endif
 				tail++;
 				pipe->tail = tail;
-				spin_unlock_irq(&pipe->rd_wait.lock);
 			}
 			total_len -= chars;
 			if (!total_len)
@@ -506,16 +493,13 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
 			 * it, either the reader will consume it or it'll still
 			 * be there for the next write.
 			 */
-			spin_lock_irq(&pipe->rd_wait.lock);
 			head = pipe->head;
 			if (pipe_full(head, pipe->tail, pipe->max_usage)) {
-				spin_unlock_irq(&pipe->rd_wait.lock);
 				continue;
 			}
 
 			pipe->head = head + 1;
-			spin_unlock_irq(&pipe->rd_wait.lock);
 
 			/* Insert it into the buffer array */
 			buf = &pipe->bufs[head & mask];
@@ -1260,14 +1244,14 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
 	if (unlikely(!bufs))
 		return -ENOMEM;
 
-	spin_lock_irq(&pipe->rd_wait.lock);
+	__pipe_lock(pipe);
 	mask = pipe->ring_size - 1;
 	head = pipe->head;
 	tail = pipe->tail;
 
 	n = pipe_occupancy(head, tail);
 	if (nr_slots < n) {
-		spin_unlock_irq(&pipe->rd_wait.lock);
+		__pipe_unlock(pipe);
 		kfree(bufs);
 		return -EBUSY;
 	}
@@ -1303,7 +1287,7 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
 	pipe->tail = tail;
 	pipe->head = head;
 
-	spin_unlock_irq(&pipe->rd_wait.lock);
+	__pipe_unlock(pipe);
 
 	/* This might have made more room for writers */
 	wake_up_interruptible(&pipe->wr_wait);
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 6cb65df3e3ba..f5084daf6eaf 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -2,6 +2,8 @@
 #ifndef _LINUX_PIPE_FS_I_H
 #define _LINUX_PIPE_FS_I_H
 
+#include <linux/fs.h>
+
 #define PIPE_DEF_BUFFERS	16
 
 #define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
@@ -223,6 +225,16 @@ static inline void pipe_discard_from(struct pipe_inode_info *pipe,
 #define PIPE_SIZE		PAGE_SIZE
 
 /* Pipe lock and unlock operations */
+static inline void __pipe_lock(struct pipe_inode_info *pipe)
+{
+	mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
+}
+
+static inline void __pipe_unlock(struct pipe_inode_info *pipe)
+{
+	mutex_unlock(&pipe->mutex);
+}
+
 void pipe_lock(struct pipe_inode_info *);
 void pipe_unlock(struct pipe_inode_info *);
 void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index a6f9bdd956c3..92e46cfe9419 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -108,7 +108,7 @@ static bool post_one_notification(struct watch_queue *wqueue,
 	if (!pipe)
 		return false;
 
-	spin_lock_irq(&pipe->rd_wait.lock);
+	__pipe_lock(pipe);
 
 	mask = pipe->ring_size - 1;
 	head = pipe->head;
@@ -135,17 +135,17 @@ static bool post_one_notification(struct watch_queue *wqueue,
 	buf->offset = offset;
 	buf->len = len;
 	buf->flags = PIPE_BUF_FLAG_WHOLE;
-	smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */
+	pipe->head = head + 1;
 
 	if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
-		spin_unlock_irq(&pipe->rd_wait.lock);
+		__pipe_unlock(pipe);
 		BUG();
 	}
 	wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
 	done = true;
 
 out:
-	spin_unlock_irq(&pipe->rd_wait.lock);
+	__pipe_unlock(pipe);
 	if (done)
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 	return done;

base-commit: c8451c141e07a8d05693f6c8d0e418fbb4b68bb7
-- 
2.31.1
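
For readers who want to see the locking pattern in isolation, here is a minimal
userspace sketch (not part of the patch): a pthread mutex stands in for
pipe->mutex taken via __pipe_lock/__pipe_unlock, and the names struct ring,
ring_post and ring_take are hypothetical. It only shows head/tail updates on a
power-of-two ring guarded by one sleeping lock instead of a separate spinlock;
it does not model the wait queues or atomic-context constraints of the real
pipe code.

/* Illustrative userspace analogue of the locking change above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 16u			/* power of two, like pipe->ring_size */

struct ring {
	pthread_mutex_t mutex;		/* stands in for pipe->mutex */
	unsigned int head;		/* next slot to fill */
	unsigned int tail;		/* next slot to drain */
	int slots[RING_SIZE];
};

static bool ring_full(unsigned int head, unsigned int tail)
{
	/* head/tail only ever increase; unsigned wraparound keeps this valid */
	return head - tail >= RING_SIZE;
}

/* Producer: the head check and the head update both run under the mutex. */
static bool ring_post(struct ring *r, int v)
{
	bool ok = false;

	pthread_mutex_lock(&r->mutex);
	if (!ring_full(r->head, r->tail)) {
		r->slots[r->head & (RING_SIZE - 1)] = v;
		r->head++;
		ok = true;
	}
	pthread_mutex_unlock(&r->mutex);
	return ok;
}

/* Consumer: tail is likewise only advanced under the same mutex. */
static bool ring_take(struct ring *r, int *v)
{
	bool ok = false;

	pthread_mutex_lock(&r->mutex);
	if (r->head != r->tail) {
		*v = r->slots[r->tail & (RING_SIZE - 1)];
		r->tail++;
		ok = true;
	}
	pthread_mutex_unlock(&r->mutex);
	return ok;
}

int main(void)
{
	struct ring r = { .mutex = PTHREAD_MUTEX_INITIALIZER };
	int v;

	ring_post(&r, 42);
	if (ring_take(&r, &v))
		printf("%d\n", v);
	return 0;
}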