From: Jérôme Glisse <jglisse@xxxxxxxxxx>

A common pattern in the code is that we have a buffer_head and want to get
the first buffer_head in the buffer_head list for its page. Before this
patch that was simply done with page_buffers(bh->b_page).

This patch introduces a helper, bh_first_for_page(struct buffer_head *),
which can use a new flag (also introduced in this patch) to find the first
buffer_head struct for a given page. For now the helper still uses
page_buffers(bh->b_page), but a later patch can update it to handle special
pages differently and instead scan the buffer_head list until a buffer_head
with the first_for_page flag set is found.

Signed-off-by: Jérôme Glisse <jglisse@xxxxxxxxxx>
Cc: Jens Axboe <axboe@xxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: linux-fsdevel@xxxxxxxxxxxxxxx
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Jan Kara <jack@xxxxxxx>
Cc: Josef Bacik <jbacik@xxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
---
 fs/buffer.c                 |  4 ++--
 include/linux/buffer_head.h | 18 ++++++++++++++++++
 2 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index 422204701a3b..44beba15c38d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -276,7 +276,7 @@ static void end_buffer_async_read(struct address_space *mapping,
 	 * two buffer heads end IO at almost the same time and both
 	 * decide that the page is now completely done.
 	 */
-	first = page_buffers(page);
+	first = bh_first_for_page(bh);
 	local_irq_save(flags);
 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
 	clear_buffer_async_read(bh);
@@ -332,7 +332,7 @@ void end_buffer_async_write(struct address_space *mapping, struct page *page,
 		SetPageError(page);
 	}
 
-	first = page_buffers(page);
+	first = bh_first_for_page(bh);
 	local_irq_save(flags);
 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
 
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 7ae60f59f27e..22e79307c055 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -39,6 +39,12 @@ enum bh_state_bits {
 	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
 	BH_Defer_Completion, /* Defer AIO completion to workqueue */
 
+	/*
+	 * First buffer_head for a page, i.e. page->private is pointing to
+	 * this buffer_head struct.
+	 */
+	BH_FirstForPage,
+
 	BH_PrivateStart,/* not a state bit, but the first bit available
 			 * for private allocation by other entities
 			 */
@@ -135,6 +141,7 @@ BUFFER_FNS(Unwritten, unwritten)
 BUFFER_FNS(Meta, meta)
 BUFFER_FNS(Prio, prio)
 BUFFER_FNS(Defer_Completion, defer_completion)
+BUFFER_FNS(FirstForPage, first_for_page)
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
 
@@ -278,11 +285,22 @@ void buffer_init(void);
  * inline definitions
  */
 
+/*
+ * bh_first_for_page - return the first buffer_head for a page
+ * @bh: buffer_head for which we want the first buffer_head of the same page
+ * Returns: first buffer_head within the same page as the given buffer_head
+ */
+static inline struct buffer_head *bh_first_for_page(struct buffer_head *bh)
+{
+	return page_buffers(bh->b_page);
+}
+
 static inline void attach_page_buffers(struct page *page,
 		struct buffer_head *head)
 {
 	get_page(page);
 	SetPagePrivate(page);
+	set_buffer_first_for_page(head);
 	set_page_private(page, (unsigned long)head);
 }
-- 
2.14.3
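
For illustration only, not part of this patch: a minimal sketch of how a
later patch might implement bh_first_for_page() by walking the circular
b_this_page list until it finds the buffer_head with the first_for_page
flag set, instead of going through page->private. buffer_first_for_page()
is the test function generated by the BUFFER_FNS(FirstForPage,
first_for_page) line above; the fallback to page_buffers() when no flagged
head is found is an assumption of this sketch, not something the patch
defines.

static inline struct buffer_head *bh_first_for_page(struct buffer_head *bh)
{
	struct buffer_head *first = bh;

	/* All buffer_heads of a page live on a circular b_this_page list. */
	while (!buffer_first_for_page(first)) {
		first = first->b_this_page;
		/* Came full circle without finding a flagged head. */
		if (first == bh)
			return page_buffers(bh->b_page);
	}
	return first;
}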