Re: [PATCH v2 03/11] mm: add support for async page locking

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Fri, 22 May 2020 14:57:20 -0600 Jens Axboe wrote:
> 
> I did some reshuffling of this patch before sending it out, and
> I ended up sending a previous version. Please look at this one instead.
> 
> commit d8f0a0bfc4a0742cb461287561b956bc56e90976
> Author: Jens Axboe <axboe@xxxxxxxxx>
> Date:   Fri May 22 09:12:09 2020 -0600
> 
>     mm: add support for async page locking
>     
>     Normally waiting for a page to become unlocked, or locking the page,
>     requires waiting for IO to complete. Add support for lock_page_async()
>     and wait_on_page_locked_async(), which are callback based instead. This
>     allows a caller to get notified when a page becomes unlocked, rather
>     than wait for it.
>     
>     We use the iocb->private field to pass in this necessary data for this
>     to happen. struct wait_page_key is made public, and we define struct
>     wait_page_async as the interface between the caller and the core.
>     
>     Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
> 
> diff --git a/include/linux/fs.h b/include/linux/fs.h
> index 7e84d823c6a8..82b989695ab9 100644
> --- a/include/linux/fs.h
> +++ b/include/linux/fs.h
> @@ -314,6 +314,8 @@ enum rw_hint {
>  #define IOCB_SYNC		(1 << 5)
>  #define IOCB_WRITE		(1 << 6)
>  #define IOCB_NOWAIT		(1 << 7)
> +/* iocb->private holds wait_page_async struct */
> +#define IOCB_WAITQ		(1 << 8)
>  
>  struct kiocb {
>  	struct file		*ki_filp;
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index a8f7bd8ea1c6..e260bcd071e4 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -456,8 +456,21 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
>  	return pgoff;
>  }
>  
> +/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
> +struct wait_page_key {
> +	struct page *page;
> +	int bit_nr;
> +	int page_match;
> +};
> +
> +struct wait_page_async {
> +	struct wait_queue_entry wait;
> +	struct wait_page_key key;
> +};
> +
>  extern void __lock_page(struct page *page);
>  extern int __lock_page_killable(struct page *page);
> +extern int __lock_page_async(struct page *page, struct wait_page_async *wait);
>  extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
>  				unsigned int flags);
>  extern void unlock_page(struct page *page);
> @@ -494,6 +507,14 @@ static inline int lock_page_killable(struct page *page)
>  	return 0;
>  }
>  
Feel free to add a kernel-doc comment for this helper to avoid toe-curling...

> +static inline int lock_page_async(struct page *page,
> +				  struct wait_page_async *wait)
> +{
> +	if (!trylock_page(page))
> +		return __lock_page_async(page, wait);
> +	return 0;
> +}
> +
>  /*
>   * lock_page_or_retry - Lock the page, unless this would block and the
>   * caller indicated that it can handle a retry.
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 80747f1377d5..ebee7350ea3b 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -990,13 +990,6 @@ void __init pagecache_init(void)
>  	page_writeback_init();
>  }
>  
> -/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
> -struct wait_page_key {
> -	struct page *page;
> -	int bit_nr;
> -	int page_match;
> -};
> -
>  struct wait_page_queue {
>  	struct page *page;
>  	int bit_nr;
> @@ -1210,6 +1203,33 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
>  }
>  EXPORT_SYMBOL(wait_on_page_bit_killable);
>  
> +static int __wait_on_page_locked_async(struct page *page,
> +				       struct wait_page_async *wait)
> +{
> +	struct wait_queue_head *q = page_waitqueue(page);
> +	int ret = 0;
> +
> +	wait->key.page = page;
> +	wait->key.bit_nr = PG_locked;
> +
> +	spin_lock_irq(&q->lock);
> +	if (PageLocked(page)) {
> +		__add_wait_queue_entry_tail(q, &wait->wait);
> +		SetPageWaiters(page);
> +		ret = -EIOCBQUEUED;
> +	}
> +	spin_unlock_irq(&q->lock);
> +	return ret;
> +}
> +
> +static int wait_on_page_locked_async(struct page *page,
> +				     struct wait_page_async *wait)
> +{
> +	if (!PageLocked(page))
> +		return 0;
> +	return __wait_on_page_locked_async(compound_head(page), wait);
> +}
> +
>  /**
>   * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
>   * @page: The page to wait for.
> @@ -1372,6 +1392,11 @@ int __lock_page_killable(struct page *__page)
>  }
>  EXPORT_SYMBOL_GPL(__lock_page_killable);
>  
> +int __lock_page_async(struct page *page, struct wait_page_async *wait)
> +{
> +	return wait_on_page_locked_async(page, wait);

The slow path seems harder to read than the fast one.  A quick question
arises: is the page actually locked after this returns?





[Index of Archives]     [Linux ARM Kernel]     [Linux ARM]     [Linux Omap]     [Fedora ARM]     [IETF Annouce]     [Bugtraq]     [Linux OMAP]     [Linux MIPS]     [eCos]     [Asterisk Internet PBX]     [Linux API]

  Powered by Linux