Re: [PATCH] SCHED: attribute page lock and waitqueue functions as sched

On Thu, Nov 4, 2021 at 2:59 AM Matthew Wilcox <willy@xxxxxxxxxxxxx> wrote:
On Thu, Nov 04, 2021 at 02:47:03AM +0800, Jimmy Shiu wrote:
> Bug: 144961676
> Bug: 144713689
> Bug: 172212772

A bug number is meaningless without knowing which bug tracker it
refers to.  I suggest contemplating the meaning of the 'U' in URL.

> Signed-off-by: Minchan Kim <minchan@xxxxxxxxxx>
> Signed-off-by: Jimmy Shiu <jimmyshiu@xxxxxxxxxx>
> (cherry picked from commit 1e4de875d9e0cfaccf5131bcc709ae8646cdc168)

What tree is that commit ID in?  I suggest it's meaningless and
should be removed.

> @@ -687,7 +688,7 @@ static inline void folio_lock(struct folio *folio)
>  /*
>   * lock_page may only be called if we have the page's inode pinned.
>   */
> -static inline void lock_page(struct page *page)
> +static inline __sched void lock_page(struct page *page)

Why do you need to tag a static inline function as __sched?  This would
be the only place where that is done.
There's no guarantee that a function marked inline will actually be
inlined.  For example, when I checked System.map, `folio_wait_bit_common`
had not been inlined.
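
For reference (paraphrasing the current kernel sources, so treat this
as a sketch rather than gospel), __sched only places the function in
the .sched.text section, which get_wchan() skips when reporting a
blocked task's wait channel:

    /* include/linux/sched/debug.h: skip this function in wchan output */
    #define __sched __section(".sched.text")

    /* kernel/sched/core.c: the stack walker keeps unwinding while the
     * return address lies inside scheduler (or lock) text.
     */
    int in_sched_functions(unsigned long addr)
    {
            return in_lock_functions(addr) ||
                    (addr >= (unsigned long)__sched_text_start &&
                     addr < (unsigned long)__sched_text_end);
    }

So if the compiler emits folio_wait_bit_common out of line without the
attribute, wchan ends up reporting it instead of the caller we actually
care about.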

> +++ b/kernel/sched/wait.c
> @@ -404,7 +404,8 @@ void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_en
>  }
>  EXPORT_SYMBOL(finish_wait);

> -int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
> +__sched int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
> +                                  int sync, void *key)
>  {

This function doesn't sleep.  Why does it need to be tagged __sched?
Right, I will remove this. 
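
For context, the current body (paraphrased from kernel/sched/wait.c)
only wakes the task and unlinks the entry; it never blocks, so there
is nothing here for wchan to skip:

    int autoremove_wake_function(struct wait_queue_entry *wq_entry,
                                 unsigned int mode, int sync, void *key)
    {
            int ret = default_wake_function(wq_entry, mode, sync, key);

            /* On a successful wakeup, detach the entry from the queue. */
            if (ret)
                    list_del_init_careful(&wq_entry->entry);

            return ret;
    }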

> @@ -440,7 +441,7 @@ static inline bool is_kthread_should_stop(void)
>   * }                                         smp_mb(); // C
>   * remove_wait_queue(&wq_head, &wait);               wq_entry->flags |= WQ_FLAG_WOKEN;
>   */
> -long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
> +__sched long wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode, long timeout)

This one makes sense, but you've extended the length of the line past 80
columns, and it seems like it should be a separate patch with its own
justification.
OK, let me create a separate patch for this.
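
Something like the following wrapping (just a sketch) should keep the
declaration within 80 columns:

    __sched long wait_woken(struct wait_queue_entry *wq_entry,
                            unsigned int mode, long timeout)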

> +__sched int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
> +                             int sync, void *key)

This doesn't seem to sleep either?
Right, I will remove this.  

> +++ b/mm/filemap.c
> @@ -1271,7 +1271,7 @@ static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
>  /* How many times do we accept lock stealing from under a waiter? */
>  int sysctl_page_lock_unfairness = 5;

> -static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
> +static inline __sched int folio_wait_bit_common(struct folio *folio, int bit_nr,
>               int state, enum behavior behavior)
>  {
>       wait_queue_head_t *q = folio_waitqueue(folio);
> @@ -1411,13 +1411,13 @@ static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
>       return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
>  }

> -void folio_wait_bit(struct folio *folio, int bit_nr)
> +__sched void folio_wait_bit(struct folio *folio, int bit_nr)
>  {
>       folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
>  }
>  EXPORT_SYMBOL(folio_wait_bit);

> -int folio_wait_bit_killable(struct folio *folio, int bit_nr)
> +__sched int folio_wait_bit_killable(struct folio *folio, int bit_nr)
>  {
>       return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
>  }
> @@ -1628,21 +1628,21 @@ EXPORT_SYMBOL_GPL(page_endio);
>   * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
>   * @folio: The folio to lock
>   */
> -void __folio_lock(struct folio *folio)
> +__sched void __folio_lock(struct folio *folio)
>  {
>       folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
>                               EXCLUSIVE);
>  }
>  EXPORT_SYMBOL(__folio_lock);

> -int __folio_lock_killable(struct folio *folio)
> +__sched int __folio_lock_killable(struct folio *folio)
>  {
>       return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
>                                       EXCLUSIVE);
>  }
>  EXPORT_SYMBOL_GPL(__folio_lock_killable);

> -static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
> +static __sched int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
>  {
>       struct wait_queue_head *q = folio_waitqueue(folio);
>       int ret = 0;
> @@ -1679,7 +1679,7 @@ static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
>   * If neither ALLOW_RETRY nor KILLABLE are set, will always return true
>   * with the folio locked and the mmap_lock unperturbed.
>   */
> -bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
> +__sched bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
>                        unsigned int flags)
>  {
>       if (fault_flag_allow_retry_first(flags)) {
> --
> 2.34.0.rc0.344.g81b53c2807-goog
>
>
