On Fri, Nov 20, 2020 at 02:07:19PM -0400, Jason Gunthorpe wrote: > On Fri, Nov 20, 2020 at 10:54:43AM +0100, Daniel Vetter wrote: > > diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h > > index d5ece7a9a403..f94405d43fd1 100644 > > --- a/include/linux/sched/mm.h > > +++ b/include/linux/sched/mm.h > > @@ -180,6 +180,22 @@ static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } > > static inline void fs_reclaim_release(gfp_t gfp_mask) { } > > #endif > > > > +/** > > + * might_alloc - Marks possible allocation sites > > + * @gfp_mask: gfp_t flags that would be used to allocate > > + * > > + * Similar to might_sleep() and other annotations this can be used in functions > > + * that might allocate, but often don't. Compiles to nothing without > > + * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp allows blocking. > > + */ > > +static inline void might_alloc(gfp_t gfp_mask) > > +{ > > + fs_reclaim_acquire(gfp_mask); > > + fs_reclaim_release(gfp_mask); > > + > > + might_sleep_if(gfpflags_allow_blocking(gfp_mask)); > > +} > > Reviewed-by: Jason Gunthorpe <jgg@xxxxxxxxxx> > > Oh, I just had another thread with Matt about xarray, this would be > perfect to add before xas_nomem(): Yeah I think there's plenty of places where this will be useful. Want to slap a sob onto this diff so I can include it for the next round, or will you or Matt send this out when my might_alloc has landed? 
-Daniel > > diff --git a/lib/idr.c b/lib/idr.c > index f4ab4f4aa3c7f5..722d9ddff53221 100644 > --- a/lib/idr.c > +++ b/lib/idr.c > @@ -391,6 +391,8 @@ int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max, > if ((int)max < 0) > max = INT_MAX; > > + might_alloc(gfp); > + > retry: > xas_lock_irqsave(&xas, flags); > next: > diff --git a/lib/xarray.c b/lib/xarray.c > index 5fa51614802ada..dd260ee7dcae9a 100644 > --- a/lib/xarray.c > +++ b/lib/xarray.c > @@ -1534,6 +1534,8 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) > XA_STATE(xas, xa, index); > void *curr; > > + might_alloc(gfp); > + > if (WARN_ON_ONCE(xa_is_advanced(entry))) > return XA_ERROR(-EINVAL); > if (xa_track_free(xa) && !entry) > @@ -1600,6 +1602,8 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, > XA_STATE(xas, xa, index); > void *curr; > > + might_alloc(gfp); > + > if (WARN_ON_ONCE(xa_is_advanced(entry))) > return XA_ERROR(-EINVAL); > > @@ -1637,6 +1641,8 @@ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) > XA_STATE(xas, xa, index); > void *curr; > > + might_alloc(gfp); > + > if (WARN_ON_ONCE(xa_is_advanced(entry))) > return -EINVAL; > if (!entry) > @@ -1806,6 +1812,8 @@ int __xa_alloc(struct xarray *xa, u32 *id, void *entry, > { > XA_STATE(xas, xa, 0); > > + might_alloc(gfp); > + > if (WARN_ON_ONCE(xa_is_advanced(entry))) > return -EINVAL; > if (WARN_ON_ONCE(!xa_track_free(xa))) -- Daniel Vetter Software Engineer, Intel Corporation http://blog.ffwll.ch