On Fri, 15 Apr 2011, Christoph Lameter wrote: > The bit map of free objects in a slab page is determined in various functions > if debugging is enabled. > > Provide a common function for that purpose. > Although it makes writing to /sys/kernel/slab/cache/validate slower because of the double iteration in validate_slab(). > Signed-off-by: Christoph Lameter <cl@xxxxxxxxx> > > --- > mm/slub.c | 34 ++++++++++++++++++++++------------ > 1 file changed, 22 insertions(+), 12 deletions(-) > > Index: linux-2.6/mm/slub.c > =================================================================== > --- linux-2.6.orig/mm/slub.c 2011-03-30 14:09:27.000000000 -0500 > +++ linux-2.6/mm/slub.c 2011-03-30 14:30:24.000000000 -0500 > @@ -271,10 +271,6 @@ static inline void set_freepointer(struc > for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\ > __p += (__s)->size) > > -/* Scan freelist */ > -#define for_each_free_object(__p, __s, __free) \ > - for (__p = (__free); __p; __p = get_freepointer((__s), __p)) > - > /* Determine object index from a given position */ > static inline int slab_index(void *p, struct kmem_cache *s, void *addr) > { > @@ -330,6 +326,21 @@ static inline int oo_objects(struct kmem > return x.x & OO_MASK; > } > > +/* > + * Determine a map of object in use on a page. > + * > + * Slab lock or node listlock must be held to guarantee that the page does > + * not vanish from under us. > + */ > +static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) > +{ > + void *p; > + void *addr = page_address(page); > + > + for (p = page->freelist; p; p = get_freepointer(s, p)) > + set_bit(slab_index(p, s, addr), map); > +} > + > #ifdef CONFIG_SLUB_DEBUG > /* > * Debug settings: This generates a warning without CONFIG_SLUB_DEBUG: mm/slub.c:335: warning: 'get_map' defined but not used