Since a list entry may be removed, use list_for_each_entry_safe instead of list_for_each_entry. Signed-off-by: Bob Liu <lliubbo@xxxxxxxxx> --- mm/slob.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/slob.c b/mm/slob.c index 3f19a34..e2af18b 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -320,7 +320,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align) */ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) { - struct slob_page *sp; + struct slob_page *sp, *tmp; struct list_head *prev; struct list_head *slob_list; slob_t *b = NULL; @@ -335,7 +335,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) spin_lock_irqsave(&slob_lock, flags); /* Iterate through each partially free page, try to find room */ - list_for_each_entry(sp, slob_list, list) { + list_for_each_entry_safe(sp, tmp, slob_list, list) { #ifdef CONFIG_NUMA /* * If there's a node specification, search for a partial -- 1.5.6.3 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxxx For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>