On Mon, Jun 06, 2016 at 11:11:51PM +0300, Dan Carpenter wrote:
> Hello Minchan Kim,
>
> The patch 312fcae22703: "zsmalloc: support compaction" from Apr 15,
> 2015, leads to the following static checker warning:
>
>         mm/zsmalloc.c:1521 obj_malloc()
>         warn: 'OBJ_ALLOCATED_TAG' is a shifter (not for '|=').
>
> mm/zsmalloc.c
>   1510  static unsigned long obj_malloc(struct size_class *class,
>   1511                                  struct zspage *zspage, unsigned long handle)
>   1512  {
>   1513          int i, nr_page, offset;
>   1514          unsigned long obj;
>   1515          struct link_free *link;
>   1516
>   1517          struct page *m_page;
>   1518          unsigned long m_offset;
>   1519          void *vaddr;
>   1520
>   1521          handle |= OBJ_ALLOCATED_TAG;
>                           ^^^^^^^^^^^^^^^^^
> It's weird to use the same define for a bit number
>
>   1522          obj = get_freeobj(zspage);
>   1523
>   1524          offset = obj * class->size;
>   1525          nr_page = offset >> PAGE_SHIFT;
>   1526          m_offset = offset & ~PAGE_MASK;
>   1527          m_page = get_first_page(zspage);
>   1528
>   1529          for (i = 0; i < nr_page; i++)
>   1530                  m_page = get_next_page(m_page);
>   1531
>   1532          vaddr = kmap_atomic(m_page);
>   1533          link = (struct link_free *)vaddr + m_offset / sizeof(*link);
>   1534          set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG);
>                                              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> And also a bit shifter.  TAG normally implies it is a bit and not a
> shift?

Thanks for the report, Dan!

From 42d08096ad7ea255fad36da215fea4c148b919f8 Mon Sep 17 00:00:00 2001
From: Minchan Kim <minchan@xxxxxxxxxx>
Date: Tue, 7 Jun 2016 13:46:09 +0900
Subject: [PATCH] zsmalloc: use OBJ_TAG_BITS for bit shifter

The static checker warns about using a tag as a bit shifter. It doesn't
break the current behavior, but it is not good for readability. Let's
use OBJ_TAG_BITS as the bit shifter instead of OBJ_ALLOCATED_TAG.

Cc: Sergey Senozhatsky <sergey.senozhatsky.work@xxxxxxxxx>
Reported-by: Dan Carpenter <dan.carpenter@xxxxxxxxxx>
Signed-off-by: Minchan Kim <minchan@xxxxxxxxxx>
---
 mm/zsmalloc.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index a575b47e6c92..aec70d741503 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1065,7 +1065,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
                link = (struct link_free *)vaddr + off / sizeof(*link);
 
                while ((off += class->size) < PAGE_SIZE) {
-                       link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+                       link->next = freeobj++ << OBJ_TAG_BITS;
                        link += class->size / sizeof(*link);
                }
 
@@ -1076,13 +1076,13 @@ static void init_zspage(struct size_class *class, struct zspage *zspage)
                 */
                next_page = get_next_page(page);
                if (next_page) {
-                       link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+                       link->next = freeobj++ << OBJ_TAG_BITS;
                } else {
                        /*
-                        * Reset OBJ_ALLOCATED_TAG bit to last link to tell
+                        * Reset OBJ_TAG_BITS bit to last link to tell
                         * whether it's allocated object or not.
                         */
-                       link->next = -1 << OBJ_ALLOCATED_TAG;
+                       link->next = -1 << OBJ_TAG_BITS;
                }
                kunmap_atomic(vaddr);
                page = next_page;
@@ -1531,7 +1531,7 @@ static unsigned long obj_malloc(struct size_class *class,
 
        vaddr = kmap_atomic(m_page);
        link = (struct link_free *)vaddr + m_offset / sizeof(*link);
-       set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG);
+       set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
        if (likely(!PageHugeObject(m_page)))
                /* record handle in the header of allocated chunk */
                link->handle = handle;
@@ -1633,7 +1633,7 @@ static void obj_free(struct size_class *class, unsigned long obj)
 
        /* Insert this object in containing zspage's freelist */
        link = (struct link_free *)(vaddr + f_offset);
-       link->next = get_freeobj(zspage) << OBJ_ALLOCATED_TAG;
+       link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
        kunmap_atomic(vaddr);
        set_freeobj(zspage, f_objidx);
        mod_zspage_inuse(zspage, -1);
--
1.9.1
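
For context on why the checker complained: OBJ_ALLOCATED_TAG is a bit mask
that obj_malloc() ORs into a handle to mark the slot as allocated, while
OBJ_TAG_BITS is the number of low-order bits reserved for such tags, i.e.
the shift count used to pack a free-object index into link->next. Both
happen to be defined as 1 in mm/zsmalloc.c, which is why shifting by
OBJ_ALLOCATED_TAG produced correct code even though it reads wrong. The
stand-alone sketch below is not kernel code; the variable names and sample
values are invented for illustration and only show the mask-versus-shift
distinction:

/*
 * Illustrative sketch only: both macros are 1, matching mm/zsmalloc.c,
 * but the first is used as a shift count and the second as a bit mask.
 */
#include <stdio.h>

#define OBJ_TAG_BITS            1       /* low bits reserved for tags (shift count) */
#define OBJ_ALLOCATED_TAG       1       /* bit mask: this slot stores a handle */

int main(void)
{
        unsigned long freeobj = 42;     /* hypothetical index of the next free object */

        /* Freelist entry: index packed above the tag bits, allocated bit clear. */
        unsigned long link_next = freeobj << OBJ_TAG_BITS;

        /* Allocated slot: a handle with the allocated tag bit set. */
        unsigned long handle = 0xabcd00UL | OBJ_ALLOCATED_TAG;

        printf("freelist entry 0x%lx: index %lu, allocated bit %lu\n",
               link_next, link_next >> OBJ_TAG_BITS, link_next & OBJ_ALLOCATED_TAG);
        printf("handle         0x%lx: allocated bit %lu\n",
               handle, handle & OBJ_ALLOCATED_TAG);
        return 0;
}

After the rename, every shift in mm/zsmalloc.c uses OBJ_TAG_BITS and every
mask test uses OBJ_ALLOCATED_TAG, so the two roles are no longer conflated.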