Hello Tian Tao,

The patch 6753c561f653: "mm/zswap: add the flag can_sleep_mapped"
from Jan 20, 2021, leads to the following static checker warning:

	mm/zswap.c:947 zswap_writeback_entry()
	error: potentially dereferencing uninitialized 'entry'.

mm/zswap.c
   927  static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
   928  {
   929          struct zswap_header *zhdr;
   930          swp_entry_t swpentry;
   931          struct zswap_tree *tree;
   932          pgoff_t offset;
   933          struct zswap_entry *entry;
   934          struct page *page;
   935          struct scatterlist input, output;
   936          struct crypto_acomp_ctx *acomp_ctx;
   937
   938          u8 *src, *tmp;
   939          unsigned int dlen;
   940          int ret;
   941          struct writeback_control wbc = {
   942                  .sync_mode = WB_SYNC_NONE,
   943          };
   944
   945          if (!zpool_can_sleep_mapped(pool)) {
   946
   947                  tmp = kmalloc(entry->length, GFP_ATOMIC);
                                      ^^^^^^^^^^^^^
"entry" is uninitialized.

   948                  if (!tmp)
   949                          return -ENOMEM;
   950          }
   951
   952          /* extract swpentry from data */
   953          zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
   954          swpentry = zhdr->swpentry; /* here */
   955          tree = zswap_trees[swp_type(swpentry)];
   956          offset = swp_offset(swpentry);
   957
   958          /* find and ref zswap entry */
   959          spin_lock(&tree->lock);
   960          entry = zswap_entry_find_get(&tree->rbroot, offset);
   961          if (!entry) {
   962                  /* entry was invalidated */
   963                  spin_unlock(&tree->lock);
   964                  zpool_unmap_handle(pool, handle);
   965                  return 0;

Memory leak: "tmp" isn't freed on this return path.

   966          }
   967          spin_unlock(&tree->lock);
   968          BUG_ON(offset != entry->offset);
   969
   970          /* try to allocate swap cache page */
   971          switch (zswap_get_swap_cache_page(swpentry, &page)) {
   972          case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
   973                  ret = -ENOMEM;
   974                  goto fail;
   975
   976          case ZSWAP_SWAPCACHE_EXIST:
   977                  /* page is already in the swap cache, ignore for now */
   978                  put_page(page);
   979                  ret = -EEXIST;
   980                  goto fail;
   981
   982          case ZSWAP_SWAPCACHE_NEW: /* page is locked */
   983                  /* decompress */
   984                  acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
   985
   986                  dlen = PAGE_SIZE;
   987                  src = (u8 *)zhdr + sizeof(struct zswap_header);
   988
   989                  if (!zpool_can_sleep_mapped(pool)) {
   990
   991                          memcpy(tmp, src, entry->length);
   992                          src = tmp;
   993
   994                          zpool_unmap_handle(pool, handle);

Why not just do "src = tmp = kmemdup(src, entry->length, GFP_ATOMIC);"
right here?  That would avoid unnecessary allocations for the other
cases.

This path calls zpool_unmap_handle() and then frees the "tmp" buffer
at the end.  The other fail paths only free the "tmp" buffer but don't
call zpool_unmap_handle(), so is that a leak?
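
The kmemdup() version would look something like this (just an untested
sketch; "tmp" would also have to start out as NULL so that the kfree()
at the end stays safe on the fail paths hit before this point):

	if (!zpool_can_sleep_mapped(pool)) {
		/* copy the data out before dropping the mapping */
		src = tmp = kmemdup(src, entry->length, GFP_ATOMIC);
		zpool_unmap_handle(pool, handle);
		if (!tmp) {
			ret = -ENOMEM;
			goto fail;
		}
	}

Moving the allocation here would also fix the uninitialized read at
line 947, because "entry" has already been looked up by this point.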
   995                  }
   996
   997                  mutex_lock(acomp_ctx->mutex);
   998                  sg_init_one(&input, src, entry->length);
   999                  sg_init_table(&output, 1);
  1000                  sg_set_page(&output, page, PAGE_SIZE, 0);
  1001                  acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
  1002                  ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
  1003                  dlen = acomp_ctx->req->dlen;
  1004                  mutex_unlock(acomp_ctx->mutex);
  1005
  1006                  BUG_ON(ret);
  1007                  BUG_ON(dlen != PAGE_SIZE);
  1008
  1009                  /* page is up to date */
  1010                  SetPageUptodate(page);
  1011          }
  1012
  1013          /* move it to the tail of the inactive list after end_writeback */
  1014          SetPageReclaim(page);
  1015
  1016          /* start writeback */
  1017          __swap_writepage(page, &wbc, end_swap_bio_write);
  1018          put_page(page);
  1019          zswap_written_back_pages++;
  1020
  1021          spin_lock(&tree->lock);
  1022          /* drop local reference */
  1023          zswap_entry_put(tree, entry);
  1024
  1025          /*
  1026           * There are two possible situations for entry here:
  1027           * (1) refcount is 1(normal case), entry is valid and on the tree
  1028           * (2) refcount is 0, entry is freed and not on the tree
  1029           *     because invalidate happened during writeback
  1030           * search the tree and free the entry if find entry
  1031           */
  1032          if (entry == zswap_rb_search(&tree->rbroot, offset))
  1033                  zswap_entry_put(tree, entry);
  1034          spin_unlock(&tree->lock);
  1035
  1036          goto end;
  1037
  1038          /*
  1039           * if we get here due to ZSWAP_SWAPCACHE_EXIST
  1040           * a load may be happening concurrently.
  1041           * it is safe and okay to not free the entry.
  1042           * if we free the entry in the following put
  1043           * it is also okay to return !0
  1044           */
  1045  fail:
  1046          spin_lock(&tree->lock);
  1047          zswap_entry_put(tree, entry);
  1048          spin_unlock(&tree->lock);
  1049
  1050  end:
  1051          if (zpool_can_sleep_mapped(pool))
  1052                  zpool_unmap_handle(pool, handle);
  1053          else
  1054                  kfree(tmp);
  1055
  1056          return ret;
  1057  }
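
And if the answer to that last question is that the fail paths are
supposed to unmap too, one way to write the cleanup (again just an
untested sketch; it assumes a "zhdr = NULL;" is added right after the
early zpool_unmap_handle() call on line 994) would be:

end:
	if (zhdr)	/* NULL if we already unmapped at line 994 */
		zpool_unmap_handle(pool, handle);
	if (!zpool_can_sleep_mapped(pool))
		kfree(tmp);

	return ret;

regards,
dan carpenter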