[..]
>
> @@ -1555,28 +1473,35 @@ bool zswap_store(struct folio *folio)
>  insert_entry:
>  	entry->swpentry = swp;
>  	entry->objcg = objcg;
> -	if (objcg) {
> -		obj_cgroup_charge_zswap(objcg, entry->length);
> -		/* Account before objcg ref is moved to tree */

I do not understand this comment, but it seems to care about the charging
happening before the entry is added to the tree. This patch will move it
after the tree insertion.

Johannes, do you mind elaborating on what this comment is referring to? It
should be clarified, updated, or removed as part of this movement.

>
> -		count_objcg_event(objcg, ZSWPOUT);
> -	}
>
> -	/* map */
> -	spin_lock(&tree->lock);
>  	/*
>  	 * The folio may have been dirtied again, invalidate the
>  	 * possibly stale entry before inserting the new entry.
>  	 */
> -	if (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
> -		zswap_invalidate_entry(tree, dupentry);
> -		WARN_ON(zswap_rb_insert(&tree->rbroot, entry, &dupentry));
> +	old = xa_store(tree, offset, entry, GFP_KERNEL);
> +	if (xa_is_err(old)) {
> +		int err = xa_err(old);
> +		if (err == -ENOMEM)
> +			zswap_reject_alloc_fail++;
> +		else
> +			WARN_ONCE(err, "%s: xa_store failed: %d\n",
> +				  __func__, err);
> +		goto store_failed;
> +	}
> +	if (old)
> +		zswap_entry_free(old);
> +
> +	if (objcg) {
> +		obj_cgroup_charge_zswap(objcg, entry->length);
> +		/* Account before objcg ref is moved to tree */
> +		count_objcg_event(objcg, ZSWPOUT);
>  	}
> +
>  	if (entry->length) {
>  		INIT_LIST_HEAD(&entry->lru);
>  		zswap_lru_add(&zswap.list_lru, entry);
>  		atomic_inc(&zswap.nr_stored);
>  	}
> -	spin_unlock(&tree->lock);
>
>  	/* update stats */
>  	atomic_inc(&zswap_stored_pages);

[..]
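
As an aside, for anyone less familiar with the xarray API: the error handling
above follows the usual xa_store() idiom, where the return value is either the
previously stored entry or an error encoded as a pointer. A minimal sketch of
that idiom (the demo_tree/demo_store names and the kfree() of the old entry
are made up for illustration, not zswap's code):

    #include <linux/slab.h>
    #include <linux/xarray.h>

    static DEFINE_XARRAY(demo_tree);

    /* Store @entry at @index, freeing whatever was stored there before. */
    static int demo_store(unsigned long index, void *entry)
    {
    	void *old;

    	old = xa_store(&demo_tree, index, entry, GFP_KERNEL);
    	if (xa_is_err(old))
    		return xa_err(old);	/* e.g. -ENOMEM */
    	if (old)
    		kfree(old);		/* replaced an existing entry */
    	return 0;
    }

The nice property for zswap is that the store and the replacement of a stale
duplicate happen in one call, without the retry dance the rbtree code needed.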