Hi Fabio,

> On Jul 20, 2022, at 8:43 AM, Fabio M. De Francesco <fmdefrancesco@xxxxxxxxx> wrote:
> 
> kmap() is being deprecated in favor of kmap_local_page().
> 
> Two main problems with kmap(): (1) It comes with an overhead as mapping
> space is restricted and protected by a global lock for synchronization and
> (2) it also requires global TLB invalidation when the kmap’s pool wraps
> and it might block when the mapping space is fully utilized until a slot
> becomes available.
> 
> With kmap_local_page() the mappings are per thread, CPU local, can take
> page faults, and can be called from any context (including interrupts).
> It is faster than kmap() in kernels with HIGHMEM enabled. Furthermore,
> the tasks can be preempted and, when they are scheduled to run again, the
> kernel virtual addresses are restored and still valid.
> 
> Since its use in bnode.c is safe everywhere, it should be preferred.
> 
> Therefore, replace kmap() with kmap_local_page() in bnode.c. Where
> possible, use the suited standard helpers (memzero_page(), memcpy_page())
> instead of open coding kmap_local_page() plus memset() or memcpy().
> 

Looks reasonable. Makes sense from my point of view.
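For anyone who picks this series up as a template for further conversions,
the shape of the change is roughly the following. This is a minimal sketch
with made-up function names (copy_out_kmap() and friends are illustrations,
not code from the patch):

#include <linux/highmem.h>

/* Old pattern: kmap() takes a slot in a small global pool that is
 * serialized by a global lock, and kunmap() is keyed by the page. */
static void copy_out_kmap(struct page *page, void *buf, int off, int len)
{
	void *vaddr = kmap(page);

	memcpy(buf, vaddr + off, len);
	kunmap(page);
}

/* New pattern: kmap_local_page() is per-thread and CPU-local; the
 * mapping is released by address, and nested local mappings must be
 * released in the reverse order of creation. */
static void copy_out_local(struct page *page, void *buf, int off, int len)
{
	void *vaddr = kmap_local_page(page);

	memcpy(buf, vaddr + off, len);
	kunmap_local(vaddr);
}

/* Where the mapping only brackets a single memcpy()/memset(), the
 * highmem helpers collapse map + copy + unmap into one call. */
static void copy_out_helper(struct page *page, void *buf, int off, int len)
{
	memcpy_from_page(buf, page, off, len);
}

The reverse-order rule is the only subtle part, and the patch respects it
in hfs_bnode_copy() and hfs_bnode_move(): dst_page is mapped before
src_page, and src_ptr is unmapped before dst_ptr.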

Reviewed-by: Viacheslav Dubeyko <slava@xxxxxxxxxxx>

Thanks,
Slava.

> Suggested-by: Ira Weiny <ira.weiny@xxxxxxxxx>
> Signed-off-by: Fabio M. De Francesco <fmdefrancesco@xxxxxxxxx>
> ---
> fs/hfsplus/bnode.c | 105 +++++++++++++++++++++------------------------
> 1 file changed, 48 insertions(+), 57 deletions(-)
> 
> diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
> index 177fae4e6581..3a1c77d0df48 100644
> --- a/fs/hfsplus/bnode.c
> +++ b/fs/hfsplus/bnode.c
> @@ -29,14 +29,12 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
>  	off &= ~PAGE_MASK;
> 
>  	l = min_t(int, len, PAGE_SIZE - off);
> -	memcpy(buf, kmap(*pagep) + off, l);
> -	kunmap(*pagep);
> +	memcpy_from_page(buf, *pagep, off, l);
> 
>  	while ((len -= l) != 0) {
>  		buf += l;
>  		l = min_t(int, len, PAGE_SIZE);
> -		memcpy(buf, kmap(*++pagep), l);
> -		kunmap(*pagep);
> +		memcpy_from_page(buf, *++pagep, 0, l);
>  	}
>  }
> 
> @@ -82,16 +80,14 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
>  	off &= ~PAGE_MASK;
> 
>  	l = min_t(int, len, PAGE_SIZE - off);
> -	memcpy(kmap(*pagep) + off, buf, l);
> +	memcpy_to_page(*pagep, off, buf, l);
>  	set_page_dirty(*pagep);
> -	kunmap(*pagep);
> 
>  	while ((len -= l) != 0) {
>  		buf += l;
>  		l = min_t(int, len, PAGE_SIZE);
> -		memcpy(kmap(*++pagep), buf, l);
> +		memcpy_to_page(*++pagep, 0, buf, l);
>  		set_page_dirty(*pagep);
> -		kunmap(*pagep);
>  	}
>  }
> 
> @@ -112,15 +108,13 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
>  	off &= ~PAGE_MASK;
> 
>  	l = min_t(int, len, PAGE_SIZE - off);
> -	memset(kmap(*pagep) + off, 0, l);
> +	memzero_page(*pagep, off, l);
>  	set_page_dirty(*pagep);
> -	kunmap(*pagep);
> 
>  	while ((len -= l) != 0) {
>  		l = min_t(int, len, PAGE_SIZE);
> -		memset(kmap(*++pagep), 0, l);
> +		memzero_page(*++pagep, 0, l);
>  		set_page_dirty(*pagep);
> -		kunmap(*pagep);
>  	}
>  }
> 
> @@ -142,24 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
> 
>  	if (src == dst) {
>  		l = min_t(int, len, PAGE_SIZE - src);
> -		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
> -		kunmap(*src_page);
> +		memcpy_page(*dst_page, src, *src_page, src, l);
>  		set_page_dirty(*dst_page);
> -		kunmap(*dst_page);
> 
>  		while ((len -= l) != 0) {
>  			l = min_t(int, len, PAGE_SIZE);
> -			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
> -			kunmap(*src_page);
> +			memcpy_page(*++dst_page, 0, *++src_page, 0, l);
>  			set_page_dirty(*dst_page);
> -			kunmap(*dst_page);
>  		}
>  	} else {
>  		void *src_ptr, *dst_ptr;
> 
>  		do {
> -			src_ptr = kmap(*src_page) + src;
> -			dst_ptr = kmap(*dst_page) + dst;
> +			dst_ptr = kmap_local_page(*dst_page) + dst;
> +			src_ptr = kmap_local_page(*src_page) + src;
>  			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
>  				l = PAGE_SIZE - src;
>  				src = 0;
> @@ -171,9 +161,9 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
>  			}
>  			l = min(len, l);
>  			memcpy(dst_ptr, src_ptr, l);
> -			kunmap(*src_page);
> +			kunmap_local(src_ptr);
>  			set_page_dirty(*dst_page);
> -			kunmap(*dst_page);
> +			kunmap_local(dst_ptr);
>  			if (!dst)
>  				dst_page++;
>  			else
> @@ -185,6 +175,7 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
>  void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
>  {
>  	struct page **src_page, **dst_page;
> +	void *src_ptr, *dst_ptr;
>  	int l;
> 
>  	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
> @@ -202,27 +193,28 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
> 
>  		if (src == dst) {
>  			while (src < len) {
> -				memmove(kmap(*dst_page), kmap(*src_page), src);
> -				kunmap(*src_page);
> +				dst_ptr = kmap_local_page(*dst_page);
> +				src_ptr = kmap_local_page(*src_page);
> +				memmove(dst_ptr, src_ptr, src);
> +				kunmap_local(src_ptr);
>  				set_page_dirty(*dst_page);
> -				kunmap(*dst_page);
> +				kunmap_local(dst_ptr);
>  				len -= src;
>  				src = PAGE_SIZE;
>  				src_page--;
>  				dst_page--;
>  			}
>  			src -= len;
> -			memmove(kmap(*dst_page) + src,
> -				kmap(*src_page) + src, len);
> -			kunmap(*src_page);
> +			dst_ptr = kmap_local_page(*dst_page);
> +			src_ptr = kmap_local_page(*src_page);
> +			memmove(dst_ptr + src, src_ptr + src, len);
> +			kunmap_local(src_ptr);
>  			set_page_dirty(*dst_page);
> -			kunmap(*dst_page);
> +			kunmap_local(dst_ptr);
>  		} else {
> -			void *src_ptr, *dst_ptr;
> -
>  			do {
> -				src_ptr = kmap(*src_page) + src;
> -				dst_ptr = kmap(*dst_page) + dst;
> +				dst_ptr = kmap_local_page(*dst_page) + dst;
> +				src_ptr = kmap_local_page(*src_page) + src;
>  				if (src < dst) {
>  					l = src;
>  					src = PAGE_SIZE;
> @@ -234,9 +226,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
>  				}
>  				l = min(len, l);
>  				memmove(dst_ptr - l, src_ptr - l, l);
> -				kunmap(*src_page);
> +				kunmap_local(src_ptr);
>  				set_page_dirty(*dst_page);
> -				kunmap(*dst_page);
> +				kunmap_local(dst_ptr);
>  				if (dst == PAGE_SIZE)
>  					dst_page--;
>  				else
> @@ -251,26 +243,27 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
> 
>  		if (src == dst) {
>  			l = min_t(int, len, PAGE_SIZE - src);
> -			memmove(kmap(*dst_page) + src,
> -				kmap(*src_page) + src, l);
> -			kunmap(*src_page);
> +
> +			dst_ptr = kmap_local_page(*dst_page) + src;
> +			src_ptr = kmap_local_page(*src_page) + src;
> +			memmove(dst_ptr, src_ptr, l);
> +			kunmap_local(src_ptr);
>  			set_page_dirty(*dst_page);
> -			kunmap(*dst_page);
> +			kunmap_local(dst_ptr);
> 
>  			while ((len -= l) != 0) {
>  				l = min_t(int, len, PAGE_SIZE);
> -				memmove(kmap(*++dst_page),
> -					kmap(*++src_page), l);
> -				kunmap(*src_page);
> +				dst_ptr = kmap_local_page(*++dst_page);
> +				src_ptr = kmap_local_page(*++src_page);
> +				memmove(dst_ptr, src_ptr, l);
> +				kunmap_local(src_ptr);
>  				set_page_dirty(*dst_page);
> -				kunmap(*dst_page);
> +				kunmap_local(dst_ptr);
>  			}
>  		} else {
> -			void *src_ptr, *dst_ptr;
> -
>  			do {
> -				src_ptr = kmap(*src_page) + src;
> -				dst_ptr = kmap(*dst_page) + dst;
> +				dst_ptr = kmap_local_page(*dst_page) + dst;
> +				src_ptr = kmap_local_page(*src_page) + src;
>  				if (PAGE_SIZE - src <
>  						PAGE_SIZE - dst) {
>  					l = PAGE_SIZE - src;
> @@ -283,9 +276,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
>  				}
>  				l = min(len, l);
>  				memmove(dst_ptr, src_ptr, l);
> -				kunmap(*src_page);
> +				kunmap_local(src_ptr);
>  				set_page_dirty(*dst_page);
> -				kunmap(*dst_page);
> +				kunmap_local(dst_ptr);
>  				if (!dst)
>  					dst_page++;
>  				else
> @@ -502,14 +495,14 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
>  	if (!test_bit(HFS_BNODE_NEW, &node->flags))
>  		return node;
> 
> -	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
> -			node->page_offset);
> +	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
> +					 node->page_offset);
>  	node->prev = be32_to_cpu(desc->prev);
>  	node->next = be32_to_cpu(desc->next);
>  	node->num_recs = be16_to_cpu(desc->num_recs);
>  	node->type = desc->type;
>  	node->height = desc->height;
> -	kunmap(node->page[0]);
> +	kunmap_local(desc);
> 
>  	switch (node->type) {
>  	case HFS_NODE_HEADER:
> @@ -593,14 +586,12 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
>  	}
> 
>  	pagep = node->page;
> -	memset(kmap(*pagep) + node->page_offset, 0,
> -	       min_t(int, PAGE_SIZE, tree->node_size));
> +	memzero_page(*pagep, node->page_offset,
> +		     min_t(int, PAGE_SIZE, tree->node_size));
>  	set_page_dirty(*pagep);
> -	kunmap(*pagep);
>  	for (i = 1; i < tree->pages_per_bnode; i++) {
> -		memset(kmap(*++pagep), 0, PAGE_SIZE);
> +		memzero_page(*++pagep, 0, PAGE_SIZE);
>  		set_page_dirty(*pagep);
> -		kunmap(*pagep);
>  	}
>  	clear_bit(HFS_BNODE_NEW, &node->flags);
>  	wake_up(&node->lock_wq);
> -- 
> 2.37.1
> 