On Tue, Apr 04, 2023 at 04:53:17PM +0200, Andrey Albershteyn wrote:
> In case of different Merkle tree block size fs-verity expects
> ->read_merkle_tree_page() to return Merkle tree page filled with
> Merkle tree blocks. The XFS stores each merkle tree block under
> extended attribute. Those attributes are addressed by block offset
> into Merkle tree.
> 
> This patch make ->read_merkle_tree_page() to fetch multiple merkle
> tree blocks based on size ratio. Also the reference to each xfs_buf
> is passed with page->private to ->drop_page().
> 
> Signed-off-by: Andrey Albershteyn <aalbersh@xxxxxxxxxx>
> ---
>  fs/xfs/xfs_verity.c | 74 +++++++++++++++++++++++++++++++++++----------
>  fs/xfs/xfs_verity.h |  8 +++++
>  2 files changed, 66 insertions(+), 16 deletions(-)
> 
> diff --git a/fs/xfs/xfs_verity.c b/fs/xfs/xfs_verity.c
> index a9874ff4efcd..ef0aff216f06 100644
> --- a/fs/xfs/xfs_verity.c
> +++ b/fs/xfs/xfs_verity.c
> @@ -134,6 +134,10 @@ xfs_read_merkle_tree_page(
>  	struct page *page = NULL;
>  	__be64 name = cpu_to_be64(index << PAGE_SHIFT);
>  	uint32_t bs = 1 << log_blocksize;
> +	int blocks_per_page =
> +		(1 << (PAGE_SHIFT - log_blocksize));
> +	int n = 0;
> +	int offset = 0;
>  	struct xfs_da_args args = {
>  		.dp		= ip,
>  		.attr_filter	= XFS_ATTR_VERITY,
> @@ -143,26 +147,59 @@ xfs_read_merkle_tree_page(
>  		.valuelen	= bs,
>  	};
>  	int error = 0;
> +	bool is_checked = true;
> +	struct xfs_verity_buf_list *buf_list;
> 
>  	page = alloc_page(GFP_KERNEL);
>  	if (!page)
>  		return ERR_PTR(-ENOMEM);
> 
> -	error = xfs_attr_get(&args);
> -	if (error) {
> -		kmem_free(args.value);
> -		xfs_buf_rele(args.bp);
> +	buf_list = kzalloc(sizeof(struct xfs_verity_buf_list), GFP_KERNEL);
> +	if (!buf_list) {
>  		put_page(page);
> -		return ERR_PTR(-EFAULT);
> +		return ERR_PTR(-ENOMEM);
>  	}
> 
> -	if (args.bp->b_flags & XBF_VERITY_CHECKED)
> +	/*
> +	 * Fill the page with Merkle tree blocks. The blcoks_per_page is higher
> +	 * than 1 when fs block size != PAGE_SIZE or Merkle tree block size !=
> +	 * PAGE SIZE
> +	 */
> +	for (n = 0; n < blocks_per_page; n++) {

Ahah, ok, that's why we can't pass the xfs_buf pages up to fsverity.

> +		offset = bs * n;
> +		name = cpu_to_be64(((index << PAGE_SHIFT) + offset));

Really this ought to be a typechecked helper...

	struct xfs_fsverity_merkle_key {
		__be64	merkleoff;
	};

	static inline void
	xfs_fsverity_merkle_key_to_disk(struct xfs_fsverity_merkle_key *k, loff_t pos)
	{
		k->merkleoff = cpu_to_be64(pos);
	}
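...and then the lookup loop could fill one of those in instead of
open-coding the be64 conversion each time through.  Something like this,
totally untested, with the struct and function names being the ones I
just made up above (args.namelen would then be sizeof(key), which is the
same 8 bytes as the bare __be64 it replaces):

	struct xfs_fsverity_merkle_key	key;

	/* build the xattr name for the merkle tree block at this offset */
	xfs_fsverity_merkle_key_to_disk(&key, ((loff_t)index << PAGE_SHIFT) + offset);
	args.name = (const uint8_t *)&key;
	args.namelen = sizeof(key);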
> +		args.name = (const uint8_t *)&name;
> +
> +		error = xfs_attr_get(&args);
> +		if (error) {
> +			kmem_free(args.value);
> +			/*
> +			 * No more Merkle tree blocks (e.g. this was the last
> +			 * block of the tree)
> +			 */
> +			if (error == -ENOATTR)
> +				break;
> +			xfs_buf_rele(args.bp);
> +			put_page(page);
> +			kmem_free(buf_list);
> +			return ERR_PTR(-EFAULT);
> +		}
> +
> +		buf_list->bufs[buf_list->buf_count++] = args.bp;
> +
> +		/* One of the buffers was dropped */
> +		if (!(args.bp->b_flags & XBF_VERITY_CHECKED))
> +			is_checked = false;

If there's enough memory pressure to cause the merkle tree pages to get
evicted, what are the chances that the xfs_bufs survive the eviction?

> +		memcpy(page_address(page) + offset, args.value, args.valuelen);
> +		kmem_free(args.value);
> +		args.value = NULL;
> +	}
> +
> +	if (is_checked)
>  		SetPageChecked(page);
> +	page->private = (unsigned long)buf_list;
> 
> -	page->private = (unsigned long)args.bp;
> -	memcpy(page_address(page), args.value, args.valuelen);
> -
> -	kmem_free(args.value);
>  	return page;
>  }
> 
> @@ -191,16 +228,21 @@ xfs_write_merkle_tree_block(
> 
>  static void
>  xfs_drop_page(
> -	struct page	*page)
> +	struct page		*page)
>  {
> -	struct xfs_buf *buf = (struct xfs_buf *)page->private;
> +	int			i = 0;
> +	struct xfs_verity_buf_list	*buf_list =
> +		(struct xfs_verity_buf_list *)page->private;
> 
> -	ASSERT(buf != NULL);
> +	ASSERT(buf_list != NULL);
> 
> -	if (PageChecked(page))
> -		buf->b_flags |= XBF_VERITY_CHECKED;
> +	for (i = 0; i < buf_list->buf_count; i++) {
> +		if (PageChecked(page))
> +			buf_list->bufs[i]->b_flags |= XBF_VERITY_CHECKED;
> +		xfs_buf_rele(buf_list->bufs[i]);
> +	}
> 
> -	xfs_buf_rele(buf);
> +	kmem_free(buf_list);
>  	put_page(page);
>  }
> 
> diff --git a/fs/xfs/xfs_verity.h b/fs/xfs/xfs_verity.h
> index ae5d87ca32a8..433b2f4ae3bc 100644
> --- a/fs/xfs/xfs_verity.h
> +++ b/fs/xfs/xfs_verity.h
> @@ -16,4 +16,12 @@ extern const struct fsverity_operations xfs_verity_ops;
>  #define xfs_verity_ops NULL
>  #endif /* CONFIG_FS_VERITY */
> 
> +/* Minimal Merkle tree block size is 1024 */
> +#define XFS_VERITY_MAX_MBLOCKS_PER_PAGE (1 << (PAGE_SHIFT - 10))
> +
> +struct xfs_verity_buf_list {
> +	unsigned int	buf_count;
> +	struct xfs_buf	*bufs[XFS_VERITY_MAX_MBLOCKS_PER_PAGE];

So... this is going to be a 520-byte allocation on arm64 with 64k
pages?  Even if the merkle tree block size is the same as the page
size?  Ouch.

--D

> +};
> +
>  #endif /* __XFS_VERITY_H__ */
> -- 
> 2.38.4
> 
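(For anyone wondering where the 520 above comes from, a sketch of the
math, assuming 64k pages, 8-byte pointers, and the usual struct padding:
PAGE_SHIFT is 16, so XFS_VERITY_MAX_MBLOCKS_PER_PAGE is 1 << (16 - 10) = 64,
which means 64 * 8 = 512 bytes of bufs[], plus 4 bytes of buf_count and
4 bytes of padding before the pointer array.)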