On Wed, Dec 18, 2019 at 06:51:34AM -0800, Satya Tangirala wrote:
> diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
> index 1f4b8a277060..d28d8e803554 100644
> --- a/fs/crypto/bio.c
> +++ b/fs/crypto/bio.c
> @@ -46,26 +46,35 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
>  {
>  	const unsigned int blockbits = inode->i_blkbits;
>  	const unsigned int blocksize = 1 << blockbits;
> +	const bool inlinecrypt = fscrypt_inode_uses_inline_crypto(inode);
>  	struct page *ciphertext_page;
>  	struct bio *bio;
>  	int ret, err = 0;
>  
> -	ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
> -	if (!ciphertext_page)
> -		return -ENOMEM;
> +	if (inlinecrypt) {
> +		ciphertext_page = ZERO_PAGE(0);
> +	} else {
> +		ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
> +		if (!ciphertext_page)
> +			return -ENOMEM;
> +	}
>  
>  	while (len--) {
> -		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
> -					  ZERO_PAGE(0), ciphertext_page,
> -					  blocksize, 0, GFP_NOFS);
> -		if (err)
> -			goto errout;
> +		if (!inlinecrypt) {
> +			err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
> +						  ZERO_PAGE(0), ciphertext_page,
> +						  blocksize, 0, GFP_NOFS);
> +			if (err)
> +				goto errout;
> +		}
>  
>  		bio = bio_alloc(GFP_NOWAIT, 1);
>  		if (!bio) {
>  			err = -ENOMEM;
>  			goto errout;
>  		}
> +		fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOIO);
> +
>  		bio_set_dev(bio, inode->i_sb->s_bdev);
>  		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
>  		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
> @@ -87,7 +96,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
>  	}
>  	err = 0;
>  errout:
> -	fscrypt_free_bounce_page(ciphertext_page);
> +	if (!inlinecrypt)
> +		fscrypt_free_bounce_page(ciphertext_page);
>  	return err;
>  }
>  EXPORT_SYMBOL(fscrypt_zeroout_range);

FYI, I've just applied a patch
(https://lore.kernel.org/r/20191226160813.53182-1-ebiggers@xxxxxxxxxx/)
to fscrypt.git#master that optimizes this function to write multiple pages
at a time. So this part of this patch will need to be reworked. I suggest
just handling the inline and fs-layer encryption cases separately.

I maintain a testing branch that has all the pending patches I'm interested
in applied, so I actually already hacked together the following to resolve
the conflict. Please double check it carefully before using it in v7 though:

static int fscrypt_zeroout_range_inlinecrypt(const struct inode *inode,
					     pgoff_t lblk, sector_t pblk,
					     unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
	const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
	unsigned int i;
	struct bio *bio;
	int ret, err;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);

	do {
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);

		i = 0;
		do {
			unsigned int blocks_this_page =
				min(len, blocks_per_page);
			unsigned int bytes_this_page =
				blocks_this_page << blockbits;

			ret = bio_add_page(bio, ZERO_PAGE(0),
					   bytes_this_page, 0);
			if (WARN_ON(ret != bytes_this_page)) {
				err = -EIO;
				goto out;
			}
			lblk += blocks_this_page;
			pblk += blocks_this_page;
			len -= blocks_this_page;
		} while (++i != BIO_MAX_PAGES && len != 0);

		err = submit_bio_wait(bio);
		if (err)
			goto out;
		bio_reset(bio);
	} while (len != 0);
	err = 0;
out:
	bio_put(bio);
	return err;
}
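
(For what it's worth, "handling the cases separately" would mean the
top-level fscrypt_zeroout_range() just dispatches to one helper or the
other. The sketch below is only illustrative: fscrypt_zeroout_range_fs_layer()
is a hypothetical name standing in for the existing bounce-page path, so
adjust the names to whatever the final split actually uses.)

int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
			  sector_t pblk, unsigned int len)
{
	if (len == 0)
		return 0;

	/* Inline crypto: the block layer encrypts, so just write zero pages. */
	if (fscrypt_inode_uses_inline_crypto(inode))
		return fscrypt_zeroout_range_inlinecrypt(inode, lblk, pblk,
							 len);

	/*
	 * Fs-layer encryption: keep the existing path that encrypts into
	 * bounce pages and submits those (hypothetical helper name).
	 */
	return fscrypt_zeroout_range_fs_layer(inode, lblk, pblk, len);
}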