On Fri, Mar 16, 2018 at 4:32 AM, Yan, Zheng <zyan@xxxxxxxxxx> wrote:
> Signed-off-by: "Yan, Zheng" <zyan@xxxxxxxxxx>
> ---
>  fs/ceph/file.c | 9 ++++++---
>  1 file changed, 6 insertions(+), 3 deletions(-)
>
> diff --git a/fs/ceph/file.c b/fs/ceph/file.c
> index 1e9dbe77a880..fef8968011ee 100644
> --- a/fs/ceph/file.c
> +++ b/fs/ceph/file.c
> @@ -614,7 +614,8 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
>  struct ceph_aio_request {
>          struct kiocb *iocb;
>          size_t total_len;
> -        int write;
> +        bool write;
> +        bool should_dirty;
>          int error;
>          struct list_head osd_reqs;
>          unsigned int num_reqs;
> @@ -724,7 +725,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
>                  }
>          }
>
> -        ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write);
> +        ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty);
>          ceph_osdc_put_request(req);
>
>          if (rc < 0)
> @@ -821,6 +822,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
>          size_t count = iov_iter_count(iter);
>          loff_t pos = iocb->ki_pos;
>          bool write = iov_iter_rw(iter) == WRITE;
> +        bool should_dirty = !write && iter_is_iovec(iter);
>
>          if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
>                  return -EROFS;
> @@ -888,6 +890,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
>          if (aio_req) {
>                  aio_req->iocb = iocb;
>                  aio_req->write = write;
> +                aio_req->should_dirty = should_dirty;
>                  INIT_LIST_HEAD(&aio_req->osd_reqs);
>                  if (write) {
>                          aio_req->mtime = mtime;
> @@ -945,7 +948,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
>                                  len = ret;
>                  }
>
> -                ceph_put_page_vector(pages, num_pages, !write);
> +                ceph_put_page_vector(pages, num_pages, should_dirty);
>
>                  ceph_osdc_put_request(req);
>                  if (ret < 0)

Hi Zheng,

Can you explain how this fixes the invalid memory access reported in
"KASAN: use-after-free Read in set_page_dirty_lock"?  Did you come up
with a different reproducer?

Thanks,

                Ilya
--
To unsubscribe from this list: send the line "unsubscribe ceph-devel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
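
For reference, a minimal standalone sketch of the condition the patch adds
("should_dirty = !write && iter_is_iovec(iter)").  The helper name below is
hypothetical and not part of the patch; it only restates the rule: after a
direct read, dirty the destination pages only when they were pinned from a
user-space iovec.  Pages supplied through ITER_BVEC/ITER_KVEC iterators are
owned by the caller and may already be released by the time the async
completion calls set_page_dirty_lock(), which is presumably the access KASAN
flagged.

    #include <linux/uio.h>

    /*
     * Hypothetical helper (illustration only): restates the should_dirty
     * check from ceph_direct_read_write() in the patch above.
     */
    static bool sketch_should_dirty_pages(struct iov_iter *iter)
    {
            /* A write never dirties its source pages. */
            if (iov_iter_rw(iter) == WRITE)
                    return false;

            /* Only user-space (iovec-backed) pages are safe to dirty. */
            return iter_is_iovec(iter);
    }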