Hello.

On Friday, 2 April 2021 at 17:53:41 CEST Konstantin Komarov wrote:
> ...
> +/* helper for ntfs_file_write_iter (compressed files) */
> +static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
> +{
> +	int err;
> +	struct file *file = iocb->ki_filp;
> +	size_t count = iov_iter_count(from);
> +	loff_t pos = iocb->ki_pos;
> +	struct inode *inode = file_inode(file);
> +	loff_t i_size = inode->i_size;
> +	struct address_space *mapping = inode->i_mapping;
> +	struct ntfs_inode *ni = ntfs_i(inode);
> +	u64 valid = ni->i_valid;
> +	struct ntfs_sb_info *sbi = ni->mi.sbi;
> +	struct page *page, **pages = NULL;
> +	size_t written = 0;
> +	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
> +	u32 frame_size = 1u << frame_bits;
> +	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
> +	u32 ip, off;
> +	CLST frame;
> +	u64 frame_vbo;
> +	pgoff_t index;
> +	bool frame_uptodate;
> +
> +	if (frame_size < PAGE_SIZE) {
> +		/*
> +		 * frame_size == 8K if cluster 512
> +		 * frame_size == 64K if cluster 4096
> +		 */
> +		ntfs_inode_warn(inode, "page size is bigger than frame size");
> +		return -EOPNOTSUPP;
> +	}
> +
> +	pages = ntfs_malloc(pages_per_frame * sizeof(struct page *));
> +	if (!pages)
> +		return -ENOMEM;
> +
> +	current->backing_dev_info = inode_to_bdi(inode);
> +	err = file_remove_privs(file);
> +	if (err)
> +		goto out;
> +
> +	err = file_update_time(file);
> +	if (err)
> +		goto out;
> +
> +	/* zero range [valid : pos) */
> +	while (valid < pos) {
> +		CLST lcn, clen;
> +
> +		frame = valid >> frame_bits;
> +		frame_vbo = valid & ~(frame_size - 1);
> +		off = valid & (frame_size - 1);
> +
> +		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn,
> +					  &clen, NULL);
> +		if (err)
> +			goto out;
> +
> +		if (lcn == SPARSE_LCN) {
> +			ni->i_valid = valid =
> +				frame_vbo + ((u64)clen << sbi->cluster_bits);
> +			continue;
> +		}
> +
> +		/* Load full frame */
> +		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
> +					   pages, pages_per_frame,
> +					   &frame_uptodate);
> +		if (err)
> +			goto out;
> +
> +		if (!frame_uptodate && off) {
> +			err = ni_read_frame(ni, frame_vbo, pages,
> +					    pages_per_frame);
> +			if (err) {
> +				for (ip = 0; ip < pages_per_frame; ip++) {
> +					page = pages[ip];
> +					unlock_page(page);
> +					put_page(page);
> +				}
> +				goto out;
> +			}
> +		}
> +
> +		ip = off >> PAGE_SHIFT;
> +		off = offset_in_page(valid);
> +		for (; ip < pages_per_frame; ip++, off = 0) {
> +			page = pages[ip];
> +			zero_user_segment(page, off, PAGE_SIZE);
> +			flush_dcache_page(page);
> +			SetPageUptodate(page);
> +		}
> +
> +		ni_lock(ni);
> +		err = ni_write_frame(ni, pages, pages_per_frame);
> +		ni_unlock(ni);
> +
> +		for (ip = 0; ip < pages_per_frame; ip++) {
> +			page = pages[ip];
> +			SetPageUptodate(page);
> +			unlock_page(page);
> +			put_page(page);
> +		}
> +
> +		if (err)
> +			goto out;
> +
> +		ni->i_valid = valid = frame_vbo + frame_size;
> +	}
> +
> +	/* copy user data [pos : pos + count) */
> +	while (count) {
> +		size_t copied, bytes;
> +
> +		off = pos & (frame_size - 1);
> +		bytes = frame_size - off;
> +		if (bytes > count)
> +			bytes = count;
> +
> +		frame = pos >> frame_bits;
> +		frame_vbo = pos & ~(frame_size - 1);
> +		index = frame_vbo >> PAGE_SHIFT;
> +
> +		if (unlikely(iov_iter_fault_in_readable(from, bytes))) {
> +			err = -EFAULT;
> +			goto out;
> +		}
> +
> +		/* Load full frame */
> +		err = ntfs_get_frame_pages(mapping, index, pages,
> +					   pages_per_frame, &frame_uptodate);
> +		if (err)
> +			goto out;
> +
> +		if (!frame_uptodate) {
> +			loff_t to = pos + bytes;
> +
> +			if (off || (to < i_size && (to & (frame_size - 1)))) {
> +				err = ni_read_frame(ni, frame_vbo, pages,
> +						    pages_per_frame);
> +				if (err) {
> +					for (ip = 0; ip < pages_per_frame;
> +					     ip++) {
> +						page = pages[ip];
> +						unlock_page(page);
> +						put_page(page);
> +					}
> +					goto out;
> +				}
> +			}
> +		}
> +
> +		WARN_ON(!bytes);
> +		copied = 0;
> +		ip = off >> PAGE_SHIFT;
> +		off = offset_in_page(pos);
> +
> +		/* copy user data to pages */
> +		for (;;) {
> +			size_t cp, tail = PAGE_SIZE - off;
> +
> +			page = pages[ip];
> +			cp = iov_iter_copy_from_user_atomic(page, from, off,
> +							    min(tail, bytes));

For 5.14, iov_iter_copy_from_user_atomic() has to be replaced by
copy_page_from_iter_atomic().

> +			flush_dcache_page(page);
> +			iov_iter_advance(from, cp);

And iov_iter_advance() should be removed then. Please see upstream commit
f0b65f39ac505e8f1dcdaa165aa7b8c0bd6fd454 for detailed explanation.

> +			copied += cp;
> +			bytes -= cp;
> +			if (!bytes || !cp)
> +				break;
> +
> +			if (cp < tail) {
> +				off += cp;
> +			} else {
> +				ip++;
> +				off = 0;
> +			}
> +		}
> +
> +		ni_lock(ni);
> +		err = ni_write_frame(ni, pages, pages_per_frame);
> +		ni_unlock(ni);
> +
> +		for (ip = 0; ip < pages_per_frame; ip++) {
> +			page = pages[ip];
> +			ClearPageDirty(page);
> +			SetPageUptodate(page);
> +			unlock_page(page);
> +			put_page(page);
> +		}
> +
> +		if (err)
> +			goto out;
> +
> +		/*
> +		 * We can loop for a long time in here. Be nice and allow
> +		 * us to schedule out to avoid softlocking if preempt
> +		 * is disabled.
> +		 */
> +		cond_resched();
> +
> +		pos += copied;
> +		written += copied;
> +
> +		count = iov_iter_count(from);
> +	}
> +
> +out:
> +	ntfs_free(pages);
> +
> +	current->backing_dev_info = NULL;
> +
> +	if (err < 0)
> +		return err;
> +
> +	iocb->ki_pos += written;
> +	if (iocb->ki_pos > ni->i_valid)
> +		ni->i_valid = iocb->ki_pos;
> +
> +	return written;
> +}
> ...

Thanks.

--
Oleksandr Natalenko (post-factum)
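P.S. To illustrate, here is a minimal sketch of how the inner copy loop could
look on 5.14 with that change. It assumes copy_page_from_iter_atomic() from
include/linux/uio.h, which both copies into the page and advances the
iterator; it is not build-tested against your tree, so treat it as a sketch
rather than a drop-in replacement:

		/* copy user data to pages */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			/*
			 * copy_page_from_iter_atomic() advances @from by the
			 * number of bytes copied, so the explicit
			 * iov_iter_advance() call goes away.
			 */
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);
			copied += cp;
			bytes -= cp;
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}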