Jens, any concerns about the new block device ioctls?

On Sun, Nov 8, 2015 at 11:28 AM, Dan Williams <dan.j.williams@xxxxxxxxx> wrote:
> If an application wants exclusive access to all of the persistent memory
> provided by an NVDIMM namespace it can use this raw-block-dax facility
> to forgo establishing a filesystem. This capability is targeted
> primarily to hypervisors wanting to provision persistent memory for
> guests. It can be disabled / enabled dynamically via the new BLKDAXSET
> ioctl.
>
> Cc: Jeff Moyer <jmoyer@xxxxxxxxxx>
> Cc: Christoph Hellwig <hch@xxxxxx>
> Cc: Dave Chinner <david@xxxxxxxxxxxxx>
> Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> Cc: Ross Zwisler <ross.zwisler@xxxxxxxxxxxxxxx>
> Reported-by: kbuild test robot <fengguang.wu@xxxxxxxxx>
> Reviewed-by: Jan Kara <jack@xxxxxxxx>
> Signed-off-by: Dan Williams <dan.j.williams@xxxxxxxxx>
> ---
>  block/ioctl.c           | 60 +++++++++++++++++++++++++++++++
>  fs/block_dev.c          | 90 ++++++++++++++++++++++++++++++++++++++++++++++-
>  include/linux/fs.h      |  3 ++
>  include/uapi/linux/fs.h |  2 +
>  4 files changed, 154 insertions(+), 1 deletion(-)
>
> diff --git a/block/ioctl.c b/block/ioctl.c
> index 8061eba42887..604438f36ddd 100644
> --- a/block/ioctl.c
> +++ b/block/ioctl.c
> @@ -295,6 +295,52 @@ static inline int is_unrecognized_ioctl(int ret)
>                 ret == -ENOIOCTLCMD;
>  }
>
> +#ifdef CONFIG_FS_DAX
> +static bool blkdev_dax_capable(struct block_device *bdev)
> +{
> +       struct gendisk *disk = bdev->bd_disk;
> +
> +       if (!disk->fops->direct_access)
> +               return false;
> +
> +       /*
> +        * If the partition is not aligned on a page boundary, we can't
> +        * do dax I/O to it.
> +        */
> +       if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512))
> +               || (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
> +               return false;
> +
> +       return true;
> +}
> +
> +static int blkdev_set_dax(struct block_device *bdev, int n)
> +{
> +       int rc = 0;
> +
> +       if (n)
> +               n = S_DAX;
> +
> +       if (n && !blkdev_dax_capable(bdev))
> +               return -ENOTTY;
> +
> +       mutex_lock(&bdev->bd_inode->i_mutex);
> +       if (bdev->bd_map_count == 0)
> +               inode_set_flags(bdev->bd_inode, n, S_DAX);
> +       else
> +               rc = -EBUSY;
> +       mutex_unlock(&bdev->bd_inode->i_mutex);
> +       return rc;
> +}
> +#else
> +static int blkdev_set_dax(struct block_device *bdev, int n)
> +{
> +       if (n)
> +               return -ENOTTY;
> +       return 0;
> +}
> +#endif
> +
>  /*
>   * always keep this in sync with compat_blkdev_ioctl()
>   */
> @@ -449,6 +495,20 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
>         case BLKTRACETEARDOWN:
>                 ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg);
>                 break;
> +       case BLKDAXSET:
> +               if (!capable(CAP_SYS_ADMIN))
> +                       return -EACCES;
> +
> +               if (get_user(n, (int __user *)(arg)))
> +                       return -EFAULT;
> +               n = !!n;
> +               if (n == !!(bdev->bd_inode->i_flags & S_DAX))
> +                       return 0;
> +
> +               return blkdev_set_dax(bdev, n);
> +       case BLKDAXGET:
> +               return put_int(arg, !!(bdev->bd_inode->i_flags & S_DAX));
> +               break;
>         default:
>                 ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
>         }
> diff --git a/fs/block_dev.c b/fs/block_dev.c
> index d3820f6418c8..09d10667cc19 100644
> --- a/fs/block_dev.c
> +++ b/fs/block_dev.c
> @@ -1687,13 +1687,101 @@ static const struct address_space_operations def_blk_aops = {
>         .is_dirty_writeback = buffer_check_dirty_writeback,
>  };
>
> +#ifdef CONFIG_FS_DAX
> +/*
> + * In the raw block case we do not need to contend with truncation nor
> + * unwritten file extents. Without those concerns there is no need for
> + * additional locking beyond the mmap_sem context that these routines
> + * are already executing under.
> + *
> + * Note, there is no protection if the block device is dynamically
> + * resized (partition grow/shrink) during a fault. A stable block device
> + * size is already not enforced in the blkdev_direct_IO path.
> + *
> + * For DAX, it is the responsibility of the block device driver to
> + * ensure the whole-disk device size is stable while requests are in
> + * flight.
> + *
> + * Finally, unlike the filemap_page_mkwrite() case there is no
> + * filesystem superblock to sync against freezing. We still include a
> + * pfn_mkwrite callback for dax drivers to receive write fault
> + * notifications.
> + */
> +static int blkdev_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
> +{
> +       return __dax_fault(vma, vmf, blkdev_get_block, NULL);
> +}
> +
> +static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
> +               pmd_t *pmd, unsigned int flags)
> +{
> +       return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL);
> +}
> +
> +static void blkdev_vm_open(struct vm_area_struct *vma)
> +{
> +       struct inode *bd_inode = bdev_file_inode(vma->vm_file);
> +       struct block_device *bdev = I_BDEV(bd_inode);
> +
> +       mutex_lock(&bd_inode->i_mutex);
> +       bdev->bd_map_count++;
> +       mutex_unlock(&bd_inode->i_mutex);
> +}
> +
> +static void blkdev_vm_close(struct vm_area_struct *vma)
> +{
> +       struct inode *bd_inode = bdev_file_inode(vma->vm_file);
> +       struct block_device *bdev = I_BDEV(bd_inode);
> +
> +       mutex_lock(&bd_inode->i_mutex);
> +       bdev->bd_map_count--;
> +       mutex_unlock(&bd_inode->i_mutex);
> +}
> +
> +static const struct vm_operations_struct blkdev_dax_vm_ops = {
> +       .open = blkdev_vm_open,
> +       .close = blkdev_vm_close,
> +       .fault = blkdev_dax_fault,
> +       .pmd_fault = blkdev_dax_pmd_fault,
> +       .pfn_mkwrite = blkdev_dax_fault,
> +};
> +
> +static const struct vm_operations_struct blkdev_default_vm_ops = {
> +       .open = blkdev_vm_open,
> +       .close = blkdev_vm_close,
> +       .fault = filemap_fault,
> +       .map_pages = filemap_map_pages,
> +};
> +
> +static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
> +{
> +       struct inode *bd_inode = bdev_file_inode(file);
> +       struct block_device *bdev = I_BDEV(bd_inode);
> +
> +       file_accessed(file);
> +       mutex_lock(&bd_inode->i_mutex);
> +       bdev->bd_map_count++;
> +       if (IS_DAX(bd_inode)) {
> +               vma->vm_ops = &blkdev_dax_vm_ops;
> +               vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
> +       } else {
> +               vma->vm_ops = &blkdev_default_vm_ops;
> +       }
> +       mutex_unlock(&bd_inode->i_mutex);
> +
> +       return 0;
> +}
> +#else
> +#define blkdev_mmap generic_file_mmap
> +#endif
> +
>  const struct file_operations def_blk_fops = {
>         .open = blkdev_open,
>         .release = blkdev_close,
>         .llseek = block_llseek,
>         .read_iter = blkdev_read_iter,
>         .write_iter = blkdev_write_iter,
> -       .mmap = generic_file_mmap,
> +       .mmap = blkdev_mmap,
>         .fsync = blkdev_fsync,
>         .unlocked_ioctl = block_ioctl,
>  #ifdef CONFIG_COMPAT
> diff --git a/include/linux/fs.h b/include/linux/fs.h
> index 72d8a844c692..8fb2d4b848bf 100644
> --- a/include/linux/fs.h
> +++ b/include/linux/fs.h
> @@ -482,6 +482,9 @@ struct block_device {
>         int bd_fsfreeze_count;
>         /* Mutex for freeze */
>         struct mutex bd_fsfreeze_mutex;
> +#ifdef CONFIG_FS_DAX
> +       int bd_map_count;
> +#endif
>  };
>
>  /*
> diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
> index 9b964a5920af..cc2f0fdae707 100644
> --- a/include/uapi/linux/fs.h
> +++ b/include/uapi/linux/fs.h
> @@ -152,6 +152,8 @@ struct inodes_stat_t {
>  #define BLKSECDISCARD _IO(0x12,125)
>  #define BLKROTATIONAL _IO(0x12,126)
>  #define BLKZEROOUT _IO(0x12,127)
> +#define BLKDAXSET _IO(0x12,128)
> +#define BLKDAXGET _IO(0x12,129)
>
>  #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
>  #define FIBMAP _IO(0x00,1) /* bmap access */
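
For anyone who wants to poke at this from userspace, here is a rough,
untested sketch of how a hypervisor (or any other consumer) might flip a
raw block device into DAX mode and map it. The ioctl numbers are the ones
proposed above; /dev/pmem0 and the 2MB mapping length are just
placeholders, and BLKDAXSET needs CAP_SYS_ADMIN plus a page-aligned device
whose driver implements ->direct_access:

/*
 * Sketch only: enable DAX on a raw block device with the proposed
 * BLKDAXSET ioctl, confirm it via BLKDAXGET, then mmap the device.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/fs.h>

#ifndef BLKDAXSET
#define BLKDAXSET _IO(0x12, 128)       /* numbers from the patch above */
#define BLKDAXGET _IO(0x12, 129)
#endif

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/dev/pmem0"; /* placeholder */
        size_t len = 2UL << 20; /* 2MB, assumes the device is at least this big */
        int enable = 1, flag = 0;
        void *addr;
        int fd;

        fd = open(path, O_RDWR);
        if (fd < 0) {
                perror("open");
                return EXIT_FAILURE;
        }

        /*
         * Needs CAP_SYS_ADMIN.  Fails with ENOTTY if the device is not
         * DAX-capable, and with EBUSY if the device is already mapped,
         * so the flag has to be set before mmap().
         */
        if (ioctl(fd, BLKDAXSET, &enable) < 0) {
                perror("BLKDAXSET");
                return EXIT_FAILURE;
        }

        if (ioctl(fd, BLKDAXGET, &flag) < 0 || !flag) {
                fprintf(stderr, "DAX not enabled on %s\n", path);
                return EXIT_FAILURE;
        }

        /*
         * With S_DAX set this mapping goes through blkdev_dax_vm_ops:
         * faults map device pfns directly, no page cache in between.
         */
        addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }

        /* Stores land directly on the device; real users still need to
         * flush CPU caches to guarantee persistence. */
        memset(addr, 0, 4096);

        munmap(addr, len);
        close(fd);
        return EXIT_SUCCESS;
}

Note that because blkdev_set_dax() refuses to change S_DAX while
bd_map_count is non-zero, the toggle has to happen before any mapping of
the device is established, and existing mappings keep whichever vm_ops
they were set up with.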