[RFC 7/7] zuf: Write/Read && mmap implementation

Dispatch to the server can operate on buffers of up to
4 megabytes. Any bigger operation is split up and dispatched
in chunks of this size.

Also, if a multi-segment AIO is used, each segment is dispatched
on its own. (TODO: this can easily be fixed with sg operations.)
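
Below is an illustrative sketch, condensing the loop in _zufs_IO()
from rw.c in this patch, of how a request larger than the per-dispatch
cap is chunked to at most ZUS_API_MAP_MAX_SIZE, and why a multi-segment
iov advances one segment at a time (iov_iter_get_pages() stops at a
segment boundary):

	while (iov_iter_count(ii)) {
		struct page *pages[ZUS_API_MAP_MAX_PAGES];
		size_t pgoffset;
		/* pins pages from a single iovec segment only, capped
		 * at ZUS_API_MAP_MAX_SIZE (the 4MB dispatch limit)
		 */
		ssize_t bytes = iov_iter_get_pages(ii, pages,
						   ZUS_API_MAP_MAX_SIZE,
						   ZUS_API_MAP_MAX_PAGES,
						   &pgoffset);

		/* one server round-trip per chunk */
		_IO_dispatch(sbi, zus_ii, operation, pgoffset, pages,
			     DIV_ROUND_UP(bytes + pgoffset, PAGE_SIZE),
			     pos, bytes);

		iov_iter_advance(ii, bytes);
		pos += bytes;
	}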

On write, if any mmapped buffers have changed - for example, holes
newly allocated due to this write, or a previously mmapped COW page
that was written - a subset of the written range can be returned
for the kernel to call unmap_mapping_range() on.
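
A minimal sketch of the intended kernel-side handling, assuming
hypothetical unmap_index/unmap_len reply fields (struct zufs_ioc_IO
in this patch does not carry them yet):

	/* 'unmap_index'/'unmap_len' are hypothetical reply fields */
	if (IO.unmap_len)
		unmap_mapping_range(inode->i_mapping,
				    (loff_t)IO.unmap_index << PAGE_SHIFT,
				    (loff_t)IO.unmap_len << PAGE_SHIFT, 0);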

mmap is achieved with the GET_BLOCK operation. GET_BLOCK returns
whether we need to unmap a previously mapped PTE, for example when
writing over a previously faulted hole or in the case of a COW.
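
Condensed, the write-fault path in mmap.c below does (sketch, error
handling omitted):

	err = _get_block(sbi, zii, WRITE, vmf->pgoff, &get_block);

	if (get_block.ret_flags & ZUFS_GBF_NEW)
		/* new block (hole filled or COW broken): zap the old PTE */
		unmap_mapping_range(inode->i_mapping,
				    vmf->pgoff << PAGE_SHIFT, PAGE_SIZE, 0);

	pfn = md_pfn(sbi->md, get_block.pmem_bn);
	vm_insert_mixed_mkwrite(vma, vmf->address,
				phys_to_pfn_t(PFN_PHYS(pfn),
					      PFN_MAP | PFN_DEV));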

Signed-off-by: Boaz Harrosh <boazh@xxxxxxxxxx>
---
 fs/zuf/Makefile  |   1 +
 fs/zuf/_extern.h |   7 ++
 fs/zuf/file.c    |   3 +
 fs/zuf/mmap.c    | 335 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 fs/zuf/rw.c      | 167 +++++++++++++++++++++++++++
 fs/zuf/zus_api.h |  26 +++++
 6 files changed, 539 insertions(+)
 create mode 100644 fs/zuf/mmap.c
 create mode 100644 fs/zuf/rw.c

diff --git a/fs/zuf/Makefile b/fs/zuf/Makefile
index 4c125f7..0eb933c 100644
--- a/fs/zuf/Makefile
+++ b/fs/zuf/Makefile
@@ -17,5 +17,6 @@ zuf-y += md.o t2.o t1.o
 zuf-y += zuf-core.o zuf-root.o
 
 # Main FS
+zuf-y += rw.o mmap.o
 zuf-y += super.o inode.o directory.o file.o namei.o symlink.o
 zuf-y += module.o
diff --git a/fs/zuf/_extern.h b/fs/zuf/_extern.h
index cf2e80f..16e99e9 100644
--- a/fs/zuf/_extern.h
+++ b/fs/zuf/_extern.h
@@ -46,6 +46,13 @@ bool zuf_dir_emit(struct super_block *sb, struct dir_context *ctx,
 uint zuf_prepare_symname(struct zufs_ioc_new_inode *ioc_new_inode,
 			const char *symname, ulong len, struct page *pages[2]);
 
+/* mmap.c */
+int zuf_file_mmap(struct file *file, struct vm_area_struct *vma);
+
+/* rw.c */
+ssize_t zuf_rw_read_iter(struct kiocb *kiocb, struct iov_iter *ii);
+ssize_t zuf_rw_write_iter(struct kiocb *kiocb, struct iov_iter *ii);
+
 /* file.c */
 int zuf_isync(struct inode *inode, loff_t start, loff_t end, int datasync);
 
diff --git a/fs/zuf/file.c b/fs/zuf/file.c
index 3b37d9f..3fe59d1 100644
--- a/fs/zuf/file.c
+++ b/fs/zuf/file.c
@@ -386,6 +386,9 @@ static int zuf_file_release(struct inode *inode, struct file *filp)
 
 const struct file_operations zuf_file_operations = {
 	.llseek			= zuf_llseek,
+	.read_iter		= zuf_rw_read_iter,
+	.write_iter		= zuf_rw_write_iter,
+	.mmap			= zuf_file_mmap,
 	.open			= generic_file_open,
 	.fsync			= zuf_fsync,
 	.flush			= zuf_flush,
diff --git a/fs/zuf/mmap.c b/fs/zuf/mmap.c
new file mode 100644
index 0000000..b4c8689
--- /dev/null
+++ b/fs/zuf/mmap.c
@@ -0,0 +1,335 @@
+/*
+ * BRIEF DESCRIPTION
+ *
+ * Memory-mapped file (mmap) operations.
+ *
+ * Copyright (c) 2018 NetApp Inc. All rights reserved.
+ *
+ * ZUFS-License: GPL-2.0 OR BSD-3-Clause. See module.c for LICENSE details.
+ *
+ * Authors:
+ *	Boaz Harrosh <boazh@xxxxxxxxxx>
+ */
+
+#include <linux/pfn_t.h>
+#include "zuf.h"
+
+/* ~~~ Functions for mmap and page faults ~~~ */
+
+/* MAP_PRIVATE, copy data to user private page (cow_page) */
+static int _cow_private_page(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct inode *inode = vma->vm_file->f_mapping->host;
+	struct zuf_sb_info *sbi = SBI(inode->i_sb);
+	struct zuf_inode_info *zii = ZUII(inode);
+	struct zufs_ioc_IO IO = {
+		.hdr.operation = ZUS_OP_READ,
+		.hdr.in_len = sizeof(IO),
+		.hdr.out_len = 0,
+		.hdr.offset = 0,
+		.hdr.len = PAGE_SIZE,
+		.zus_ii = zii->zus_ii,
+		/* FIXME: Kernel guys, this name is confusing; should be pgindex */
+		.filepos = md_p2o(vmf->pgoff),
+	};
+	int err;
+
+	/* Basically a READ into vmf->cow_page */
+	err = zufs_dispatch(ZUF_ROOT(sbi), &IO.hdr, &vmf->cow_page, 1);
+	if (unlikely(err)) {
+		zuf_err("[%ld] What??? bn=0x%lx address=0x%lx => %d\n",
+			inode->i_ino, vmf->pgoff, vmf->address, err);
+		/* FIXME: Probably return VM_FAULT_SIGBUS */
+	}
+
+	/* HACK: This is a hack since kernel v4.7, where VM_FAULT_LOCKED with
+	 * vmf->page==NULL is no longer supported. For now this way works
+	 * well. We let mm mess around with unlocking and putting its own
+	 * cow_page.
+	 */
+	vmf->page = vmf->cow_page;
+	get_page(vmf->page);
+	lock_page(vmf->page);
+
+	return VM_FAULT_LOCKED;
+}
+
+int _rw_init_zero_page(struct zuf_inode_info *zii)
+{
+	if (zii->zero_page)
+		return 0;
+
+	zii->zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (unlikely(!zii->zero_page))
+		return -ENOMEM;
+	zii->zero_page->mapping = zii->vfs_inode.i_mapping;
+	return 0;
+}
+
+static int _get_block(struct zuf_sb_info *sbi, struct zuf_inode_info *zii,
+		      int rw, ulong index, struct zufs_ioc_get_block *get_block)
+{
+	get_block->hdr.operation = ZUS_OP_GET_BLOCK;
+
+	get_block->hdr.in_len = sizeof(*get_block); /* FIXME */
+	get_block->hdr.out_start = 0; /* FIXME */
+	get_block->hdr.out_len = sizeof(*get_block); /* FIXME */
+
+	get_block->zus_ii = zii->zus_ii;
+	get_block->index = index;
+	get_block->rw = rw;
+
+	return zufs_dispatch(ZUF_ROOT(sbi), &get_block->hdr, NULL, 0);
+}
+
+static int zuf_write_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+			   bool pfn_mkwrite)
+{
+	struct inode *inode = vma->vm_file->f_mapping->host;
+	struct zuf_sb_info *sbi = SBI(inode->i_sb);
+	struct zuf_inode_info *zii = ZUII(inode);
+	struct zus_inode *zi = zii->zi;
+	struct zufs_ioc_get_block get_block = {};
+	int fault = VM_FAULT_SIGBUS;
+	pgoff_t size;
+	ulong pfn;
+	int err;
+
+	zuf_dbg_mmap("[%ld] vm_start=0x%lx vm_end=0x%lx VA=0x%lx "
+		    "pgoff=0x%lx vmf_flags=0x%x cow_page=%p page=%p\n",
+		    _zi_ino(zi), vma->vm_start, vma->vm_end,
+		    vmf->address, vmf->pgoff, vmf->flags,
+		    vmf->cow_page, vmf->page);
+
+	if (unlikely(vmf->page && vmf->page != zii->zero_page)) {
+		zuf_err("[%ld] vm_start=0x%lx vm_end=0x%lx VA=0x%lx "
+			"pgoff=0x%lx vmf_flags=0x%x page=%p cow_page=%p\n",
+			_zi_ino(zi), vma->vm_start, vma->vm_end,
+			vmf->address, vmf->pgoff, vmf->flags,
+			vmf->page, vmf->cow_page);
+		return VM_FAULT_SIGBUS;
+	}
+
+	sb_start_pagefault(inode->i_sb);
+	zuf_smr_lock_pagefault(zii);
+
+	size = md_o2p_up(i_size_read(inode));
+	if (unlikely(vmf->pgoff >= size)) {
+		ulong pgoff = vma->vm_pgoff +
+					md_o2p((vmf->address - vma->vm_start));
+
+		zuf_err("[%ld] pgoff(0x%lx)(0x%lx) >= size(0x%lx) => SIGBUS\n",
+			 _zi_ino(zi), vmf->pgoff, pgoff, size);
+
+		fault = VM_FAULT_SIGBUS;
+		goto out;
+	}
+
+	if (vmf->cow_page) {
+		zuf_warn("cow is write\n");
+		fault = _cow_private_page(vma, vmf);
+		goto out;
+	}
+
+	zus_inode_cmtime_now(inode, zi);
+	/* NOTE: zus needs to flush the zi */
+
+	err = _get_block(sbi, zii, WRITE, vmf->pgoff, &get_block);
+	if (unlikely(err)) {
+		zuf_err("crap => %d\n", err);
+		goto out;
+	}
+
+	if (get_block.ret_flags & ZUFS_GBF_NEW) {
+		/* newly created block */
+		unmap_mapping_range(inode->i_mapping, vmf->pgoff << PAGE_SHIFT,
+				    PAGE_SIZE, 0);
+	} else if (pfn_mkwrite) {
+		/* If the block did not change just tell mm to flip
+		 * the write bit
+		 */
+		fault = VM_FAULT_WRITE;
+		goto out;
+	}
+
+	pfn = md_pfn(sbi->md, get_block.pmem_bn);
+	err = vm_insert_mixed_mkwrite(vma, vmf->address,
+			      phys_to_pfn_t(PFN_PHYS(pfn), PFN_MAP | PFN_DEV));
+	if (unlikely(err)) {
+		zuf_err("crap => %d\n", err);
+		goto out;
+	}
+
+	zuf_dbg_mmap("[%ld] vm_insert_mixed 0x%lx prot=0x%lx => %d\n",
+		    _zi_ino(zi), pfn, vma->vm_page_prot.pgprot, err);
+
+	zuf_sync_inc(inode);
+
+	fault = VM_FAULT_NOPAGE;
+out:
+	zuf_smr_unlock(zii);
+	sb_end_pagefault(inode->i_sb);
+	return fault;
+}
+
+static int zuf_pfn_mkwrite(struct vm_fault *vmf)
+{
+	return zuf_write_fault(vmf->vma, vmf, true);
+}
+
+static int zuf_read_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct inode *inode = vma->vm_file->f_mapping->host;
+	struct zuf_sb_info *sbi = SBI(inode->i_sb);
+	struct zuf_inode_info *zii = ZUII(inode);
+	struct zus_inode *zi = zii->zi;
+	struct zufs_ioc_get_block get_block = {};
+	int fault = VM_FAULT_SIGBUS;
+	pgoff_t size;
+	ulong pfn;
+	int err;
+
+	zuf_dbg_mmap("[%ld] vm_start=0x%lx vm_end=0x%lx VA=0x%lx "
+		    "pgoff=0x%lx vmf_flags=0x%x cow_page=%p page=%p\n",
+		    _zi_ino(zi), vma->vm_start, vma->vm_end,
+		    vmf->address, vmf->pgoff, vmf->flags,
+		    vmf->cow_page, vmf->page);
+
+	zuf_smr_lock_pagefault(zii);
+
+	size = md_o2p_up(i_size_read(inode));
+	if (unlikely(vmf->pgoff >= size)) {
+		ulong pgoff = vma->vm_pgoff +
+					md_o2p((vmf->address - vma->vm_start));
+
+		zuf_err("[%ld] pgoff(0x%lx)(0x%lx) >= size(0x%lx) => SIGBUS\n",
+			 _zi_ino(zi), vmf->pgoff, pgoff, size);
+		goto out;
+	}
+
+	if (vmf->cow_page) {
+		zuf_warn("cow is read\n");
+		fault = _cow_private_page(vma, vmf);
+		goto out;
+	}
+
+	file_accessed(vma->vm_file);
+	/* NOTE: zus needs to flush the zi */
+
+	err = _get_block(sbi, zii, READ, vmf->pgoff, &get_block);
+	if (unlikely(err)) {
+		zuf_err("crap => %d\n", err);
+		goto out;
+	}
+
+	if (get_block.pmem_bn == 0) {
+		/* Hole in file */
+		err = _rw_init_zero_page(zii);
+		if (unlikely(err))
+			goto out;
+
+		err = vm_insert_page(vma, vmf->address, zii->zero_page);
+		zuf_dbg_mmap("[%ld] inserted zero\n", _zi_ino(zi));
+
+		/* NOTE: we are fooling mm; we do not need this page
+		 * to be locked or have its refcount taken
+		 */
+		fault = VM_FAULT_NOPAGE;
+		goto out;
+	}
+
+	/* We have a real page */
+	pfn = md_pfn(sbi->md, get_block.pmem_bn);
+	err = vm_insert_mixed(vma, vmf->address,
+			      phys_to_pfn_t(PFN_PHYS(pfn), PFN_MAP | PFN_DEV));
+	if (unlikely(err)) {
+		zuf_err("[%ld] vm_insert_page/mixed => %d\n", _zi_ino(zi), err);
+		goto out;
+	}
+
+	zuf_dbg_mmap("[%ld] vm_insert_mixed 0x%lx prot=0x%lx => %d\n",
+		    _zi_ino(zi), pfn, vma->vm_page_prot.pgprot, err);
+
+	fault = VM_FAULT_NOPAGE;
+
+out:
+	zuf_smr_unlock(zii);
+	return fault;
+}
+
+static int zuf_fault(struct vm_fault *vmf)
+{
+	bool write_fault = (0 != (vmf->flags & FAULT_FLAG_WRITE));
+
+	if (write_fault)
+		return zuf_write_fault(vmf->vma, vmf, false);
+	else
+		return zuf_read_fault(vmf->vma, vmf);
+}
+
+static int zuf_page_mkwrite(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct inode *inode = vma->vm_file->f_mapping->host;
+
+	/* Our zero page doesn't hold the correct file offset in
+	 * page->index, so vmf->pgoff is incorrect; let's fix that.
+	 */
+	vmf->pgoff = vma->vm_pgoff +
+				((vmf->address - vma->vm_start) >> PAGE_SHIFT);
+
+	zuf_dbg_mmap("[%ld] pgoff=0x%lx\n", inode->i_ino, vmf->pgoff);
+
+	/* call fault handler to get a real page for writing */
+	return zuf_write_fault(vma, vmf, false);
+}
+
+static void zuf_mmap_open(struct vm_area_struct *vma)
+{
+	struct zuf_inode_info *zii = ZUII(file_inode(vma->vm_file));
+
+	atomic_inc(&zii->vma_count);
+}
+
+static void zuf_mmap_close(struct vm_area_struct *vma)
+{
+	struct inode *inode = file_inode(vma->vm_file);
+	int vma_count = atomic_dec_return(&ZUII(inode)->vma_count);
+
+	if (unlikely(vma_count < 0))
+		zuf_err("[%ld] WHAT??? vma_count=%d\n",
+			 inode->i_ino, vma_count);
+	else if (unlikely(vma_count == 0))
+		/* TOZU _despatch_mmap_close(inode)
+		 * User-mode would like to know we have no more
+		 * mapping on this inode
+		 */
+		;
+}
+
+static const struct vm_operations_struct zuf_vm_ops = {
+	.fault		= zuf_fault,
+	.page_mkwrite	= zuf_page_mkwrite,
+	.pfn_mkwrite	= zuf_pfn_mkwrite,
+	.open           = zuf_mmap_open,
+	.close		= zuf_mmap_close,
+};
+
+int zuf_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct inode *inode = file_inode(file);
+	struct zuf_inode_info *zii = ZUII(inode);
+
+	file_accessed(file);
+
+	vma->vm_flags |= VM_MIXEDMAP;
+	vma->vm_ops = &zuf_vm_ops;
+
+	atomic_inc(&zii->vma_count);
+
+	zuf_dbg_vfs("[%ld] start=0x%lx end=0x%lx flags=0x%lx page_prot=0x%lx\n",
+		     file->f_mapping->host->i_ino, vma->vm_start, vma->vm_end,
+		     vma->vm_flags, pgprot_val(vma->vm_page_prot));
+
+	return 0;
+}
diff --git a/fs/zuf/rw.c b/fs/zuf/rw.c
new file mode 100644
index 0000000..ec7cd9b
--- /dev/null
+++ b/fs/zuf/rw.c
@@ -0,0 +1,167 @@
+/*
+ * BRIEF DESCRIPTION
+ *
+ * Read/Write operations.
+ *
+ * Copyright (c) 2018 NetApp Inc. All rights reserved.
+ *
+ * ZUFS-License: GPL-2.0 OR BSD-3-Clause. See module.c for LICENSE details.
+ *
+ * Authors:
+ *	Boaz Harrosh <boazh@xxxxxxxxxx>
+ */
+#include <linux/uio.h>
+
+#include "zuf.h"
+
+/* ~~~ Functions for read_iter ~~~ */
+
+static int _IO_dispatch(struct zuf_sb_info *sbi, struct zus_inode_info *zus_ii,
+			int operation, uint pgoffset, struct page **pages,
+			uint nump, u64 filepos, uint len)
+{
+	struct zufs_ioc_IO IO = {
+		.hdr.operation = operation,
+		.hdr.in_len = sizeof(IO),
+		.hdr.out_len = 0,
+		.hdr.offset = pgoffset,
+		.hdr.len = len,
+		.zus_ii = zus_ii,
+		.filepos = filepos,
+	};
+
+	return zufs_dispatch(ZUF_ROOT(sbi), &IO.hdr, pages, nump);
+}
+
+static ssize_t _zufs_IO(struct zuf_sb_info *sbi, struct inode *inode,
+			int operation, struct iov_iter *ii, loff_t pos)
+{
+	struct zuf_inode_info *zii = ZUII(inode);
+	int err = -EINVAL;
+	loff_t start_pos = pos;
+
+	while (iov_iter_count(ii)) {
+		struct page *pages[ZUS_API_MAP_MAX_PAGES];
+		ssize_t bytes;
+		size_t pgoffset;
+		uint nump, i;
+
+		bytes = iov_iter_get_pages(ii, pages, ZUS_API_MAP_MAX_SIZE,
+					   ZUS_API_MAP_MAX_PAGES, &pgoffset);
+		if (bytes < 0) {
+			err = bytes;
+			break;
+		}
+
+		nump = DIV_ROUND_UP(bytes + pgoffset, PAGE_SIZE);
+		err = _IO_dispatch(sbi, zii->zus_ii, operation, pgoffset, pages,
+				   nump, pos, bytes);
+
+		for (i = 0; i < nump; ++i)
+			put_page(pages[i]);
+
+		if (unlikely(err))
+			break;
+
+		iov_iter_advance(ii, bytes);
+		pos += bytes;
+	}
+
+	if (unlikely(pos == start_pos))
+		return err;
+	return pos - start_pos;
+}
+
+static ssize_t _read_iter(struct inode *inode, struct kiocb *kiocb,
+			  struct iov_iter *ii)
+{
+	struct super_block *sb = inode->i_sb;
+	ssize_t ret;
+
+	/* EOF protection */
+	if (unlikely(kiocb->ki_pos > i_size_read(inode)))
+		return 0;
+
+	iov_iter_truncate(ii, i_size_read(inode) - kiocb->ki_pos);
+	if (unlikely(!iov_iter_count(ii))) {
+		/* Don't let zero len reads have any effect */
+		zuf_dbg_rw("called with NULL len\n");
+		return 0;
+	}
+
+	ret = _zufs_IO(SBI(sb), inode, ZUS_OP_READ, ii, kiocb->ki_pos);
+	if (unlikely(ret < 0))
+		return ret;
+
+	kiocb->ki_pos += ret;
+	return ret;
+}
+
+ssize_t zuf_rw_read_iter(struct kiocb *kiocb, struct iov_iter *ii)
+{
+	struct inode *inode = file_inode(kiocb->ki_filp);
+	ssize_t ret;
+
+	zuf_dbg_vfs("[%ld] ppos=0x%llx len=0x%zx\n",
+		     inode->i_ino, kiocb->ki_pos, iov_iter_count(ii));
+
+	file_accessed(kiocb->ki_filp);
+	ret = _read_iter(inode, kiocb, ii);
+
+	zuf_dbg_vfs("[%ld] => 0x%lx\n", inode->i_ino, ret);
+	return ret;
+}
+
+/* ~~~ Functions for write_iter ~~~ */
+
+static ssize_t _write_iter(struct inode *inode, struct kiocb *kiocb,
+			  struct iov_iter *ii)
+{
+	ssize_t ret;
+
+	ret = _zufs_IO(SBI(inode->i_sb), inode, ZUS_OP_WRITE, ii,
+		       kiocb->ki_pos);
+	if (unlikely(ret < 0))
+		return ret;
+
+	kiocb->ki_pos += ret;
+	return ret;
+}
+
+static int _remove_privs_locked(struct inode *inode, struct file *file)
+{
+	int ret = file_remove_privs(file);
+
+	return ret;
+}
+
+ssize_t zuf_rw_write_iter(struct kiocb *kiocb, struct iov_iter *ii)
+{
+	struct inode *inode = file_inode(kiocb->ki_filp);
+	struct zuf_inode_info *zii = ZUII(inode);
+	ssize_t ret;
+
+	zuf_dbg_vfs("[%ld] ppos=0x%llx len=0x%zx\n",
+		     inode->i_ino, kiocb->ki_pos, iov_iter_count(ii));
+
+	ret = generic_write_checks(kiocb, ii);
+	if (unlikely(ret < 0))
+		goto out;
+
+	ret = _remove_privs_locked(inode, kiocb->ki_filp);
+	if (unlikely(ret < 0))
+		goto out;
+
+	zus_inode_cmtime_now(inode, zii->zi);
+	ret = _write_iter(inode, kiocb, ii);
+
+	if (kiocb->ki_pos > i_size_read(inode))
+		i_size_write(inode, kiocb->ki_pos);
+
+	inode->i_blocks = le64_to_cpu(zii->zi->i_blocks);
+
+out:
+
+	zuf_dbg_vfs("[%ld] => 0x%lx\n", inode->i_ino, ret);
+	return ret;
+}
diff --git a/fs/zuf/zus_api.h b/fs/zuf/zus_api.h
index 5870d63..90b34e4 100644
--- a/fs/zuf/zus_api.h
+++ b/fs/zuf/zus_api.h
@@ -529,6 +529,32 @@ static inline bool zufs_zde_emit(struct zufs_readdir_iter *rdi, __u64 ino,
 	return true;
 }
 
+/* ZUS_OP_READ/ZUS_OP_WRITE */
+struct zufs_ioc_IO {
+	struct zufs_ioc_hdr hdr;
+	struct zus_inode_info *zus_ii; /* IN */
+
+	__u64 filepos;
+};
+
+enum {
+	ZUFS_GBF_RESERVED = 1,
+	ZUFS_GBF_NEW = 2,
+};
+
+/* ZUS_OP_GET_BLOCK */
+struct zufs_ioc_get_block {
+	struct zufs_ioc_hdr hdr;
+	 /* IN */
+	struct zus_inode_info *zus_ii;
+	__u64 index; /* page index in file */
+	__u64 rw; /* Some flags + READ or WRITE */
+
+	/* OUT */
+	zu_dpp_t pmem_bn; /* zero return means: map a hole */
+	__u64 ret_flags;  /* One of ZUFS_GBF_XXX */
+};
+
 /* ZUS_OP_GET_SYMLINK */
 struct zufs_ioc_get_link {
 	struct zufs_ioc_hdr hdr;
-- 
2.5.5




