[RFC PATCH 1/3] staging, zsmalloc: introduce zs_mem_[read/write]

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



If an object lies on a page boundary, zs_map_object() copies the object's
content into a pre-allocated page and returns the virtual address of that
pre-allocated page. If the user informs zsmalloc of the memcpy region in
advance, this copy overhead can be avoided.
This patch implements two APIs that take the memcpy region information and
use it to perform the copy directly, without the extra staging copy.

For the USE_PGTABLE_MAPPING case, these APIs also avoid the cache-flush
and TLB-flush overhead.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
---
These are [RFC] patches: they are only compile-tested, because I do not
yet have a test environment. If the feedback is positive, I will set up a
test environment and verify correctness.
The patches are based on v3.8-rc3.
If a rebase is needed, please tell me which tree I should rebase onto.

diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 09a9d35..e3ef5a5 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -1045,6 +1045,118 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 }
 EXPORT_SYMBOL_GPL(zs_unmap_object);
 
+/**
+ * zs_mem_read - copy @n bytes out of a zsmalloc object into @dest
+ * @pool:	pool the object was allocated from
+ * @handle:	object handle returned by zs_malloc()
+ * @dest:	kernel buffer to copy into
+ * @src_off:	byte offset inside the object to start reading from
+ * @n:		number of bytes to copy
+ *
+ * Unlike zs_map_object(), an object that spans a page boundary is
+ * copied piecewise with kmap_atomic() instead of being staged through
+ * a per-cpu bounce page, avoiding one memcpy.
+ */
+void zs_mem_read(struct zs_pool *pool, unsigned long handle,
+			void *dest, unsigned long src_off, size_t n)
+{
+	struct page *page;
+	unsigned long obj_idx, off;
+
+	unsigned int class_idx;
+	enum fullness_group fg;
+	struct size_class *class;
+	struct page *pages[2];
+	int sizes[2];
+	void *addr;
+
+	BUG_ON(!handle);
+
+	/*
+	 * Because we use per-cpu mapping areas shared among the
+	 * pools/users, we can't allow mapping in interrupt context
+	 * because it can corrupt another users mappings.
+	 */
+	BUG_ON(in_interrupt());
+
+	obj_handle_to_location(handle, &page, &obj_idx);
+	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+	class = &pool->size_class[class_idx];
+	off = obj_idx_to_offset(page, obj_idx, class->size);
+	off += src_off;
+
+	/*
+	 * The requested range must lie entirely inside the object;
+	 * checking only @n would let src_off + n run past the end of
+	 * the object into its neighbour in the zspage.
+	 */
+	BUG_ON(src_off + n > class->size);
+
+	if (off + n <= PAGE_SIZE) {
+		/* this object is contained entirely within a page */
+		addr = kmap_atomic(page);
+		memcpy(dest, addr + off, n);
+		kunmap_atomic(addr);
+		return;
+	}
+
+	/* this object spans two pages */
+	pages[0] = page;
+	pages[1] = get_next_page(page);
+	BUG_ON(!pages[1]);
+
+	/* bytes to take from the first page, remainder from the second */
+	sizes[0] = PAGE_SIZE - off;
+	sizes[1] = n - sizes[0];
+
+	addr = kmap_atomic(pages[0]);
+	memcpy(dest, addr + off, sizes[0]);
+	kunmap_atomic(addr);
+
+	addr = kmap_atomic(pages[1]);
+	memcpy(dest + sizes[0], addr, sizes[1]);
+	kunmap_atomic(addr);
+}
+EXPORT_SYMBOL_GPL(zs_mem_read);
+
+/**
+ * zs_mem_write - copy @n bytes from @src into a zsmalloc object
+ * @pool:	pool the object was allocated from
+ * @handle:	object handle returned by zs_malloc()
+ * @src:	kernel buffer to copy from
+ * @dest_off:	byte offset inside the object to start writing at
+ * @n:		number of bytes to copy
+ *
+ * Unlike zs_map_object(), an object that spans a page boundary is
+ * written piecewise with kmap_atomic() instead of being staged through
+ * a per-cpu bounce page, avoiding one memcpy.
+ */
+void zs_mem_write(struct zs_pool *pool, unsigned long handle,
+			const void *src, unsigned long dest_off, size_t n)
+{
+	struct page *page;
+	unsigned long obj_idx, off;
+
+	unsigned int class_idx;
+	enum fullness_group fg;
+	struct size_class *class;
+	struct page *pages[2];
+	int sizes[2];
+	void *addr;
+
+	BUG_ON(!handle);
+
+	/*
+	 * Because we use per-cpu mapping areas shared among the
+	 * pools/users, we can't allow mapping in interrupt context
+	 * because it can corrupt another users mappings.
+	 */
+	BUG_ON(in_interrupt());
+
+	obj_handle_to_location(handle, &page, &obj_idx);
+	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+	class = &pool->size_class[class_idx];
+	off = obj_idx_to_offset(page, obj_idx, class->size);
+	off += dest_off;
+
+	/*
+	 * The requested range must lie entirely inside the object;
+	 * checking only @n would let dest_off + n run past the end of
+	 * the object and corrupt its neighbour in the zspage.
+	 */
+	BUG_ON(dest_off + n > class->size);
+
+	if (off + n <= PAGE_SIZE) {
+		/* this object is contained entirely within a page */
+		addr = kmap_atomic(page);
+		memcpy(addr + off, src, n);
+		kunmap_atomic(addr);
+		return;
+	}
+
+	/* this object spans two pages */
+	pages[0] = page;
+	pages[1] = get_next_page(page);
+	BUG_ON(!pages[1]);
+
+	/* bytes that fit in the first page, remainder into the second */
+	sizes[0] = PAGE_SIZE - off;
+	sizes[1] = n - sizes[0];
+
+	addr = kmap_atomic(pages[0]);
+	memcpy(addr + off, src, sizes[0]);
+	kunmap_atomic(addr);
+
+	addr = kmap_atomic(pages[1]);
+	memcpy(addr, src + sizes[0], sizes[1]);
+	kunmap_atomic(addr);
+}
+EXPORT_SYMBOL_GPL(zs_mem_write);
+
 u64 zs_get_total_size_bytes(struct zs_pool *pool)
 {
 	int i;
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
index de2e8bf..fb70f00 100644
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ b/drivers/staging/zsmalloc/zsmalloc.h
@@ -38,6 +38,11 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 			enum zs_mapmode mm);
 void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
 
+void zs_mem_read(struct zs_pool *pool, unsigned long handle,
+			void *dest, unsigned long src_off, size_t n);
+void zs_mem_write(struct zs_pool *pool, unsigned long handle,
+			const void *src, unsigned long dest_off, size_t n);
+
 u64 zs_get_total_size_bytes(struct zs_pool *pool);
 
 #endif
-- 
1.7.9.5

_______________________________________________
devel mailing list
devel@xxxxxxxxxxxxxxxxxxxxxx
http://driverdev.linuxdriverproject.org/mailman/listinfo/devel


[Index of Archives]     [Linux Driver Backports]     [DMA Engine]     [Linux GPIO]     [Linux SPI]     [Video for Linux]     [Linux USB Devel]     [Linux Coverity]     [Linux Audio Users]     [Linux Kernel]     [Linux SCSI]     [Yosemite Backpacking]
  Powered by Linux