This introduces a new dma device that exposes a single ioctl call
providing DMA read and write functionality to user space.

Signed-off-by: Dmitry Kalinkin <dmitry.kalinkin@xxxxxxxxx>
Cc: Igor Alekseev <igor.alekseev@xxxxxxx>
---
In the last reply Martyn suggested reworking this to use the existing
bus/vme/ctl device instead of creating a new bus/vme/dma%i device, and
to dynamically allocate a dma resource for each request.

I don't think those adjustments are needed: dynamic allocation doesn't
solve any practical problem that isn't caused by the current kernel
API. I also think that a separate device is a good feature, because it
allows userspace to discover the DMA capability. A separate chardev
also makes it possible to provide DMA read() and write() syscalls,
which can come in handy in combination with /bin/dd.
---
 drivers/staging/vme/devices/vme_user.c | 201 ++++++++++++++++++++++++++++++++-
 drivers/staging/vme/devices/vme_user.h |  11 ++
 2 files changed, 209 insertions(+), 3 deletions(-)

diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 9cca97a..5cc782e 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -79,15 +79,18 @@ static unsigned int bus_num;
  * We shall support 4 masters and 4 slaves with this driver.
  */
 #define VME_MAJOR	221	/* VME Major Device Number */
-#define VME_DEVS	9	/* Number of dev entries */
+#define VME_DEVS	10	/* Number of dev entries */
 
 #define MASTER_MINOR	0
 #define MASTER_MAX	3
 #define SLAVE_MINOR	4
 #define SLAVE_MAX	7
 #define CONTROL_MINOR	8
+#define DMA_MINOR	9
 
-#define PCI_BUF_SIZE  0x20000	/* Size of one slave image buffer */
+#define PCI_BUF_SIZE	0x20000	/* Size of one slave image buffer */
+
+#define VME_MAX_DMA_LEN	0x4000000	/* Maximal DMA transfer length */
 
 /*
  * Structure to handle image related parameters.
@@ -112,7 +115,7 @@ static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
 					MASTER_MINOR,	MASTER_MINOR,
 					SLAVE_MINOR,	SLAVE_MINOR,
 					SLAVE_MINOR,	SLAVE_MINOR,
-					CONTROL_MINOR
+					CONTROL_MINOR,	DMA_MINOR
 				};
 
 struct vme_user_vma_priv {
@@ -343,6 +346,168 @@ static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
 	return -EINVAL;
 }
 
+static int vme_user_sg_to_dma_list(const struct vme_dma_op *dma_op,
+				   struct sg_table *sgt,
+				   int sg_count, struct vme_dma_list *dma_list)
+{
+	ssize_t pos = 0;
+	struct scatterlist *sg;
+	int i, ret;
+
+	for_each_sg(sgt->sgl, sg, sg_count, i) {
+		struct vme_dma_attr *pci_attr, *vme_attr, *src, *dest;
+		dma_addr_t hw_address = sg_dma_address(sg);
+		unsigned int hw_len = sg_dma_len(sg);
+
+		vme_attr = vme_dma_vme_attribute(dma_op->vme_addr + pos,
+						 dma_op->aspace,
+						 dma_op->cycle,
+						 dma_op->dwidth);
+		if (!vme_attr)
+			return -ENOMEM;
+
+		pci_attr = vme_dma_pci_attribute(hw_address);
+		if (!pci_attr) {
+			vme_dma_free_attribute(vme_attr);
+			return -ENOMEM;
+		}
+
+		switch (dma_op->dir) {
+		case VME_DMA_MEM_TO_VME:
+			src = pci_attr;
+			dest = vme_attr;
+			break;
+		case VME_DMA_VME_TO_MEM:
+			src = vme_attr;
+			dest = pci_attr;
+			break;
+		}
+
+		ret = vme_dma_list_add(dma_list, src, dest, hw_len);
+
+		/*
+		 * XXX VME API doesn't mention whether we should keep
+		 * attributes around
+		 */
+		vme_dma_free_attribute(vme_attr);
+		vme_dma_free_attribute(pci_attr);
+
+		if (ret)
+			return ret;
+
+		pos += hw_len;
+	}
+
+	return 0;
+}
+
+static enum dma_data_direction vme_dir_to_dma_dir(unsigned vme_dir)
+{
+	switch (vme_dir) {
+	case VME_DMA_VME_TO_MEM:
+		return DMA_FROM_DEVICE;
+	case VME_DMA_MEM_TO_VME:
+		return DMA_TO_DEVICE;
+	}
+
+	return DMA_NONE;
+}
+
+static ssize_t vme_user_dma_ioctl(unsigned int minor,
+				  const struct vme_dma_op *dma_op)
+{
+	unsigned int offset = offset_in_page(dma_op->buf_vaddr);
+	unsigned long nr_pages;
+	enum dma_data_direction dir;
+	struct vme_dma_list *dma_list;
+	struct sg_table *sgt = NULL;
+	struct page **pages = NULL;
+	long got_pages;
+	ssize_t count;
+	int retval, sg_count;
+
+	/* Prevent WARN from dma_map_sg */
+	if (dma_op->count == 0)
+		return 0;
+
+	/*
+	 * This is a voluntary limit to prevent huge allocation for pages
+	 * array. VME_MAX_DMA_LEN is not a fundamental VME constraint.
+	 */
+	count = min_t(size_t, dma_op->count, VME_MAX_DMA_LEN);
+	nr_pages = (offset + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+	dir = vme_dir_to_dma_dir(dma_op->dir);
+	if (dir == DMA_NONE)
+		return -EINVAL;
+
+	pages = kmalloc_array(nr_pages, sizeof(pages[0]), GFP_KERNEL);
+	if (!pages) {
+		retval = -ENOMEM;
+		goto free;
+	}
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		retval = -ENOMEM;
+		goto free;
+	}
+
+	dma_list = vme_new_dma_list(image[minor].resource);
+	if (!dma_list) {
+		retval = -ENOMEM;
+		goto free;
+	}
+
+	got_pages = get_user_pages_fast(dma_op->buf_vaddr, nr_pages,
+					dir == DMA_FROM_DEVICE, pages);
+	if (got_pages != nr_pages) {
+		pr_debug("Not all pages were pinned\n");
+		retval = (got_pages < 0) ? got_pages : -EFAULT;
+		goto release_pages;
+	}
+
+	retval = sg_alloc_table_from_pages(sgt, pages, nr_pages,
+					   offset, count, GFP_KERNEL);
+	if (retval)
+		goto release_pages;
+
+	sg_count = dma_map_sg(vme_user_bridge->dev.parent,
+			      sgt->sgl, sgt->nents, dir);
+	if (!sg_count) {
+		pr_debug("DMA mapping error\n");
+		retval = -EFAULT;
+		goto free_sgt;
+	}
+
+	retval = vme_user_sg_to_dma_list(dma_op, sgt, sg_count, dma_list);
+	if (retval)
+		goto dma_unmap;
+
+	retval = vme_dma_list_exec(dma_list);
+
+dma_unmap:
+	dma_unmap_sg(vme_user_bridge->dev.parent, sgt->sgl, sgt->nents, dir);
+
+free_sgt:
+	sg_free_table(sgt);
+
+release_pages:
+	if (got_pages > 0)
+		release_pages(pages, got_pages, 0);
+
+	vme_dma_list_free(dma_list);
+
+free:
+	kfree(sgt);
+	kfree(pages);
+
+	if (retval)
+		return retval;
+
+	return count;
+}
+
 /*
  * The ioctls provided by the old VME access method (the one at vmelinux.org)
  * are most certainly wrong as the effectively push the registers layout
@@ -359,6 +524,7 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
 	struct vme_master master;
 	struct vme_slave slave;
 	struct vme_irq_id irq_req;
+	struct vme_dma_op dma_op;
 	unsigned long copied;
 	unsigned int minor = MINOR(inode->i_rdev);
 	int retval;
@@ -467,6 +633,19 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
 			break;
 		}
 		break;
+	case DMA_MINOR:
+		switch (cmd) {
+		case VME_DO_DMA:
+			copied = copy_from_user(&dma_op, argp,
+						sizeof(dma_op));
+			if (copied != 0) {
+				pr_warn("Partial copy from userspace\n");
+				return -EFAULT;
+			}
+
+			return vme_user_dma_ioctl(minor, &dma_op);
+		}
+		break;
 	}
 
 	return -EINVAL;
@@ -678,6 +857,15 @@ static int vme_user_probe(struct vme_dev *vdev)
 		}
 	}
 
+	image[DMA_MINOR].resource = vme_dma_request(vme_user_bridge,
+		VME_DMA_VME_TO_MEM | VME_DMA_MEM_TO_VME);
+	if (!image[DMA_MINOR].resource) {
+		dev_warn(&vdev->dev,
+			 "Unable to allocate dma resource\n");
+		err = -ENOMEM;
+		goto err_master;
+	}
+
 	/* Create sysfs entries - on udev systems this creates the dev files */
 	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
 	if (IS_ERR(vme_user_sysfs_class)) {
@@ -700,6 +888,9 @@ static int vme_user_probe(struct vme_dev *vdev)
 		case SLAVE_MINOR:
 			name = "bus/vme/s%d";
 			break;
+		case DMA_MINOR:
+			name = "bus/vme/dma0";
+			break;
 		default:
 			err = -EINVAL;
 			goto err_sysfs;
@@ -724,6 +915,8 @@ err_sysfs:
 	}
 	class_destroy(vme_user_sysfs_class);
 
+	vme_dma_free(image[DMA_MINOR].resource);
+
 	/* Ensure counter set correcty to unalloc all master windows */
 	i = MASTER_MAX + 1;
 err_master:
@@ -764,6 +957,8 @@ static int vme_user_remove(struct vme_dev *dev)
 	}
 	class_destroy(vme_user_sysfs_class);
 
+	vme_dma_free(image[DMA_MINOR].resource);
+
 	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
 		kfree(image[i].kern_buf);
 		vme_master_free(image[i].resource);
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index b8cc7bc..252b1c9 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -48,11 +48,22 @@ struct vme_irq_id {
 	__u8 statid;
 };
 
+struct vme_dma_op {
+	__u64 vme_addr;		/* Starting Address on the VMEbus */
+	__u64 buf_vaddr;	/* Pointer to userspace memory */
+	__u32 count;		/* Count of bytes to copy */
+	__u32 aspace;		/* Address Space */
+	__u32 cycle;		/* Cycle properties */
+	__u32 dwidth;		/* Data transfer width */
+	__u32 dir;		/* Transfer Direction */
+};
+
 #define VME_GET_SLAVE _IOR(VME_IOC_MAGIC, 1, struct vme_slave)
 #define VME_SET_SLAVE _IOW(VME_IOC_MAGIC, 2, struct vme_slave)
 #define VME_GET_MASTER _IOR(VME_IOC_MAGIC, 3, struct vme_master)
 #define VME_SET_MASTER _IOW(VME_IOC_MAGIC, 4, struct vme_master)
 #define VME_IRQ_GEN _IOW(VME_IOC_MAGIC, 5, struct vme_irq_id)
+#define VME_DO_DMA _IOW(VME_IOC_MAGIC, 7, struct vme_dma_op)
 
 #endif /* _VME_USER_H_ */
-- 
1.8.3.1
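
For illustration, here is a minimal userspace sketch (not part of the
patch) of how the new ioctl could be driven. It assumes the chardev
created above shows up as /dev/bus/vme/dma0 via udev, and that the
VME_A32, VME_SCT, VME_USER, VME_DATA, VME_D32 and VME_DMA_VME_TO_MEM
constants from the kernel's vme.h are made visible to the userspace
build; the VME address and transfer size are placeholders.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include "vme.h"	/* VME_* aspace/cycle/dwidth/dir constants (assumed) */
#include "vme_user.h"	/* struct vme_dma_op, VME_DO_DMA */

int main(void)
{
	struct vme_dma_op op = { 0 };
	size_t len = 0x10000;
	void *buf;
	int fd, ret;

	buf = malloc(len);
	if (!buf)
		return 1;

	fd = open("/dev/bus/vme/dma0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	op.vme_addr = 0x08000000;	/* placeholder VME bus address */
	op.buf_vaddr = (uintptr_t)buf;	/* user buffer; pinned by the driver */
	op.count = len;
	op.aspace = VME_A32;
	op.cycle = VME_SCT | VME_USER | VME_DATA;
	op.dwidth = VME_D32;
	op.dir = VME_DMA_VME_TO_MEM;	/* VME -> memory, i.e. a read */

	/* Returns bytes transferred (possibly short), or -1 with errno set. */
	ret = ioctl(fd, VME_DO_DMA, &op);
	if (ret < 0)
		perror("VME_DO_DMA");
	else
		printf("transferred %d bytes\n", ret);

	close(fd);
	free(buf);
	return ret < 0;
}

Because a single request is capped at VME_MAX_DMA_LEN and the ioctl
returns the number of bytes actually transferred, a robust caller would
loop, advancing vme_addr and buf_vaddr, until the whole buffer has been
moved.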