Introduces a new function driver, f_mtp.c, that allows a Media Transfer Protocol stack to be implemented in user space. It uses a dedicated character driver, u_mtp, to implement the user-space file I/O interface. mtp provides the glue between USB function endpoints on one side and a character device on the other. User space can read from and write to the USB function's bulk OUT and bulk IN endpoints by reading/writing the character device. The USB function character devices will be available at /dev/gmtp0...N. Signed-off-by: Arnaud Mandy <ext-arnaud.2.mandy@xxxxxxxxx> --- drivers/usb/gadget/f_mtp.c | 2064 ++++++++++++++++++++++++++++++++++++++++++++ 1 files changed, 2064 insertions(+), 0 deletions(-) create mode 100644 drivers/usb/gadget/f_mtp.c diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c new file mode 100644 index 0000000..e49364d --- /dev/null +++ b/drivers/usb/gadget/f_mtp.c @@ -0,0 +1,2064 @@ +/* + * f_mtp.c -- USB MTP Function Driver + * + * Copyright (C) 2009 Nokia Corporation + * Contact: Roger Quadros <roger.quadros at nokia.com> + * + * Based on f_obex.c by Felipe Balbi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/utsname.h> +#include <linux/uaccess.h> +#include <linux/ioctl.h> +#include <linux/usb/ptp.h> +#include <linux/list.h> +#include <linux/vmalloc.h> +#include <linux/interrupt.h> +#include <linux/kfifo.h> +#include <linux/poll.h> +#include "gadget_chips.h" + +/*---------------------------------------------------------------------------*/ +/* mtp definitions */ + +/* Number of USB requests that can be queued at a time */ +#define MTP_QUEUE_SIZE 4 + +/* size in bytes of RX and TX FIFOs */ +#define MTP_BUF_SIZE 65536 + +static unsigned queue_size = MTP_QUEUE_SIZE; +module_param(queue_size, uint, 0); +MODULE_PARM_DESC(queue_size, "Number of USB requests to queue at a time. Default 4"); + +static unsigned buflen = MTP_BUF_SIZE; +module_param(buflen, uint, 0); +MODULE_PARM_DESC(buflen, "kfifo buffer size. 
Default 65536"); + +enum mtp_buf_state { + BUF_EMPTY = 0, + BUF_FULL, + BUF_BUSY, +}; + +struct mtp_buf { + struct usb_request *r; + bool busy; + struct mtp_buf *next; + enum mtp_buf_state state; +}; + +struct mtp_ep_desc { + struct usb_endpoint_descriptor *mtp_in; + struct usb_endpoint_descriptor *mtp_out; + struct usb_endpoint_descriptor *mtp_int; +}; + +/* --------------------------------------------------------------------------*/ +/* f_mtp definitions */ +#define MAX_STATUS_DATA_SIZE (PTP_MAX_STATUS_SIZE - 4) + +/* device status cache */ +struct device_status { + u16 length; + u16 code; + u8 data[MAX_STATUS_DATA_SIZE]; +}; + +/* Class specific control request buffer */ +struct ctrl_request { + bool valid; + u16 length; + u8 data[PTP_MAX_CONTROL_SIZE]; +}; + +struct f_mtp { + struct usb_gadget *gadget; + struct usb_function func; + + struct usb_ep *ep_out; + struct usb_ep *ep_in; + struct usb_ep *ep_notify; + + struct mtp_ep_desc in_use; + struct mtp_ep_desc fs; + struct mtp_ep_desc hs; + + int usb_speed; + + struct usb_composite_dev *cdev; + u8 ctrl_id; + u8 mtp_id; + u8 connected; + struct device_status dev_status; + struct ctrl_request ctrl_req; + spinlock_t lock; +}; + +/*---------------------------------------------------------------------------*/ + +static inline struct f_mtp *func_to_mtp(struct usb_function *f) +{ + return container_of(f, struct f_mtp, func); +} + +/* + * USB String Descriptors + */ + +static struct usb_string mtp_string_defs[] = { + { 0, "MTP" ,}, + { /* ZEROES END LIST */ }, +}; + +static struct usb_gadget_strings mtp_string_table = { + .language = 0x0409, /* en-US */ + .strings = mtp_string_defs, +}; + +static struct usb_gadget_strings *mtp_strings[] = { + &mtp_string_table, + NULL, +}; + +/* + * USB Interface Descriptors + */ + +static struct usb_interface_descriptor mtp_intf __initdata = { + .bLength = sizeof(mtp_intf), + .bDescriptorType = USB_DT_INTERFACE, + .bAlternateSetting = 0, + .bNumEndpoints = 3, + .bInterfaceClass = USB_CLASS_STILL_IMAGE, + .bInterfaceSubClass = USB_SUBCLASS_PTP, + .bInterfaceProtocol = USB_PROTOCOL_PTP, +}; + +/* + * USB Endpoint Descriptors + */ + +/* High speed support */ +static struct usb_endpoint_descriptor mtp_ep_hs_in_desc __initdata = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(PTP_HS_DATA_PKT_SIZE), + .bInterval = 0, +}; + + +static struct usb_endpoint_descriptor mtp_ep_hs_out_desc __initdata = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(PTP_HS_DATA_PKT_SIZE), + .bInterval = 0, +}; + + +static struct usb_endpoint_descriptor mtp_ep_hs_int_desc __initdata = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(PTP_HS_EVENT_PKT_SIZE), + .bInterval = 12, +}; + +static struct usb_descriptor_header *mtp_hs_function[] __initdata = { + (struct usb_descriptor_header *) &mtp_intf, + (struct usb_descriptor_header *) &mtp_ep_hs_in_desc, + (struct usb_descriptor_header *) &mtp_ep_hs_out_desc, + (struct usb_descriptor_header *) &mtp_ep_hs_int_desc, + NULL, +}; + +/* Full speed support */ +static struct usb_endpoint_descriptor mtp_ep_fs_in_desc __initdata = { + .bLength = USB_DT_ENDPOINT_SIZE, + 
.bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(PTP_FS_DATA_PKT_SIZE), + .bInterval = 0, +}; + +static struct usb_endpoint_descriptor mtp_ep_fs_out_desc __initdata = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = __constant_cpu_to_le16(PTP_FS_DATA_PKT_SIZE), + .bInterval = 0, +}; + +static struct usb_endpoint_descriptor mtp_ep_fs_int_desc __initdata = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = __constant_cpu_to_le16(PTP_FS_EVENT_PKT_SIZE), + .bInterval = 255, +}; + +static struct usb_descriptor_header *mtp_fs_function[] __initdata = { + (struct usb_descriptor_header *) &mtp_intf, + (struct usb_descriptor_header *) &mtp_ep_fs_in_desc, + (struct usb_descriptor_header *) &mtp_ep_fs_out_desc, + (struct usb_descriptor_header *) &mtp_ep_fs_int_desc, + NULL, +}; + +/*---------------------------------------------------------------------------*/ +/* mtp - USB character device glue + * It was decided that mtp should have its own character driver + * because of the existence of u_serial + */ + +/*----------------USB glue----------------------------------*/ +/* + * mtp_alloc_req + * + * Allocate a usb_request and its buffer. Returns a pointer to the + * usb_request or NULL if there is an error. + */ +struct usb_request * +mtp_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) +{ + struct usb_request *req; + + req = usb_ep_alloc_request(ep, kmalloc_flags); + + if (req != NULL) { + req->length = len; + req->buf = kmalloc(len, kmalloc_flags); + if (req->buf == NULL) { + usb_ep_free_request(ep, req); + return NULL; + } + } + + return req; +} + +/* + * mtp_free_req + * + * Free a usb_request and its buffer. + */ +void mtp_free_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} + +static int mtp_alloc_requests(struct usb_ep *ep, struct mtp_buf *queue, + void (*fn)(struct usb_ep *, struct usb_request *)) +{ + int i; + struct usb_request *req; + + /* Pre-allocate up to queue_size transfers, but if we can't + * do quite that many this time, don't fail ... we just won't + * be as speedy as we might otherwise be. 
+ */ + for (i = 0; i < MTP_QUEUE_SIZE; i++) { + req = mtp_alloc_req(ep, PAGE_SIZE, GFP_ATOMIC); + if (!req) + break; + req->complete = fn; + queue[i].r = req; + queue[i].busy = 0; + queue[i].state = BUF_EMPTY; + queue[i].next = &queue[i+1]; + } + if (i == 0) + return -ENOMEM; + queue[--i].next = &queue[0]; + return 0; +} + +static void mtp_free_requests(struct usb_ep *ep, struct mtp_buf *queue) +{ + struct usb_request *req; + int i; + + for (i = 0; i < MTP_QUEUE_SIZE; i++) { + req = queue[i].r; + if (!req) + break; + mtp_free_req(ep, req); + queue[i].r = NULL; + } +} + +/*----------------------------------------------------------------*/ + +struct mtp_dev { + struct f_mtp *fmtp; + struct device *dev; /* Driver model state */ + spinlock_t lock; /* serialize access */ + int opened; /* indicates if device open */ + wait_queue_head_t close_wait; /* wait for device close */ + int index; /* device index */ + + wait_queue_head_t event_wait; /* wait for events */ + bool event; /* event/s available */ + + spinlock_t rx_lock; /* guard rx stuff */ + struct kfifo rx_fifo; + void *rx_fifo_buf; + struct tasklet_struct rx_task; + wait_queue_head_t rx_wait; /* wait for data in RX buf */ + unsigned int rx_queued; /* no. of queued requests */ + struct mtp_buf rx_queue[MTP_QUEUE_SIZE]; + struct mtp_buf *rx_next; + bool rx_cancel; + + spinlock_t tx_lock; /* guard tx stuff */ + struct kfifo tx_fifo; + void *tx_fifo_buf; + wait_queue_head_t tx_wait; /* wait for space in TX buf */ + unsigned int tx_flush:1; /* flush TX buf */ + wait_queue_head_t tx_flush_wait; + int tx_last_size; /*last tx packet's size*/ + struct tasklet_struct tx_task; + struct mtp_buf tx_queue[MTP_QUEUE_SIZE]; + unsigned int tx_queued; + struct mtp_buf *tx_next; + bool tx_cancel; +}; + +struct mtp_data { + struct mtp_dev *mtpdev; + u8 dev_busy; + struct class *class; + dev_t dev; + struct cdev chdev; + struct usb_gadget *gadget; +}; + +static struct mtp_data mtpdata; + +static void mtp_rx_complete(struct usb_ep *ep, struct usb_request *req); +static void mtp_tx_complete(struct usb_ep *ep, struct usb_request *req); +static int mtp_do_rx(struct mtp_dev *mtp); +static long mtp_ioctl(struct file *filp, unsigned code, unsigned long value); + +/*----------some more USB glue---------------------------*/ + +/* OUT complete, we have new data to read */ +static void mtp_rx_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct mtp_dev *mtp = ep->driver_data; + unsigned long flags; + int i; + struct mtp_buf *queue = req->context; + + spin_lock_irqsave(&mtp->rx_lock, flags); + + /* put received data into RX ring buffer */ + /* we assume enough space is there in RX buffer for this request + * the checking should be done in mtp_do_rx() before this request + * was queued */ + switch (req->status) { + case 0: + /* normal completion */ + i = kfifo_in(&mtp->rx_fifo, req->buf, req->actual); + if (i != req->actual) { + WARN(1, KERN_ERR "%s: PUT(%d) != actual(%d) data " + "loss possible. 
rx_queued = %d\n", __func__, i, + req->actual, mtp->rx_queued); + } + dev_vdbg(mtp->dev, + "%s: rx len=%d, 0x%02x 0x%02x 0x%02x ...\n", __func__, + req->actual, *((u8 *)req->buf), + *((u8 *)req->buf+1), *((u8 *)req->buf+2)); + + /* wake up rx_wait */ + wake_up_interruptible(&mtp->rx_wait); + break; + case -ESHUTDOWN: + /* disconnect */ + dev_warn(mtp->dev, "%s: %s shutdown\n", __func__, ep->name); + break; + default: + /* presumably a transient fault */ + dev_warn(mtp->dev, "%s: unexpected %s status %d\n", + __func__, ep->name, req->status); + break; + } + + mtp->rx_queued--; + queue->busy = false; + queue->state = BUF_EMPTY; + spin_unlock_irqrestore(&mtp->rx_lock, flags); + if (!mtp->rx_cancel) + tasklet_schedule(&mtp->rx_task); +} + +static int mtp_do_tx(struct mtp_dev *mtp); +/* IN complete, i.e. USB write complete. we can free buffer */ +static void mtp_tx_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct mtp_dev *mtp = ep->driver_data; + unsigned long flags; + struct mtp_buf *queue = req->context; + + spin_lock_irqsave(&mtp->tx_lock, flags); + queue->busy = false; + queue->state = BUF_EMPTY; + spin_unlock_irqrestore(&mtp->tx_lock, flags); + + switch (req->status) { + case 0: + /* normal completion, queue next request */ + if (!mtp->tx_cancel) + tasklet_schedule(&mtp->tx_task); + break; + case -ESHUTDOWN: + /* disconnect */ + dev_warn(mtp->dev, "%s: %s shutdown\n", __func__, ep->name); + break; + default: + /* presumably a transient fault */ + dev_warn(mtp->dev, "%s: unexpected %s status %d\n", + __func__, ep->name, req->status); + break; + } +} + + +/* Read the TX buffer and send to USB */ +/* mtp->tx_lock must be held */ +static int mtp_do_tx(struct mtp_dev *mtp) +{ + struct mtp_buf *queue = mtp->tx_next; + struct usb_ep *in; + int status; + + if (!mtp->fmtp || !mtp->fmtp->ep_in) + return -ENODEV; + + in = mtp->fmtp->ep_in; + + while (queue->state == BUF_EMPTY) { + struct usb_request *req; + unsigned int len; + + req = queue->r; + + len = kfifo_len(&mtp->tx_fifo); + if (!len && !mtp->tx_flush) + /* TX buf empty */ + break; + + req->zero = 0; + if (len > PAGE_SIZE) { + len = PAGE_SIZE; + mtp->tx_last_size = 0; /* not the last packet */ + } else { + /* this is last packet in TX buf. send ZLP/SLP + * if user has requested so + */ + req->zero = mtp->tx_flush; + mtp->tx_last_size = len; + } + + len = kfifo_out(&mtp->tx_fifo, req->buf, len); + req->length = len; + + queue->state = BUF_FULL; + queue->busy = true; + queue->r->context = queue; + + if (req->zero) { + mtp->tx_flush = 0; + wake_up_interruptible(&mtp->tx_flush_wait); + } + + dev_vdbg(mtp->dev, + "%s: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", __func__, + len, *((u8 *)req->buf), + *((u8 *)req->buf+1), *((u8 *)req->buf+2)); + /* Drop lock while we call out of driver; completions + * could be issued while we do so. Disconnection may + * happen too; maybe immediately before we queue this! + * + * NOTE that we may keep sending data for a while after + * the file is closed. + */ + spin_unlock(&mtp->tx_lock); + status = usb_ep_queue(in, req, GFP_ATOMIC); + spin_lock(&mtp->tx_lock); + + if (status) { + dev_err(mtp->dev, "%s: %s %s err %d\n", + __func__, "queue", in->name, status); + queue->state = BUF_EMPTY; + queue->busy = false; + break; /* FIXME: re-try? 
*/ + } + + mtp->tx_next = queue->next; + queue = mtp->tx_next; + + /* abort immediately after disconnect */ + if (!mtp->fmtp) { + dev_dbg(mtp->dev, + "%s: disconnected so aborting\n", __func__); + break; + } + + /* wake up tx_wait */ + wake_up_interruptible(&mtp->tx_wait); + } + return 0; +} + +static void mtp_tx_task(unsigned long _mtp) +{ + struct mtp_dev *mtp = (void *)_mtp; + + spin_lock_irq(&mtp->tx_lock); + if (mtp->fmtp && mtp->fmtp->ep_in) + mtp_do_tx(mtp); + spin_unlock_irq(&mtp->tx_lock); +} + +/* Tasklet: Queue USB read requests whenever RX buffer available + * Must be called with mtp->rx_lock held + */ +static int mtp_do_rx(struct mtp_dev *mtp) +{ + /* Queue the request only if required space is there in RX buffer */ + struct mtp_buf *queue = mtp->rx_next; + struct usb_ep *out; + int started = 0; + + if (!mtp->fmtp || !mtp->fmtp->ep_out) + return -EINVAL; + + out = mtp->fmtp->ep_out; + + while (queue->state == BUF_EMPTY) { + struct usb_request *req; + int status; + + req = queue->r; + req->length = PAGE_SIZE; + + /* check if space is available in RX buf for this request */ + if (kfifo_avail(&mtp->rx_fifo) < + (mtp->rx_queued + 2)*req->length) { + /* insufficient space */ + break; + } + mtp->rx_queued++; + queue->state = BUF_FULL; + queue->busy = true; + queue->r->context = queue; + + /* drop lock while we call out; the controller driver + * may need to call us back (e.g. for disconnect) + */ + spin_unlock(&mtp->rx_lock); + status = usb_ep_queue(out, req, GFP_ATOMIC); + spin_lock(&mtp->rx_lock); + + if (status) { + dev_warn(mtp->dev, "%s: %s %s err %d\n", + __func__, "queue", out->name, status); + queue->state = BUF_EMPTY; + queue->busy = false; + mtp->rx_queued--; + break; /* FIXME: re-try? */ + } + + started++; + mtp->rx_next = queue->next; + queue = mtp->rx_next; + + /* abort immediately after disconnect */ + if (!mtp->fmtp) { + dev_dbg(mtp->dev, "%s: disconnected so aborting\n", + __func__); + break; + } + } + return started; +} + + +static void mtp_rx_task(unsigned long _mtp) +{ + struct mtp_dev *mtp = (void *)_mtp; + + spin_lock_irq(&mtp->rx_lock); + if (mtp->fmtp && mtp->fmtp->ep_out) + mtp_do_rx(mtp); + spin_unlock_irq(&mtp->rx_lock); +} + +/*----------FILE Operations-------------------------------*/ + +static int mtp_open(struct inode *inode, struct file *filp) +{ + unsigned minor = iminor(inode); + struct mtp_dev *mtp; + int index; + + index = minor - MINOR(mtpdata.dev); + if (index >= mtpdata.dev_busy) + return -ENODEV; + + if (!mtpdata.mtpdev) + return -ENODEV; + + mtp = &mtpdata.mtpdev[index]; + filp->private_data = mtp; + + /* prevent multiple opens for now */ + if (mtp->opened) + return -EBUSY; + spin_lock_irq(&mtp->lock); + if (mtp->opened) { + spin_unlock_irq(&mtp->lock); + return -EBUSY; + } + mtp->opened = 1; + spin_unlock_irq(&mtp->lock); + mtp->index = index; + + /* if connected, start receiving */ + if (mtp->fmtp) + tasklet_schedule(&mtp->rx_task); + + dev_dbg(mtp->dev, "%s: mtp%d opened\n", __func__, mtp->index); + return 0; +} + +static int mtp_release(struct inode *inode, struct file *filp) +{ + struct mtp_dev *mtp = filp->private_data; + + filp->private_data = NULL; + + dev_dbg(mtp->dev, "%s: releasing mtp%d\n", __func__, mtp->index); + + if (!mtp->opened) + goto mtp_release_exit; + + spin_lock_irq(&mtp->lock); + mtp->opened = 0; + spin_unlock_irq(&mtp->lock); + + wake_up_interruptible(&mtp->close_wait); + +mtp_release_exit: + dev_dbg(mtp->dev, "%s: mtp%d released!!\n", __func__, mtp->index); + return 0; +} + +static int mtp_can_read(struct mtp_dev 
*mtp) +{ + int ret; + + spin_lock_irq(&mtp->rx_lock); + ret = kfifo_len(&mtp->rx_fifo) ? 1 : 0; + spin_unlock_irq(&mtp->rx_lock); + + return ret; +} + +static ssize_t mtp_read(struct file *filp, char __user *buff, + size_t len, loff_t *o) +{ + struct mtp_dev *mtp = filp->private_data; + unsigned read = 0; + int ret = 0; + + if (!mtp->fmtp || !mtp->fmtp->ep_out) { + /* not yet connected or reading not possible*/ + return -EINVAL; + } + + if (len) { + read = kfifo_len(&mtp->rx_fifo); + if (!read) { + /* if NONBLOCK then return immediately */ + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + + /* sleep till we have some data */ + if (wait_event_interruptible(mtp->rx_wait, + mtp_can_read(mtp))) + return -ERESTARTSYS; + + } + ret = kfifo_to_user(&mtp->rx_fifo, buff, len, &read); + if (ret) + return ret; + } + + if (read > 0) { + spin_lock_irq(&mtp->rx_lock); + mtp_do_rx(mtp); + spin_unlock_irq(&mtp->rx_lock); + } + + dev_vdbg(mtp->dev, "%s done %d/%d\n", __func__, read, len); + return read; +} + +static int mtp_can_write(struct mtp_dev *mtp) +{ + int ret; + + spin_lock_irq(&mtp->tx_lock); + ret = !kfifo_is_full(&mtp->tx_fifo); + spin_unlock_irq(&mtp->tx_lock); + + return ret; +} + +static ssize_t mtp_write(struct file *filp, const char __user *buff, + size_t len, loff_t *o) +{ + struct mtp_dev *mtp = filp->private_data; + unsigned wrote = 0; + int ret = 0; + + if (!mtp->fmtp || !mtp->fmtp->ep_in) { + /* not yet connected or writing not possible */ + return -EINVAL; + } + + if (len) { + if (kfifo_is_full(&mtp->tx_fifo)) { + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + + /* sleep till we have some space to write into */ + if (wait_event_interruptible(mtp->tx_wait, + mtp_can_write(mtp))) + return -ERESTARTSYS; + + } + ret = kfifo_from_user(&mtp->tx_fifo, buff, len, &wrote); + if (ret) + return ret; + } + + if (wrote > 0) { + spin_lock_irq(&mtp->tx_lock); + mtp_do_tx(mtp); + spin_unlock_irq(&mtp->tx_lock); + } + + dev_vdbg(mtp->dev, "%s done %d/%d\n", __func__, wrote, len); + return wrote; +} + +static unsigned int mtp_poll(struct file *filp, struct poll_table_struct *pt) +{ + struct mtp_dev *mtp = filp->private_data; + int ret = 0; + int rx = 0, tx = 0, ev = 0; + + /* generic poll implementation */ + poll_wait(filp, &mtp->rx_wait, pt); + poll_wait(filp, &mtp->tx_wait, pt); + poll_wait(filp, &mtp->event_wait, pt); + + if (!mtp->fmtp) { + /* not yet connected */ + goto poll_exit; + } + + /* check if data is available to read */ + if (mtp->fmtp->ep_out) { + rx = kfifo_len(&mtp->rx_fifo); + if (rx) + ret |= POLLIN | POLLRDNORM; + } + + /* check if space is available to write */ + if (mtp->fmtp->ep_in) { + tx = kfifo_avail(&mtp->tx_fifo); + if (tx) + ret |= POLLOUT | POLLWRNORM; + } + + /* check if event/s available */ + ev = mtp->event; + if (ev) + ret |= POLLPRI; + + dev_dbg(mtp->dev, "%s: rx avl %d, tx space %d, event %d\n", + __func__, rx, tx, ev); +poll_exit: + + return ret; +} + +int mtp_fsync(struct file *filp, int datasync) +{ + struct mtp_dev *mtp = filp->private_data; + + if (!mtp->fmtp || !mtp->fmtp->ep_in) { + /* not yet connected or writing not possible */ + return -EINVAL; + } + + /* flush the TX buffer and send ZLP/SLP + * we will wait till TX buffer is empty + */ + spin_lock_irq(&mtp->tx_lock); + + if (mtp->tx_flush) { + dev_err(mtp->dev, "%s tx_flush already requested\n", __func__); + spin_unlock_irq(&mtp->tx_lock); + return -EINVAL; + } + + if (!kfifo_len(&mtp->tx_fifo)) { + if (mtp->tx_last_size == mtp->fmtp->ep_in->maxpacket) + mtp->tx_flush = 1; + } else + mtp->tx_flush 
= 1; + + if (mtp->tx_flush) { + mtp_do_tx(mtp); + + spin_unlock_irq(&mtp->tx_lock); + + if (wait_event_interruptible(mtp->tx_flush_wait, + !mtp->tx_flush)) + return -ERESTARTSYS; + } else + spin_unlock_irq(&mtp->tx_lock); + + dev_dbg(mtp->dev, "%s complete\n", __func__); + return 0; +} + +static const struct file_operations mtp_fops = { + .owner = THIS_MODULE, + .open = mtp_open, + .poll = mtp_poll, + .unlocked_ioctl = mtp_ioctl, + .release = mtp_release, + .read = mtp_read, + .write = mtp_write, + .fsync = mtp_fsync, +}; + +/*------------USB Gadget Driver Interface----------------------------*/ + +/** + * gmtp_setup - initialize the character driver for one or more devices + * @g: gadget to associate with these devices + * Context: may sleep + * + * This driver needs to know how many char. devices it should manage. + * Use this call to set up the devices that will be exported through USB. + * Later, connect them to functions based on what configuration is activated + * by the USB host, and disconnect them as appropriate. + * + * Returns negative errno or zero. + */ +int __init gmtp_setup(struct usb_gadget *g) +{ + struct mtp_dev *mtp; + int status; + + if (mtpdata.dev_busy) + return -EBUSY; + + mtpdata.mtpdev = kzalloc(sizeof(struct mtp_dev), GFP_KERNEL); + if (!mtpdata.mtpdev) + return -ENOMEM; + + mtp = mtpdata.mtpdev; + + /* create the char dev */ + status = alloc_chrdev_region(&mtpdata.dev, 0, 1, "gmtp"); + if (status) + goto fail1; + + cdev_init(&mtpdata.chdev, &mtp_fops); + + mtpdata.chdev.owner = THIS_MODULE; + mtpdata.dev_busy = 1; + + status = cdev_add(&mtpdata.chdev, mtpdata.dev, 1); + if (status) + goto fail2; + + /* register with sysfs */ + mtpdata.class = class_create(THIS_MODULE, "gmtp"); + if (IS_ERR(mtpdata.class)) { + pr_err("%s: could not create class gmtp\n", __func__); + status = PTR_ERR(mtpdata.class); + goto fail3; + } + + spin_lock_init(&mtp->lock); + spin_lock_init(&mtp->rx_lock); + spin_lock_init(&mtp->tx_lock); + init_waitqueue_head(&mtp->rx_wait); + init_waitqueue_head(&mtp->tx_wait); + init_waitqueue_head(&mtp->event_wait); + init_waitqueue_head(&mtp->tx_flush_wait); + init_waitqueue_head(&mtp->close_wait); + + tasklet_init(&mtp->rx_task, mtp_rx_task, (unsigned long) mtp); + tasklet_init(&mtp->tx_task, mtp_tx_task, (unsigned long) mtp); + mtp->dev = device_create(mtpdata.class, NULL, + MKDEV(MAJOR(mtpdata.dev), MINOR(mtpdata.dev)), NULL, "mtp0"); + if (IS_ERR(mtp->dev)) { + pr_err("%s: device_create() failed for device 0\n", + __func__); + status = PTR_ERR(mtp->dev); + goto fail4; + } + /* Allocate FIFO buffers */ + mtp->tx_fifo_buf = vmalloc(buflen); + if (!mtp->tx_fifo_buf) { + status = -ENOMEM; + goto fail5; + } + kfifo_init(&mtp->tx_fifo, + mtp->tx_fifo_buf, buflen); + + mtp->rx_fifo_buf = vmalloc(buflen); + if (!mtp->rx_fifo_buf) { + status = -ENOMEM; + vfree(mtp->tx_fifo_buf); + goto fail5; + } + kfifo_init(&mtp->rx_fifo, + mtp->rx_fifo_buf, buflen); + + mtpdata.gadget = g; + + return 0; +fail5: + /* FIFO buffers were already freed on this path; undo device_create() */ + device_destroy(mtpdata.class, + MKDEV(MAJOR(mtpdata.dev), MINOR(mtpdata.dev))); +fail4: + class_destroy(mtpdata.class); +fail3: + cdev_del(&mtpdata.chdev); +fail2: + unregister_chrdev_region(mtpdata.dev, mtpdata.dev_busy); +fail1: + kfree(mtpdata.mtpdev); + mtpdata.mtpdev = NULL; + mtpdata.dev_busy = 0; + + return status; +} + +static int mtp_closed(struct mtp_dev *mtp) +{ + int ret; + + spin_lock_irq(&mtp->lock); + ret = !mtp->opened; + spin_unlock_irq(&mtp->lock); + return ret; +} + +/** + * 
gmtp_cleanup - remove the USB-to-character-device glue and devices + * Context: may sleep + * + * This is called to free all resources allocated by @gmtp_setup(). + * It may need to wait until some open /dev/ files have been closed. + */ +void gmtp_cleanup(void) +{ + struct mtp_dev *mtp; + + if (!mtpdata.mtpdev) + return; + + mtp = mtpdata.mtpdev; + + tasklet_kill(&mtp->rx_task); + tasklet_kill(&mtp->tx_task); + device_destroy(mtpdata.class, MKDEV(MAJOR(mtpdata.dev), + MINOR(mtpdata.dev))); + /* wait till open files are closed */ + wait_event(mtp->close_wait, mtp_closed(mtp)); + vfree(mtp->tx_fifo_buf); + vfree(mtp->rx_fifo_buf); + + cdev_del(&mtpdata.chdev); + class_destroy(mtpdata.class); + + /* cdev_put(&gmtp>chdev); */ + unregister_chrdev_region(mtpdata.dev, mtpdata.dev_busy); + + kfree(mtpdata.mtpdev); + mtpdata.mtpdev = NULL; + mtpdata.dev_busy = 0; +} + +/** + * gmtp_connect - notify the driver that USB link is active + * @fmtp: the function, set up with endpoints and descriptors + * @name: name of the function + * Context: any (usually from irq) + * + * This is called to activate the endpoints and let the driver know + * that the USB link is active. + * + * Caller needs to have set up the endpoints and USB function in @fmtp + * before calling this, as well as the appropriate (speed-specific) + * endpoint descriptors, and also have set up the char driver by calling + * @gmtp_setup(). + * + * Returns negative errno or zero. + * On success, ep->driver_data will be overwritten. + */ +int gmtp_connect(struct f_mtp *fmtp, const char *name) +{ + int status = 0; + struct mtp_dev *mtp; + + if (mtpdata.dev_busy != 1) { + pr_err("%s: invalid device number\n", __func__); + return -EINVAL; + } + + mtp = mtpdata.mtpdev; + + dev_dbg(mtp->dev, "%s %s 0\n", __func__, name); + + if (!fmtp->ep_out && !fmtp->ep_in) { + dev_err(mtp->dev, "%s: Neither IN nor OUT endpoint available\n", + __func__); + return -EINVAL; + } + + if (fmtp->ep_out) { + status = usb_ep_enable(fmtp->ep_out, fmtp->in_use.mtp_out); + if (status < 0) + return status; + + fmtp->ep_out->driver_data = mtp; + } + + if (fmtp->ep_in) { + status = usb_ep_enable(fmtp->ep_in, fmtp->in_use.mtp_in); + if (status < 0) + goto fail1; + + fmtp->ep_in->driver_data = mtp; + } + + kfifo_reset(&mtp->tx_fifo); + kfifo_reset(&mtp->rx_fifo); + mtp->rx_queued = 0; + mtp->tx_flush = 0; + mtp->tx_last_size = 0; + mtp->event = 0; + + if (fmtp->ep_out) { + status = mtp_alloc_requests(fmtp->ep_out, mtp->rx_queue, + &mtp_rx_complete); + if (status) + goto fail2; + mtp->rx_next = mtp->rx_queue; + } + + if (fmtp->ep_in) { + status = mtp_alloc_requests(fmtp->ep_in, mtp->tx_queue, + &mtp_tx_complete); + if (status) + goto fail3; + mtp->tx_next = mtp->tx_queue; + } + + /* connect gmtp */ + mtp->fmtp = fmtp; + + /* if device is opened by user space then start RX */ + if (mtp->opened) + tasklet_schedule(&mtp->rx_task); + + dev_dbg(mtp->dev, "%s complete\n", __func__); + return 0; + +fail3: + if (fmtp->ep_out) + mtp_free_requests(fmtp->ep_out, mtp->rx_queue); + +fail2: + if (fmtp->ep_in) { + fmtp->ep_in->driver_data = NULL; + usb_ep_disable(fmtp->ep_in); + } +fail1: + if (fmtp->ep_out) { + fmtp->ep_out->driver_data = NULL; + usb_ep_disable(fmtp->ep_out); + } + + return status; +} + +/** + * gmtp_disconnect - notify the driver that USB link is inactive + * @fmtp: the function on which gmtp_connect() was called + * Context: any (usually from irq) + * + * This is called to deactivate the endpoints (related to @fmtp) + * and let the driver know that the USB link is inactive + */ 
+void gmtp_disconnect(struct f_mtp *fmtp) +{ + struct mtp_dev *mtp; + + if (!fmtp->ep_out && !fmtp->ep_in) + return; + + if (fmtp->ep_out) + mtp = fmtp->ep_out->driver_data; + else + mtp = fmtp->ep_in->driver_data; + + if (!mtp) { + pr_err("%s Invalid mtp_dev\n", __func__); + return; + } + + spin_lock(&mtp->lock); + + if (fmtp->ep_out) { + usb_ep_disable(fmtp->ep_out); + mtp_free_requests(mtp->fmtp->ep_out, mtp->rx_queue); + fmtp->ep_out->driver_data = NULL; + } + + if (fmtp->ep_in) { + usb_ep_disable(fmtp->ep_in); + mtp_free_requests(mtp->fmtp->ep_in, mtp->tx_queue); + fmtp->ep_in->driver_data = NULL; + } + + mtp->fmtp = NULL; + + spin_unlock(&mtp->lock); +} + +/** + * gmtp_notify(struct f_mtp *fmtp); + * @fmtp: the function on which we need to notify + * Context: any (usually from irq) + * + * This is called to send a POLLPRI notification to user + * space to indicate availability of high-priority data. + * The actual data can be fetched by user space via an IOCTL + */ +void gmtp_notify(struct f_mtp *fmtp) +{ + struct mtp_dev *mtp; + unsigned long flags; + + if (!fmtp->ep_out && !fmtp->ep_in) + /* not connected */ + return; + + if (fmtp->ep_out) + mtp = fmtp->ep_out->driver_data; + else + mtp = fmtp->ep_in->driver_data; + + spin_lock_irqsave(&mtp->lock, flags); + if (!mtp->opened) { + /* not opened */ + spin_unlock_irqrestore(&mtp->lock, flags); + return; + } + + mtp->event = true; + wake_up_interruptible(&mtp->event_wait); + spin_unlock_irqrestore(&mtp->lock, flags); +} + +/** + * gmtp_notify_clear(struct f_mtp *fmtp); + * @fmtp: the function on which we need to clear notify + * Context: any (usually from irq) + * + * This is called to clear a POLLPRI notification to user space + */ +void gmtp_notify_clear(struct f_mtp *fmtp) +{ + struct mtp_dev *mtp; + unsigned long flags; + + if (!fmtp->ep_out && !fmtp->ep_in) + /* not connected */ + return; + + if (fmtp->ep_out) + mtp = fmtp->ep_out->driver_data; + else + mtp = fmtp->ep_in->driver_data; + + spin_lock_irqsave(&mtp->lock, flags); + if (!mtp->opened) { + /* not opened */ + spin_unlock_irqrestore(&mtp->lock, flags); + return; + } + + mtp->event = false; + wake_up_interruptible(&mtp->event_wait); + spin_unlock_irqrestore(&mtp->lock, flags); +} + +/** + * gmtp_clear_fifos(struct f_mtp *fmtp) + * + * @fmtp: the function whose FIFOs we need to clear + * Context: process; should not be called from IRQ. 
+ * + * Clears the TX and RX FIFOs + */ +void gmtp_clear_fifos(struct f_mtp *fmtp) +{ + struct mtp_dev *mtp; + struct usb_ep *ep; + struct mtp_buf *queue; + int status; + int i; + + if (!fmtp->ep_out && !fmtp->ep_in) + /* not connected */ + return; + + if (fmtp->ep_out) + mtp = fmtp->ep_out->driver_data; + else + mtp = fmtp->ep_in->driver_data; + + if (!mtp->opened) + return; + + if (fmtp->ep_in) { + /* RESET TX buffer state and pointers */ + ep = fmtp->ep_in; + mtp->tx_cancel = true; + tasklet_kill(&mtp->tx_task); + spin_lock_irq(&mtp->tx_lock); + + for (i = 0; i < MTP_QUEUE_SIZE; i++) { + queue = &mtp->tx_queue[i]; + if (queue->busy) { + spin_unlock_irq(&mtp->tx_lock); + status = usb_ep_dequeue(ep, queue->r); + spin_lock_irq(&mtp->tx_lock); + } + } + usb_ep_fifo_flush(ep); + kfifo_reset(&mtp->tx_fifo); + mtp->tx_flush = 0; + mtp->tx_last_size = 0; + mtp->tx_cancel = false; + mtp->tx_next = &mtp->tx_queue[0]; + spin_unlock_irq(&mtp->tx_lock); + } + + if (fmtp->ep_out) { + /* RESET RX buffer state and pointers */ + ep = fmtp->ep_out; + mtp->rx_cancel = true; + tasklet_kill(&mtp->rx_task); + spin_lock_irq(&mtp->rx_lock); + + for (i = 0; i < MTP_QUEUE_SIZE; i++) { + queue = &mtp->rx_queue[i]; + if (queue->busy) { + spin_unlock_irq(&mtp->rx_lock); + status = usb_ep_dequeue(ep, queue->r); + spin_lock_irq(&mtp->rx_lock); + } + } + usb_ep_fifo_flush(ep); + kfifo_reset(&mtp->rx_fifo); + mtp->rx_cancel = false; + mtp->rx_next = &mtp->rx_queue[0]; + spin_unlock_irq(&mtp->rx_lock); + /* trigger RX */ + tasklet_schedule(&mtp->rx_task); + } +} + +/*---------------------------------------------------------------------------*/ +/* f_mtp functions */ + +/** + * This function will be called when the request on the interrupt + * endpoint being used for class specific events is completed. + * Notes - + * The protocol does not give any specifications about what + * should be done in such a case. + * Revisit if there is more information. + */ +static void +mtp_notify_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_mtp *mtp = req->context; + struct usb_composite_dev *cdev = mtp->cdev; + + VDBG(cdev, "%s:\n", __func__); + + switch (req->status) { + case 0: + /* normal completion */ + break; + + case -ESHUTDOWN: + /* disconnect */ + WARNING(cdev, "%s: %s shutdown\n", __func__, ep->name); + break; + + default: + WARNING(cdev, "%s: unexpected %s status %d\n", + __func__, ep->name, req->status); + break; + } + + kfree(req->buf); + usb_ep_free_request(ep, req); + return; +} + +/** + * build_device_status() - prepares the device status response + * + * @mtp: the f_mtp struct + * @buf: buffer to build the response data into + * @buf_len: length of buffer in bytes + * + * uses spinlock mtp->lock + * + * returns number of bytes copied. 
+ */ +static int build_device_status(struct f_mtp *mtp, void *buf, size_t buf_len) +{ + int copied, len; + __le16 *ptr = buf; + struct device_status *status = &mtp->dev_status; + unsigned long flags; + + spin_lock_irqsave(&mtp->lock, flags); + len = status->length; + if (len > buf_len) { + WARNING(mtp->cdev, "%s Insufficient buffer for dev_status\n", + __func__); + /* limit status data to available buffer */ + len = buf_len; + } + + *ptr++ = cpu_to_le16(len); + *ptr++ = cpu_to_le16(status->code); + copied = 4; + + if (len > 4) { + len -= 4; + if (len > MAX_STATUS_DATA_SIZE) { + len = MAX_STATUS_DATA_SIZE; + WARNING(mtp->cdev, "%s limited status to %d bytes\n", + __func__, len); + } + memcpy(ptr, status->data, len); + copied += len; + } + spin_unlock_irqrestore(&mtp->lock, flags); + return copied; +} + +/** + * cache_device_status() - saves the device status to struct f_mtp + * + * @mtp: the f_mtp struct + * @length: length of PTP device status + * @code: code of PTP device status + * @buf: user space buffer pointing to PTP device status container + * + * uses spinlock mtp->lock + * + * returns 0 on success. negative on error + */ +static int cache_device_status(struct f_mtp *mtp, + u16 length, u16 code, const void __user *buf) +{ + u8 *uninitialized_var(tmp_data); + unsigned long flags; + + if (length > 4) { + if (!buf) { + WARNING(mtp->cdev, "%s No data buffer provided\n", + __func__); + return -EINVAL; + } + + length -= 4; /* get length of data section */ + if (length > MAX_STATUS_DATA_SIZE) { + length = MAX_STATUS_DATA_SIZE; + WARNING(mtp->cdev, "%s limited status data to %d " + "bytes\n", __func__, length); + } + + tmp_data = kmalloc(length, GFP_KERNEL); + if (!tmp_data) + return -ENOMEM; + + /* 4 bytes are for header, leave them out */ + if (copy_from_user(tmp_data, buf + 4, length)) { + ERROR(mtp->cdev, "%s copy_from_user fault\n", __func__); + kfree(tmp_data); + return -EFAULT; + } + length += 4; /* undo the previous minus */ + } + + spin_lock_irqsave(&mtp->lock, flags); + if (length > 4) { + memcpy(mtp->dev_status.data, tmp_data, length - 4); + kfree(tmp_data); + } + mtp->dev_status.length = length; + mtp->dev_status.code = code; + spin_unlock_irqrestore(&mtp->lock, flags); + return 0; +} + +/** + * cache_control_request() - saves the control request to f_mtp struct + * + * @mtp: the f_mtp struct + * @length: length of buffer + * @buf: the buffer with data to be stored + * + * uses spinlock mtp->lock + */ +void cache_control_request(struct f_mtp *mtp, u16 length, const void *buf) +{ + unsigned long flags; + + if (length > PTP_MAX_CONTROL_SIZE) { + WARNING(mtp->cdev, "mtp CTRL BUF LIMIT reached\n"); + length = PTP_MAX_CONTROL_SIZE; + } + spin_lock_irqsave(&mtp->lock, flags); + mtp->ctrl_req.length = length; + memcpy(mtp->ctrl_req.data, buf, length); + mtp->ctrl_req.valid = true; + spin_unlock_irqrestore(&mtp->lock, flags); +} + +/** + * mtp_setup_complete - completion handler for setup request + * + * As of now this is only executed when we receive a PTP_CANCEL request + * We save the data received for this request into f_mtp->ctrl_req + */ +static void mtp_setup_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_mtp *mtp = req->context; + struct usb_composite_dev *cdev = ep->driver_data; + + if (req->status || req->actual != req->length) + DBG(cdev, "mtp setup complete --> %d, %d/%d\n", + req->status, req->actual, req->length); + + if (!req->status) { + if (req->actual) { + int len, i = 0; + unsigned long flags; + + /* append this to existing control request */ + 
spin_lock_irqsave(&mtp->lock, flags); + if (mtp->ctrl_req.valid) + i = mtp->ctrl_req.length; + len = i + req->actual; + if (len > PTP_MAX_CONTROL_SIZE) { + WARNING(cdev, "mtp CTRL BUF LIMIT reached\n"); + len = PTP_MAX_CONTROL_SIZE; + } + memcpy(&mtp->ctrl_req.data[i], req->buf, len - i); + mtp->ctrl_req.length = len; + mtp->ctrl_req.valid = true; + spin_unlock_irqrestore(&mtp->lock, flags); + /* notify user space */ + gmtp_notify(mtp); + } + } +} + +/** + * Handle the MTP specific setup requests + */ +static int +mtp_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct f_mtp *mtp = func_to_mtp(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + + int value = -EOPNOTSUPP; + u16 wIndex = le16_to_cpu(ctrl->wIndex); + u16 wValue = le16_to_cpu(ctrl->wValue); + u16 wLength = le16_to_cpu(ctrl->wLength); + + switch (ctrl->bRequest) { + case PTP_REQ_GET_EXTENDED_EVENT_DATA: + /* FIXME need to implement + * Maybe we could have an IOCTL to save the extended event + * data with the driver and then send it to host whenever + * we get this request + */ + WARNING(cdev, "%s: FIXME: PTP request GET_EXTENDED_EVENT_DATA, " + "not implemented\n", __func__); + break; + + case PTP_REQ_CANCEL: + DBG(cdev, "%s: PTP: CANCEL\n", __func__); + if (ctrl->bRequestType != (USB_DIR_OUT | + USB_TYPE_CLASS | USB_RECIP_INTERFACE)) + goto stall; + + if (wValue != 0 || wIndex != 0 || wLength != 6) + goto stall; + + /* set device status to BUSY, Stack sets it to OK when ready */ + cache_device_status(mtp, 4, PTP_RC_DEVICE_BUSY, 0); + cache_control_request(mtp, sizeof(*ctrl), ctrl); + /* prepare for data phase */ + req->context = mtp; + /* override completion handler */ + req->complete = mtp_setup_complete; + /* copying of data and notifying user space is + * done in mtp setup completion handler + */ + value = wLength; + break; + + case PTP_REQ_DEVICE_RESET: + DBG(cdev, "%s: PTP: DEVICE_RESET\n", __func__); + if (ctrl->bRequestType != (USB_DIR_OUT | + USB_TYPE_CLASS | USB_RECIP_INTERFACE)) + goto stall; + + if (wValue != 0 || wIndex != 0 || wLength != 0) + goto stall; + + cache_device_status(mtp, 4, PTP_RC_DEVICE_BUSY, 0); + cache_control_request(mtp, sizeof(*ctrl), ctrl); + gmtp_notify(mtp); + value = 0; /* no data phase */ + break; + + case PTP_REQ_GET_DEVICE_STATUS: + /* return the cached device status */ + DBG(cdev, "%s: PTP: GET_DEVICE_STATUS\n", __func__); + + if (ctrl->bRequestType != (USB_DIR_IN | + USB_TYPE_CLASS | USB_RECIP_INTERFACE)) { + goto stall; + } + + if (wValue != 0 || wIndex != 0) + goto stall; + + value = build_device_status(mtp, req->buf, + USB_BUFSIZ); /* composite.c */ + + if (value < 0) { + ERROR(cdev, "%s: error building device status\n", + __func__); + goto stall; + } + value = min(wLength, (u16)value); + break; + + /* TBD: other response codes */ + default: + WARNING(cdev, + "%s: FIXME, got PTP request 0x%x, not implemented\n", + __func__, ctrl->bRequest); + break; + } + + /* data phase of control transfer */ + if (value >= 0) { + req->length = value; + req->zero = value < wLength; + value = usb_ep_queue(cdev->gadget->ep0, + req, GFP_ATOMIC); + if (value < 0) { + DBG(cdev, "%s: ep_queue --> %d\n", __func__, value); + req->status = 0; + } + } + +stall: + /* device either stalls (value < 0) or reports success */ + return value; +} + +static long +mtp_ioctl(struct file *filp, unsigned code, unsigned long value) +{ + int status; + struct mtp_dev *mtp = filp->private_data; + struct f_mtp *fmtp = mtp->fmtp; + struct usb_composite_dev 
*cdev; + int packet_size; + struct usb_request *notify_req; + void *event_packet; + u32 event_packet_len; + struct ptp_device_status_data ptp_status; + unsigned long flags; + + if (!fmtp) + return -ENODEV; + + cdev = fmtp->cdev; + + switch (code) { + case MTP_IOCTL_WRITE_ON_INTERRUPT_EP: + + /* get size of packet */ + if (copy_from_user(&event_packet_len, + (void __user *)value, 4)) + return -EFAULT; + + event_packet_len = le32_to_cpu(event_packet_len); + if (event_packet_len > fmtp->ep_notify->maxpacket) { + ERROR(cdev, "%s Max event packet limit exceeded\n", + __func__); + return -EFAULT; + } + + event_packet = kmalloc(event_packet_len, GFP_KERNEL); + if (!event_packet) { + ERROR(cdev, "%s cannot allocate memory for event\n", + __func__); + return -ENOMEM; + } + + /* read full packet */ + if (copy_from_user(event_packet, + (void __user *)value, event_packet_len)) { + kfree(event_packet); + return -EFAULT; + } + + /* Allocate request object to be used with this endpoint. */ + notify_req = usb_ep_alloc_request(fmtp->ep_notify, GFP_KERNEL); + if (!notify_req) { + ERROR(cdev, + "%s: could not allocate notify EP request\n", + __func__); + kfree(event_packet); + return -ENOMEM; + } + + notify_req->buf = event_packet; + notify_req->context = fmtp; + notify_req->complete = mtp_notify_complete; + notify_req->length = event_packet_len; + if (unlikely(event_packet_len == fmtp->ep_notify->maxpacket)) + notify_req->zero = 1; + else + notify_req->zero = 0; + + + status = usb_ep_queue(fmtp->ep_notify, notify_req, GFP_ATOMIC); + if (status) { + ERROR(cdev, + "%s: EVENT packet could not be queued %d\n", + __func__, status); + usb_ep_free_request(fmtp->ep_notify, notify_req); + kfree(event_packet); + return status; + } + return 0; + + case MTP_IOCTL_GET_MAX_DATAPKT_SIZE: + switch (fmtp->usb_speed) { + case USB_SPEED_LOW: + case USB_SPEED_FULL: + packet_size = PTP_FS_DATA_PKT_SIZE; + break; + + case USB_SPEED_HIGH: + case USB_SPEED_WIRELESS: + packet_size = PTP_HS_DATA_PKT_SIZE; + break; + + default: + return -EINVAL; + } + + status = put_user(packet_size, (int *)value); + if (status) { + ERROR(cdev, + "%s: could not send max data packet size\n", + __func__); + return -EFAULT; + } + return 0; + + case MTP_IOCTL_GET_MAX_EVENTPKT_SIZE: + switch (fmtp->usb_speed) { + case USB_SPEED_LOW: + case USB_SPEED_FULL: + packet_size = PTP_FS_EVENT_PKT_SIZE; + break; + + case USB_SPEED_HIGH: + case USB_SPEED_WIRELESS: + packet_size = PTP_HS_EVENT_PKT_SIZE; + break; + + default: + return -EINVAL; + } + + status = put_user(packet_size, (int *)value); + if (status) { + ERROR(cdev, + "%s: couldn't send max event packet size\n", + __func__); + return -EFAULT; + } + return 0; + + case MTP_IOCTL_SET_DEVICE_STATUS: + if (copy_from_user(&ptp_status, (const void __user *)value, + sizeof(ptp_status))) + return -EFAULT; + + status = cache_device_status(fmtp, + __le16_to_cpu(ptp_status.wLength), + __le16_to_cpu(ptp_status.Code), + (const void __user *)(value)); + return status; + + case MTP_IOCTL_GET_CONTROL_REQ: + spin_lock_irqsave(&fmtp->lock, flags); + /* check if we have a cached control req. 
*/ + if (!fmtp->ctrl_req.valid) { + spin_unlock_irqrestore(&fmtp->lock, flags); + return -EINVAL; + } + event_packet_len = fmtp->ctrl_req.length; + event_packet = kmalloc(event_packet_len, GFP_ATOMIC); + if (!event_packet) { + spin_unlock_irqrestore(&fmtp->lock, flags); + return -ENOMEM; + } + memcpy(event_packet, fmtp->ctrl_req.data, + event_packet_len); + spin_unlock_irqrestore(&fmtp->lock, flags); + if (copy_to_user((void __user *)value, event_packet, + event_packet_len)) { + kfree(event_packet); + return -EFAULT; + } + kfree(event_packet); + /* clear the control req. cache */ + spin_lock_irqsave(&fmtp->lock, flags); + fmtp->ctrl_req.valid = false; + fmtp->ctrl_req.length = 0; + spin_unlock_irqrestore(&fmtp->lock, flags); + /* clear user space notification */ + gmtp_notify_clear(fmtp); + return 0; + + case MTP_IOCTL_RESET_BUFFERS: + gmtp_clear_fifos(fmtp); + return 0; + + default: + WARNING(cdev, "%s: unhandled IOCTL %d\n", __func__, code); + return -EINVAL; + } +} + +static int +enable_mtp(struct f_mtp *mtp) +{ + struct usb_composite_dev *cdev = mtp->cdev; + int status = -1; + + /* choose interrupt endpoint */ + mtp->in_use.mtp_int = ep_choose(cdev->gadget, + mtp->hs.mtp_int, + mtp->fs.mtp_int); + /* enable it */ + status = usb_ep_enable(mtp->ep_notify, mtp->in_use.mtp_int); + if (status) { + ERROR(cdev, "%s: Can't enable endpoint %s --> %d\n", + __func__, mtp->ep_notify->name, status); + return status; + } + /* choose the bulk endpoints */ + mtp->in_use.mtp_in = ep_choose(cdev->gadget, + mtp->hs.mtp_in, + mtp->fs.mtp_in); + mtp->in_use.mtp_out = ep_choose(cdev->gadget, + mtp->hs.mtp_out, + mtp->fs.mtp_out); + + status = gmtp_connect(mtp, mtp->func.name); + if (status) { + ERROR(cdev, + "%s: gmtp_connect() failed %d\n", __func__, status); + usb_ep_disable(mtp->ep_notify); + return status; + } + DBG(cdev, "%s: mtp 0 enabled\n", __func__); + /* Get the USB speed */ + mtp->usb_speed = cdev->gadget->speed; + mtp->ep_notify->driver_data = mtp; + return 0; +} + +static void +disable_mtp(struct f_mtp *mtp) +{ + struct usb_composite_dev *cdev = mtp->cdev; + + if (mtp->ep_notify->driver_data) { + /* disable OUT/IN endpoints */ + gmtp_disconnect(mtp); + /* disable INT endpoint */ + usb_ep_disable(mtp->ep_notify); + mtp->ep_notify->driver_data = NULL; + DBG(cdev, "%s: mtp 0 disabled\n", __func__); + } + mtp->usb_speed = USB_SPEED_UNKNOWN; +} + +static int +mtp_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct usb_composite_dev *cdev = f->config->cdev; + struct f_mtp *mtp = func_to_mtp(f); + + if (intf != mtp->mtp_id) + return -EINVAL; + + if (alt != 0) { + WARNING(cdev, "%s: invalid alternate setting\n", __func__); + return -EINVAL; + } + + /* if already enabled, then disable and re-enable it */ + if (mtp->ep_notify->driver_data) + disable_mtp(mtp); + + return enable_mtp(mtp); +} + +static void +mtp_disable(struct usb_function *f) +{ + struct f_mtp *mtp = func_to_mtp(f); + + disable_mtp(mtp); +} + +static int +__init mtp_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_mtp *mtp = func_to_mtp(f); + struct usb_composite_dev *cdev = c->cdev; + int status; + struct usb_ep *ep = NULL; + + /* allocate instance-specific interface IDs and patch up descriptors */ + /* We have only ONE MTP interface. 
So get the unused interface ID for + * this interface. */ + status = usb_interface_id(c, f); + if (status < 0) + return status; + mtp->mtp_id = status; + mtp_intf.bInterfaceNumber = status; + + status = -ENOMEM; + /* Allocate the endpoints */ + /* mtp_ep_fs_in_desc */ + ep = usb_ep_autoconfig(cdev->gadget, &mtp_ep_fs_in_desc); + if (!ep) + goto fail; + mtp->ep_in = ep; + ep->driver_data = cdev; + + /* mtp_ep_fs_out_desc */ + ep = usb_ep_autoconfig(cdev->gadget, &mtp_ep_fs_out_desc); + if (!ep) + goto fail; + mtp->ep_out = ep; + ep->driver_data = cdev; + + /* mtp_ep_fs_int_desc */ + ep = usb_ep_autoconfig(cdev->gadget, &mtp_ep_fs_int_desc); + if (!ep) + goto fail; + mtp->ep_notify = ep; + ep->driver_data = cdev; + + + /* copy descriptors, and track endpoint copies */ + f->descriptors = usb_copy_descriptors(mtp_fs_function); + mtp->fs.mtp_in = usb_find_endpoint(mtp_fs_function, + f->descriptors, &mtp_ep_fs_in_desc); + mtp->fs.mtp_out = usb_find_endpoint(mtp_fs_function, + f->descriptors, &mtp_ep_fs_out_desc); + mtp->fs.mtp_int = usb_find_endpoint(mtp_fs_function, + f->descriptors, &mtp_ep_fs_int_desc); + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + if (gadget_is_dualspeed(c->cdev->gadget)) { + /* Copy endpoint address */ + mtp_ep_hs_in_desc.bEndpointAddress = + mtp_ep_fs_in_desc.bEndpointAddress; + mtp_ep_hs_out_desc.bEndpointAddress = + mtp_ep_fs_out_desc.bEndpointAddress; + mtp_ep_hs_int_desc.bEndpointAddress = + mtp_ep_fs_int_desc.bEndpointAddress; + + /* Copy descriptors, and track endpoint copies */ + f->hs_descriptors = usb_copy_descriptors(mtp_hs_function); + mtp->hs.mtp_in = usb_find_endpoint(mtp_hs_function, + f->hs_descriptors, &mtp_ep_hs_in_desc); + mtp->hs.mtp_out = usb_find_endpoint(mtp_hs_function, + f->hs_descriptors, &mtp_ep_hs_out_desc); + mtp->hs.mtp_int = usb_find_endpoint(mtp_hs_function, + f->hs_descriptors, &mtp_ep_hs_int_desc); + } + + INFO(cdev, "mtp 0: %s speed IN/%s OUT/%s INT/%s\n", + gadget_is_dualspeed(cdev->gadget) ? "dual" : "full", + mtp->ep_in->name, mtp->ep_out->name, + mtp->ep_notify->name); + + return 0; +fail: + if (mtp->ep_out) + mtp->ep_out->driver_data = NULL; + + if (mtp->ep_in) + mtp->ep_in->driver_data = NULL; + + if (mtp->ep_notify) + mtp->ep_notify->driver_data = NULL; + + ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status); + + return status; +} + +static void +mtp_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_mtp *mtp = func_to_mtp(f); + + if (gadget_is_dualspeed(c->cdev->gadget)) + usb_free_descriptors(f->hs_descriptors); + + usb_free_descriptors(f->descriptors); + kfree(mtp); +} + +/** + * mtp_bind_config - add an MTP function to a configuration + * @c: the configuration to support MTP + * Context: single threaded during gadget setup + * + * Returns zero on success, else negative errno. + * + * Caller must have called @gmtp_setup() with enough devices to + * handle all the ones it binds. Caller is also responsible + * for calling @gmtp_cleanup() before module unload. 
+ */ +int +__init mtp_bind_config(struct usb_configuration *c) +{ + struct f_mtp *mtp = NULL; + int status = 0; + + /* allocate device global string IDs and patch descriptors*/ + if (mtp_string_defs[0].id == 0) { + status = usb_string_id(c->cdev); + if (status < 0) + return status; + mtp_string_defs[0].id = status; + mtp_intf.iInterface = status; + } + + /* allocate and initialize one new instance */ + mtp = kzalloc(sizeof(*mtp), GFP_KERNEL); + if (!mtp) + return -ENOMEM; + + spin_lock_init(&mtp->lock); + + mtp->func.name = "MTP"; + mtp->func.strings = mtp_strings; + mtp->func.bind = mtp_bind; + mtp->func.unbind = mtp_unbind; + mtp->func.set_alt = mtp_set_alt; + mtp->func.setup = mtp_setup; + mtp->func.disable = mtp_disable; + + mtp->usb_speed = USB_SPEED_UNKNOWN; /* invalid speed */ + + mtp->cdev = c->cdev; + + /* default device status is BUSY */ + cache_device_status(mtp, 4, PTP_RC_DEVICE_BUSY, 0); + + status = usb_add_function(c, &mtp->func); + if (status) + kfree(mtp); + + return status; +} -- 1.6.3.3
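For reviewers, a minimal user-space sketch of the interface this patch exposes follows. It is illustrative only and not part of the patch: it assumes the /dev/mtp0 node created by gmtp_setup() above and the MTP_IOCTL_* request codes from the companion <linux/usb/ptp.h> header, and it abbreviates error handling.

/* user-space sketch: poll the gadget char device, service class-specific
 * requests signalled via POLLPRI, and echo bulk OUT data back on bulk IN
 * (a real MTP stack would parse and build MTP containers instead).
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usb/ptp.h>	/* assumed companion header */

int main(void)
{
	char buf[4096];
	int max_pkt = 0;
	ssize_t n;
	int fd = open("/dev/mtp0", O_RDWR);

	if (fd < 0)
		return 1;

	/* query the speed-dependent maximum bulk data packet size */
	if (ioctl(fd, MTP_IOCTL_GET_MAX_DATAPKT_SIZE, &max_pkt) == 0)
		printf("max data packet size: %d\n", max_pkt);

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };

		if (poll(&pfd, 1, -1) < 0)
			break;

		if (pfd.revents & POLLPRI) {
			/* class-specific request (e.g. PTP_REQ_CANCEL) is
			 * pending: fetch it, then drop stale bulk data */
			ioctl(fd, MTP_IOCTL_GET_CONTROL_REQ, buf);
			ioctl(fd, MTP_IOCTL_RESET_BUFFERS, 0);
			continue;
		}

		n = read(fd, buf, sizeof(buf));	/* bulk OUT data */
		if (n <= 0)
			break;

		write(fd, buf, n);		/* bulk IN data */
		fsync(fd);	/* flush TX; sends ZLP/SLP when needed */
	}

	close(fd);
	return 0;
}

The POLLPRI branch mirrors the driver's gmtp_notify() path: after a cancel, the stack would also be expected to set the device status back to PTP_RC_OK via MTP_IOCTL_SET_DEVICE_STATUS before resuming transfers.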