scsi_tgt_queue_command currently just adds a command to a list, and a kernel thread then sends it to user space, because scsi_tgt_queue_command cannot use netlink (LLDs might call it from interrupt context). Now that netlink is no longer used, we can kill the list and have scsi_tgt_queue_command send a command to user space directly (that is, just update the shared memory).

Signed-off-by: FUJITA Tomonori <fujita.tomonori@xxxxxxxxxxxxx>
Signed-off-by: Mike Christie <michaelc@xxxxxxxxxxx>
---
 drivers/scsi/scsi_tgt_lib.c |  106 ++++++++++---------------------------------
 1 files changed, 24 insertions(+), 82 deletions(-)

0cfeaa966f9a35068aaa2a72cd8a6a5ca7c8eec1
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 7ebfbc0..5c66f10 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -45,11 +45,9 @@ struct scsi_tgt_cmd {
 	/* TODO replace the lists with a large bio */
 	struct bio_list xfer_done_list;
 	struct bio_list xfer_list;
-	struct scsi_lun *lun;
 
 	struct list_head hash_list;
 	struct request *rq;
-	u64 tag;
 
 	void *buffer;
 	unsigned bufflen;
@@ -62,12 +60,6 @@ struct scsi_tgt_queuedata {
 	struct Scsi_Host *shost;
 	struct list_head cmd_hash[1 << TGT_HASH_ORDER];
 	spinlock_t cmd_hash_lock;
-
-	struct work_struct uspace_send_work;
-
-	spinlock_t cmd_req_lock;
-	struct mutex cmd_req_mutex;
-	struct list_head cmd_req;
 };
 
 /*
@@ -117,6 +109,10 @@ struct scsi_cmnd *scsi_host_get_command(
 	rq->flags |= REQ_SPECIAL | REQ_BLOCK_PC;
 	rq->end_io_data = tcmd;
 
+	bio_list_init(&tcmd->xfer_list);
+	bio_list_init(&tcmd->xfer_done_list);
+	tcmd->rq = rq;
+
 	return cmd;
 
 release_rq:
@@ -175,19 +171,27 @@ static void scsi_unmap_user_pages(struct
 	}
 }
 
+static void cmd_hashlist_del(struct scsi_cmnd *cmd)
+{
+	struct request_queue *q = cmd->request->q;
+	struct scsi_tgt_queuedata *qdata = q->queuedata;
+	unsigned long flags;
+	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+
+	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
+	list_del(&tcmd->hash_list);
+	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
+}
+
 static void scsi_tgt_cmd_destroy(void *data)
 {
 	struct scsi_cmnd *cmd = data;
 	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
-	struct scsi_tgt_queuedata *qdata = cmd->request->q->queuedata;
-	unsigned long flags;
 
 	dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
 		rq_data_dir(cmd->request));
 
-	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
-	list_del(&tcmd->hash_list);
-	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
+	cmd_hashlist_del(cmd);
 
 	/*
 	 * We must set rq->flags here because bio_map_user and
@@ -216,55 +220,6 @@ static void init_scsi_tgt_cmd(struct req
 	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
 }
 
-static void scsi_tgt_uspace_send_fn(void *data)
-{
-	struct request_queue *q = data;
-	struct scsi_tgt_queuedata *qdata = q->queuedata;
-	struct request *rq;
-	struct scsi_cmnd *cmd;
-	struct scsi_tgt_cmd *tcmd;
-	unsigned long flags;
-	int err;
-
-retry:
-	err = 0;
-	if (list_empty(&qdata->cmd_req))
-		return;
-
-	mutex_lock(&qdata->cmd_req_mutex);
-
-	spin_lock_irqsave(&qdata->cmd_req_lock, flags);
-	if (list_empty(&qdata->cmd_req)) {
-		spin_unlock_irqrestore(&qdata->cmd_req_lock, flags);
-		mutex_unlock(&qdata->cmd_req_mutex);
-		goto out;
-	}
-	rq = list_entry_rq(qdata->cmd_req.next);
-	list_del_init(&rq->queuelist);
-	spin_unlock_irqrestore(&qdata->cmd_req_lock, flags);
-
-	tcmd = rq->end_io_data;
-	init_scsi_tgt_cmd(rq, tcmd);
-	cmd = rq->special;
-	err = scsi_tgt_uspace_send_cmd(cmd, tcmd->lun, tcmd->tag);
-	if (err < 0) {
-		eprintk("failed to send: %p %d\n", cmd, err);
-
-		spin_lock_irqsave(&qdata->cmd_req_lock, flags);
-		list_add(&rq->queuelist, &qdata->cmd_req);
-		spin_unlock_irqrestore(&qdata->cmd_req_lock, flags);
-	}
-
-	mutex_unlock(&qdata->cmd_req_mutex);
-out:
-	/* TODO: proper error handling */
-	if (err < 0)
-		queue_delayed_work(scsi_tgtd, &qdata->uspace_send_work,
-				   HZ / 10);
-	else
-		goto retry;
-}
-
 /*
  * scsi_tgt_alloc_queue - setup queue used for message passing
  * shost: scsi host
@@ -312,11 +267,6 @@ int scsi_tgt_alloc_queue(struct Scsi_Hos
 		INIT_LIST_HEAD(&queuedata->cmd_hash[i]);
 	spin_lock_init(&queuedata->cmd_hash_lock);
 
-	INIT_LIST_HEAD(&queuedata->cmd_req);
-	spin_lock_init(&queuedata->cmd_req_lock);
-	INIT_WORK(&queuedata->uspace_send_work, scsi_tgt_uspace_send_fn, q);
-	mutex_init(&queuedata->cmd_req_mutex);
-
 	return 0;
 
 cleanup_queue:
@@ -336,28 +286,20 @@ EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host);
  * scsi_tgt_queue_command - queue command for userspace processing
  * @cmd:	scsi command
  * @scsilun:	scsi lun
- * @noblock:	set to nonzero if the command should be queued
+ * @tag:	unique value to identify this command for tmf
  */
 int scsi_tgt_queue_command(struct scsi_cmnd *cmd, struct scsi_lun *scsilun,
 			   u64 tag)
 {
-	struct request_queue *q = cmd->request->q;
-	struct scsi_tgt_queuedata *qdata = q->queuedata;
-	unsigned long flags;
 	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
+	int err;
 
-	bio_list_init(&tcmd->xfer_list);
-	bio_list_init(&tcmd->xfer_done_list);
-	tcmd->lun = scsilun;
-	tcmd->tag = tag;
-	tcmd->rq = cmd->request;
-
-	spin_lock_irqsave(&qdata->cmd_req_lock, flags);
-	list_add_tail(&cmd->request->queuelist, &qdata->cmd_req);
-	spin_unlock_irqrestore(&qdata->cmd_req_lock, flags);
+	init_scsi_tgt_cmd(cmd->request, tcmd);
+	err = scsi_tgt_uspace_send_cmd(cmd, scsilun, tag);
+	if (err)
+		cmd_hashlist_del(cmd);
 
-	queue_work(scsi_tgtd, &qdata->uspace_send_work);
-	return 0;
+	return err;
 }
 EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);
 
-- 
1.1.3
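For LLD authors, the visible behavioural change is that scsi_tgt_queue_command() can now fail immediately instead of always returning 0, so its return value should be checked. Below is a minimal sketch of how a caller might handle that; the function name my_lld_queue_scsi_cmd is a placeholder (not part of this patch), and it assumes the existing scsi_host_put_command() helper is the right way for the LLD to release a command it obtained from scsi_host_get_command():

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>

/*
 * Hypothetical LLD receive path (name and structure are illustrative
 * only): with the list and workqueue gone, scsi_tgt_queue_command()
 * forwards the command to userspace synchronously (a shared-memory
 * update, so still safe from interrupt context) and returns the error
 * from scsi_tgt_uspace_send_cmd(), which the caller now has to handle.
 */
static int my_lld_queue_scsi_cmd(struct Scsi_Host *shost,
				 struct scsi_cmnd *cmd,
				 struct scsi_lun *lun, u64 tag)
{
	int err;

	err = scsi_tgt_queue_command(cmd, lun, tag);
	if (err) {
		/*
		 * On failure the command has already been removed from
		 * the tgt hash (cmd_hashlist_del), so just release it
		 * and report the error back to the transport.
		 */
		scsi_host_put_command(shost, cmd);
		return err;
	}

	return 0;
}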