Remove the need for the transport_qf_callback callback by making sure
we have specific states with specific handlers for the two queue full
cases.  This includes changing a check in the qla2xxx driver to include
TRANSPORT_COMPLETE_QF_WP in addition to TRANSPORT_WRITE_PENDING.  This
is fairly ugly, but not really worse than before.  Long term this
leakage of internal state into drivers needs to go away.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>

Index: lio-core/drivers/target/target_core_transport.c
===================================================================
--- lio-core.orig/drivers/target/target_core_transport.c	2011-10-16 13:20:58.969784954 +0200
+++ lio-core/drivers/target/target_core_transport.c	2011-10-16 13:55:32.773285968 +0200
@@ -73,9 +73,8 @@ static int transport_generic_write_pendi
 static int transport_processing_thread(void *param);
 static int __transport_execute_tasks(struct se_device *dev);
 static void transport_complete_task_attr(struct se_cmd *cmd);
-static int transport_complete_qf(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
-		struct se_device *dev, int (*qf_callback)(struct se_cmd *));
+		struct se_device *dev);
 static void transport_direct_request_timeout(struct se_cmd *cmd);
 static void transport_free_dev_tasks(struct se_cmd *cmd);
 static u32 transport_allocate_tasks(struct se_cmd *cmd,
@@ -972,7 +971,7 @@ static void target_qf_do_work(struct wor
 
 		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
 			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
-			(cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
+			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
 			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
 			: "UNKNOWN");
 		/*
@@ -1970,8 +1969,8 @@ check_stop:
 	return;
 
 queue_full:
-	cmd->t_state = TRANSPORT_COMPLETE_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
+	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
+	transport_handle_queue_full(cmd, cmd->se_dev);
 }
 
 static void transport_direct_request_timeout(struct se_cmd *cmd)
@@ -3433,12 +3432,19 @@ static void transport_complete_task_attr
 	wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
 }
 
-static int transport_complete_qf(struct se_cmd *cmd)
+static void transport_complete_qf(struct se_cmd *cmd)
 {
 	int ret = 0;
 
-	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
-		return cmd->se_tfo->queue_status(cmd);
+	transport_stop_all_task_timers(cmd);
+	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		transport_complete_task_attr(cmd);
+
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
+		ret = cmd->se_tfo->queue_status(cmd);
+		if (ret)
+			goto out;
+	}
 
 	switch (cmd->data_direction) {
 	case DMA_FROM_DEVICE:
@@ -3448,7 +3454,7 @@ static int transport_complete_qf(struct
 		if (cmd->t_bidi_data_sg) {
 			ret = cmd->se_tfo->queue_data_in(cmd);
 			if (ret < 0)
-				return ret;
+				break;
 		}
 		/* Fall through for DMA_TO_DEVICE */
 	case DMA_NONE:
@@ -3458,17 +3464,21 @@ static int transport_complete_qf(struct
 		break;
 	}
 
-	return ret;
+out:
+	if (ret < 0) {
+		transport_handle_queue_full(cmd, cmd->se_dev);
+		return;
+	}
+	transport_lun_remove_cmd(cmd);
+	transport_cmd_check_stop_to_fabric(cmd);
 }
 
 static void transport_handle_queue_full(
 	struct se_cmd *cmd,
-	struct se_device *dev,
-	int (*qf_callback)(struct se_cmd *))
+	struct se_device *dev)
 {
 	spin_lock_irq(&dev->qf_cmd_lock);
 	cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
-	cmd->transport_qf_callback = qf_callback;
 	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
 	atomic_inc(&dev->dev_qf_count);
 	smp_mb__after_atomic_inc();
@@ -3494,14 +3504,6 @@ static void transport_generic_complete_o
 	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
 		schedule_work(&cmd->se_dev->qf_work_queue);
 
-	if (cmd->transport_qf_callback) {
-		ret = cmd->transport_qf_callback(cmd);
-		if (ret < 0)
-			goto queue_full;
-
-		cmd->transport_qf_callback = NULL;
-		goto done;
-	}
 	/*
 	 * Check if we need to retrieve a sense buffer from
 	 * the struct se_cmd in question.
@@ -3577,7 +3579,6 @@ static void transport_generic_complete_o
 		break;
 	}
 
-done:
 	transport_lun_remove_cmd(cmd);
 	transport_cmd_check_stop_to_fabric(cmd);
 	return;
@@ -3585,7 +3586,8 @@ done:
 queue_full:
 	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
 		" data_direction: %d\n", cmd, cmd->data_direction);
-	transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
+	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
+	transport_handle_queue_full(cmd, cmd->se_dev);
 }
 
 static void transport_free_dev_tasks(struct se_cmd *cmd)
@@ -4112,15 +4114,15 @@ void transport_generic_process_write(str
 }
 EXPORT_SYMBOL(transport_generic_process_write);
 
-static int transport_write_pending_qf(struct se_cmd *cmd)
+static void transport_write_pending_qf(struct se_cmd *cmd)
 {
-	return cmd->se_tfo->write_pending(cmd);
+	if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) {
+		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
+			cmd);
+		transport_handle_queue_full(cmd, cmd->se_dev);
+	}
 }
 
-/* transport_generic_write_pending():
- *
- *
- */
 static int transport_generic_write_pending(struct se_cmd *cmd)
 {
 	unsigned long flags;
@@ -4130,17 +4132,6 @@ static int transport_generic_write_pendi
 	cmd->t_state = TRANSPORT_WRITE_PENDING;
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	if (cmd->transport_qf_callback) {
-		ret = cmd->transport_qf_callback(cmd);
-		if (ret == -EAGAIN)
-			goto queue_full;
-		else if (ret < 0)
-			return ret;
-
-		cmd->transport_qf_callback = NULL;
-		return 0;
-	}
-
 	/*
 	 * Clear the se_cmd for WRITE_PENDING status in order to set
 	 * cmd->t_transport_active=0 so that transport_generic_handle_data
@@ -4165,8 +4156,7 @@ static int transport_generic_write_pendi
 queue_full:
 	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
 	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-	transport_handle_queue_full(cmd, cmd->se_dev,
-			transport_write_pending_qf);
+	transport_handle_queue_full(cmd, cmd->se_dev);
 	return ret;
 }
 
@@ -4853,7 +4843,10 @@ get_cmd:
 			transport_generic_request_timeout(cmd);
 			break;
 		case TRANSPORT_COMPLETE_QF_WP:
-			transport_generic_write_pending(cmd);
+			transport_write_pending_qf(cmd);
+			break;
+		case TRANSPORT_COMPLETE_QF_OK:
+			transport_complete_qf(cmd);
 			break;
 		default:
 			pr_err("Unknown t_state: %d deferred_t_state:"
Index: lio-core/include/target/target_core_base.h
===================================================================
--- lio-core.orig/include/target/target_core_base.h	2011-10-16 13:20:58.985784499 +0200
+++ lio-core/include/target/target_core_base.h	2011-10-16 13:55:03.813285580 +0200
@@ -102,6 +102,7 @@ enum transport_state_table {
 	TRANSPORT_NEW_CMD_MAP = 16,
 	TRANSPORT_FREE_CMD_INTR = 17,
 	TRANSPORT_COMPLETE_QF_WP = 18,
+	TRANSPORT_COMPLETE_QF_OK = 19,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -471,7 +472,6 @@ struct se_cmd {
 	struct target_core_fabric_ops *se_tfo;
 	int (*transport_emulate_cdb)(struct se_cmd *);
 	void (*transport_complete_callback)(struct se_cmd *);
-	int (*transport_qf_callback)(struct se_cmd *);
 
 	unsigned char *t_task_cdb;
 	unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
Index: lio-core/drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c
===================================================================
--- lio-core.orig/drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c	2011-10-16 13:26:15.202785883 +0200
+++ lio-core/drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c	2011-10-16 13:26:34.077285420 +0200
@@ -644,7 +644,8 @@ int tcm_qla2xxx_write_pending_status(str
 	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
 	 */
 	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING) {
+	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
+	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
 		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 3000);
 		return 0;
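
As a rough illustration of the approach (a standalone userspace sketch
with made-up names, not LIO code): instead of stashing a per-command
retry callback, the command records which queue-full case it hit as a
state, and the processing thread dispatches on that state.

/* sketch.c -- illustrative only; every identifier here is invented */
#include <stdio.h>

enum cmd_state {
	CMD_WRITE_PENDING,	/* normal write-pending state */
	CMD_COMPLETE_QF_WP,	/* queue full while requesting write data */
	CMD_COMPLETE_QF_OK,	/* queue full while sending the completion */
};

struct cmd {
	enum cmd_state state;
	int fabric_busy;	/* simulates the fabric returning -EAGAIN */
};

/* dedicated handler for the write-pending queue-full case */
static void write_pending_qf(struct cmd *c)
{
	if (c->fabric_busy) {
		printf("write_pending still busy, leave on qf list\n");
		return;
	}
	printf("write_pending retried successfully\n");
	c->state = CMD_WRITE_PENDING;
}

/* dedicated handler for the completion queue-full case */
static void complete_qf(struct cmd *c)
{
	if (c->fabric_busy) {
		printf("completion still busy, leave on qf list\n");
		return;
	}
	printf("completion retried successfully\n");
}

/*
 * What the processing thread does: dispatch on the recorded state
 * rather than calling a function pointer stored in the command.
 */
static void process_queue_full(struct cmd *c)
{
	switch (c->state) {
	case CMD_COMPLETE_QF_WP:
		write_pending_qf(c);
		break;
	case CMD_COMPLETE_QF_OK:
		complete_qf(c);
		break;
	default:
		printf("nothing queued for retry\n");
		break;
	}
}

int main(void)
{
	struct cmd c = { CMD_COMPLETE_QF_WP, 0 };

	process_queue_full(&c);
	return 0;
}

Because the pending-retry case is now visible in t_state, a driver that
inspects the state (as qla2xxx does above) has to accept
TRANSPORT_COMPLETE_QF_WP alongside TRANSPORT_WRITE_PENDING, which is the
state leakage the changelog calls out as needing to go away long term.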