On Thu, 2012-02-09 at 13:16 -0800, Nicholas A. Bellinger wrote: > On Thu, 2012-02-09 at 11:21 -0800, Roland Dreier wrote: <SNIP> > Bingo.. Great catch here Roland. > > > I have to admit I'm still a bit vague on exactly what we're supposed > > to do to finish the command here. Is it as simple as adding > > > > target_put_sess_cmd() > > > > ? > > > > Anyway I think this explains one reason we get stuck waiting > > for session commands on shutdown -- if any commands ever > > hit this path, I guess they'll never get freed and so we'll end up > > waiting forever for them... make sense? > > > > So since tcm_qla2xxx_do_rsp() will always have a valid se_device > pointer, the transport_send_check_condition_and_sense() call needs to be > wrapped in transport_generic_request_failure() in order to perform the > final TFO->check_stop_free() -> tcm_qla2xxx_check_stop_free() -> > target_put_sess_cmd(). > > This is similar to what was fixed recently for target_submit_cmd() -> > transport_generic_allocate_tasks().. > > Please give the following a shot. > > Thanks, > > --nab > > diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c > index 83e5df4..8f291cd 100644 > --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c > +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c > @@ -626,8 +626,7 @@ void tcm_qla2xxx_do_rsp(struct work_struct *work) > /* > * Dispatch ->queue_status from workqueue process context > */ > - transport_send_check_condition_and_sense(&cmd->se_cmd, > - cmd->se_cmd.scsi_sense_reason, 0); > + transport_generic_request_failure(cmd); > } > > /* > > Sorry, a little too eager on the patch above.. How about the following..? 
--nab diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 83e5df4..71de7a6 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -626,8 +626,7 @@ void tcm_qla2xxx_do_rsp(struct work_struct *work) /* * Dispatch ->queue_status from workqueue process context */ - transport_send_check_condition_and_sense(&cmd->se_cmd, - cmd->se_cmd.scsi_sense_reason, 0); + transport_generic_request_failure(&cmd->se_cmd); } /* diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 9338939..846d377 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -76,7 +76,6 @@ static int transport_generic_get_mem(struct se_cmd *cmd); static void transport_put_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); -static void transport_generic_request_failure(struct se_cmd *); static void target_complete_ok_work(struct work_struct *work); int init_se_kmem_caches(void) @@ -1867,7 +1866,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) /* * Handle SAM-esque emulation for generic transport request failures. 
*/ -static void transport_generic_request_failure(struct se_cmd *cmd) +void transport_generic_request_failure(struct se_cmd *cmd) { int ret = 0; @@ -1959,6 +1958,7 @@ queue_full: cmd->t_state = TRANSPORT_COMPLETE_QF_OK; transport_handle_queue_full(cmd, cmd->se_dev); } +EXPORT_SYMBOL(transport_generic_request_failure); static inline u32 transport_lba_21(unsigned char *cdb) { diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index cfd908f..fc793c1 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -150,6 +150,7 @@ int core_alua_check_nonop_delay(struct se_cmd *); void core_tmr_req_init(struct se_cmd *, void *, u8); void core_tmr_release_req(struct se_tmr_req *); int transport_generic_handle_tmr(struct se_cmd *); +void transport_generic_request_failure(struct se_cmd *); int transport_lookup_tmr_lun(struct se_cmd *, u32); struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *, debian-amd64:/usr/src/lio-core-2.6.git# -- To unsubscribe from this list: send the line "unsubscribe target-devel" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html