To support zero-copy for the user backend, allow the backend to allocate sgl pages. Unfortunately this requires an extra copy in tcm_loop for the user backend, but since tcm_loop is only used for testing, this is not a big deal. Signed-off-by: Shaohua Li <shli@xxxxxxxxxx> --- drivers/target/loopback/tcm_loop.c | 49 ++++++++++++++++++++++++++++++++- drivers/target/target_core_transport.c | 24 +++++++++++----- drivers/target/target_core_xcopy.c | 4 +- include/target/target_core_backend.h | 5 ++- 4 files changed, 71 insertions(+), 11 deletions(-) Index: linux/drivers/target/loopback/tcm_loop.c =================================================================== --- linux.orig/drivers/target/loopback/tcm_loop.c 2013-11-16 09:13:55.606790041 +0800 +++ linux/drivers/target/loopback/tcm_loop.c 2013-11-16 09:13:55.594790204 +0800 @@ -33,6 +33,7 @@ #include <scsi/scsi_cmnd.h> #include <target/target_core_base.h> +#include <target/target_core_backend.h> #include <target/target_core_fabric.h> #include <target/target_core_fabric_configfs.h> #include <target/target_core_configfs.h> @@ -166,6 +167,9 @@ static void tcm_loop_submission_work(str struct scatterlist *sgl_bidi = NULL; u32 sgl_bidi_count = 0; int rc; + struct se_lun *se_lun; + struct se_device *dev; + bool alloc_sgl = false; tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; @@ -194,7 +198,21 @@ static void tcm_loop_submission_work(str se_cmd->se_cmd_flags |= SCF_BIDI; } - rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, + + spin_lock(&tl_tpg->tl_se_tpg.tpg_lun_lock); + se_lun = tl_tpg->tl_se_tpg.tpg_lun_list[tl_cmd->sc->device->lun]; + dev = se_lun->lun_se_dev; + if (dev->transport->alloc_sgl) + alloc_sgl = true; + spin_unlock(&tl_tpg->tl_se_tpg.tpg_lun_lock); + + if (alloc_sgl) + rc = target_submit_cmd(se_cmd, tl_nexus->se_sess, sc->cmnd, + &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, + scsi_bufflen(sc), tcm_loop_sam_attr(sc), + sc->sc_data_direction, 0); + else + rc = 
target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, scsi_bufflen(sc), tcm_loop_sam_attr(sc), sc->sc_data_direction, 0, @@ -720,8 +738,36 @@ static void tcm_loop_close_session(struc return; }; +static void tcm_loop_copy_sgl(struct se_cmd *se_cmd, bool to_scsi) +{ + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, + struct tcm_loop_cmd, tl_se_cmd); + struct scsi_cmnd *sc = tl_cmd->sc; + void *buf; + + if (!se_cmd->se_dev->transport->alloc_sgl) + return; + + buf = vmalloc(scsi_bufflen(sc)); + + if (to_scsi) { + sg_copy_to_buffer(se_cmd->t_data_sg, se_cmd->t_data_nents, + buf, scsi_bufflen(sc)); + sg_copy_from_buffer(scsi_sglist(sc), scsi_sg_count(sc), + buf, scsi_bufflen(sc)); + } else { + sg_copy_to_buffer(scsi_sglist(sc), scsi_sg_count(sc), + buf, scsi_bufflen(sc)); + sg_copy_from_buffer(se_cmd->t_data_sg, se_cmd->t_data_nents, + buf, scsi_bufflen(sc)); + } + vfree(buf); +} + static int tcm_loop_write_pending(struct se_cmd *se_cmd) { + tcm_loop_copy_sgl(se_cmd, false); + /* * Since Linux/SCSI has already sent down a struct scsi_cmnd * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array @@ -754,6 +800,7 @@ static int tcm_loop_queue_data_in(struct if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) scsi_set_resid(sc, se_cmd->residual_count); + tcm_loop_copy_sgl(se_cmd, true); sc->scsi_done(sc); return 0; } Index: linux/drivers/target/target_core_transport.c =================================================================== --- linux.orig/drivers/target/target_core_transport.c 2013-11-16 09:13:55.606790041 +0800 +++ linux/drivers/target/target_core_transport.c 2013-11-16 09:13:55.598790131 +0800 @@ -1998,11 +1998,17 @@ queue_full: transport_handle_queue_full(cmd, cmd->se_dev); } -static inline void transport_free_sgl(struct scatterlist *sgl, int nents) +static inline void transport_free_sgl(struct se_device *se_dev, + struct scatterlist *sgl, int 
nents) { struct scatterlist *sg; int count; + if (se_dev->transport->free_sgl) { + se_dev->transport->free_sgl(se_dev, sgl, nents); + return; + } + for_each_sg(sgl, sg, nents, count) __free_page(sg_page(sg)); @@ -2033,11 +2039,11 @@ static inline void transport_free_pages( } transport_reset_sgl_orig(cmd); - transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); + transport_free_sgl(cmd->se_dev, cmd->t_data_sg, cmd->t_data_nents); cmd->t_data_sg = NULL; cmd->t_data_nents = 0; - transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); + transport_free_sgl(cmd->se_dev, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); cmd->t_bidi_data_sg = NULL; cmd->t_bidi_data_nents = 0; } @@ -2128,8 +2134,8 @@ void transport_kunmap_data_sg(struct se_ EXPORT_SYMBOL(transport_kunmap_data_sg); int -target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, - bool zero_page) +target_alloc_sgl(struct se_device *se_dev, struct scatterlist **sgl, + unsigned int *nents, u32 length, bool zero_page) { struct scatterlist *sg; struct page *page; @@ -2137,6 +2143,10 @@ target_alloc_sgl(struct scatterlist **sg unsigned int nent; int i = 0; + if (se_dev->transport->alloc_sgl) + return se_dev->transport->alloc_sgl(se_dev, sgl, + nents, length, zero_page); + nent = DIV_ROUND_UP(length, PAGE_SIZE); sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL); if (!sg) @@ -2196,14 +2206,14 @@ transport_generic_new_cmd(struct se_cmd else bidi_length = cmd->data_length; - ret = target_alloc_sgl(&cmd->t_bidi_data_sg, + ret = target_alloc_sgl(cmd->se_dev, &cmd->t_bidi_data_sg, &cmd->t_bidi_data_nents, bidi_length, zero_flag); if (ret < 0) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } - ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, + ret = target_alloc_sgl(cmd->se_dev, &cmd->t_data_sg, &cmd->t_data_nents, cmd->data_length, zero_flag); if (ret < 0) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; Index: linux/drivers/target/target_core_xcopy.c 
=================================================================== --- linux.orig/drivers/target/target_core_xcopy.c 2013-11-16 09:13:55.606790041 +0800 +++ linux/drivers/target/target_core_xcopy.c 2013-11-16 09:13:55.598790131 +0800 @@ -626,8 +626,8 @@ static int target_xcopy_setup_pt_cmd( } if (alloc_mem) { - rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, - cmd->data_length, false); + rc = target_alloc_sgl(cmd->se_dev, &cmd->t_data_sg, + &cmd->t_data_nents, cmd->data_length, false); if (rc < 0) { ret = rc; goto out; Index: linux/include/target/target_core_backend.h =================================================================== --- linux.orig/include/target/target_core_backend.h 2013-11-16 09:13:55.606790041 +0800 +++ linux/include/target/target_core_backend.h 2013-11-16 09:13:55.602790072 +0800 @@ -36,6 +36,9 @@ struct se_subsystem_api { sector_t (*get_blocks)(struct se_device *); unsigned char *(*get_sense_buffer)(struct se_cmd *); bool (*get_write_cache)(struct se_device *); + int (*alloc_sgl)(struct se_device *, struct scatterlist **, + unsigned int *, uint32_t, bool); + void (*free_sgl)(struct se_device *, struct scatterlist *, int); }; struct sbc_ops { @@ -75,7 +78,7 @@ int transport_set_vpd_ident(struct t10_v void *transport_kmap_data_sg(struct se_cmd *); void transport_kunmap_data_sg(struct se_cmd *); /* core helpers also used by xcopy during internal command setup */ -int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool); +int target_alloc_sgl(struct se_device*, struct scatterlist **, unsigned int *, u32, bool); sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, struct scatterlist *, u32, struct scatterlist *, u32); -- To unsubscribe from this list: send the line "unsubscribe target-devel" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html