Re: [PATCH, RFC] target: stop splitting commands into multiple tasks

ping?

While this patch is small, it's the start of removing tasks and thus
greatly simplifying the I/O path.  Until this one has been reviewed I
don't want to start the major overhaul of the I/O path.
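
For anyone skimming, here is a toy userspace sketch of the arithmetic
that changes in the diff quoted below.  This is not kernel code and the
attribute values are made up for illustration (the real ones live in
se_dev_attrib): the old path split a command into max_sectors-sized
tasks, the new path builds a single task per data direction and rejects
oversized commands up front in transport_generic_cmd_sequencer().

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int block_size  = 512;			/* attr->block_size, made up */
	unsigned int max_sectors = 1024;		/* attr->max_sectors, made up */
	unsigned int data_length = 1024 * 1024;		/* 1 MB command, made up */

	unsigned int sectors = DIV_ROUND_UP(data_length, block_size);

	/* Old behaviour: split the command into max_sectors-sized tasks. */
	unsigned int old_task_count = DIV_ROUND_UP(sectors, max_sectors);

	/*
	 * New behaviour: one task per data direction; anything larger than
	 * max_sectors is rejected as an invalid CDB field instead of split.
	 */
	if (sectors > max_sectors)
		printf("%u sectors > max_sectors %u: reject CDB "
		       "(old code would have built %u tasks)\n",
		       sectors, max_sectors, old_task_count);
	else
		printf("one task of %u sectors\n", sectors);
	return 0;
}

The companion hunk in target_core_cdb.c keeps initiators within that
limit by reporting min(fabric_max_sectors, max_sectors) as the MAXIMUM
TRANSFER LENGTH in the block limits VPD page.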

On Mon, Mar 26, 2012 at 05:53:17PM -0400, Christoph Hellwig wrote:
> The high-performance backends (iblock and rd) support tasks of unlimited
> size.  With that there is no reason to keep a complex infrastructure for
> splitting up commands in place.  Stop doing so and only submit a single
> task per data direction.  Once this is in place we can slowly remove fields
> from the task that duplicate things in the command, or move other fields
> into the command.
> 
> Signed-off-by: Christoph Hellwig <hch@xxxxxx>
> 
> ---
>  drivers/target/target_core_cdb.c       |    5 +
>  drivers/target/target_core_transport.c |  122 +++++----------------------------
>  2 files changed, 25 insertions(+), 102 deletions(-)
> 
> Index: lio-core/drivers/target/target_core_transport.c
> ===================================================================
> --- lio-core.orig/drivers/target/target_core_transport.c	2012-03-26 23:15:14.433329766 +0200
> +++ lio-core/drivers/target/target_core_transport.c	2012-03-26 23:15:23.519947207 +0200
> @@ -3186,7 +3186,8 @@ static int transport_generic_cmd_sequenc
>  	}
>  
>  	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB &&
> -	    sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
> +	    (sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors ||
> +	     sectors > dev->se_sub_dev->se_dev_attrib.max_sectors)) {
>  		printk_ratelimited(KERN_ERR "SCSI OP %02xh with too big sectors %u\n",
>  				   cdb[0], sectors);
>  		goto out_invalid_cdb_field;
> @@ -3449,12 +3450,7 @@ static void transport_free_dev_tasks(str
>  	while (!list_empty(&dispose_list)) {
>  		task = list_first_entry(&dispose_list, struct se_task, t_list);
>  
> -		if (task->task_sg != cmd->t_data_sg &&
> -		    task->task_sg != cmd->t_bidi_data_sg)
> -			kfree(task->task_sg);
> -
>  		list_del(&task->t_list);
> -
>  		cmd->se_dev->transport->free_task(task);
>  	}
>  }
> @@ -3785,111 +3781,35 @@ transport_allocate_data_tasks(struct se_
>  	struct scatterlist *cmd_sg, unsigned int sgl_nents)
>  {
>  	struct se_device *dev = cmd->se_dev;
> -	int task_count, i;
> -	unsigned long long lba;
> -	sector_t sectors, dev_max_sectors;
> -	u32 sector_size;
> +	struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
> +	sector_t sectors;
> +	struct se_task *task;
> +	unsigned long flags;
>  
>  	if (transport_cmd_get_valid_sectors(cmd) < 0)
>  		return -EINVAL;
>  
> -	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
> -	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
> -
> -	WARN_ON(cmd->data_length % sector_size);
> +	sectors = DIV_ROUND_UP(cmd->data_length, attr->block_size);
>  
> -	lba = cmd->t_task_lba;
> -	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
> -	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors);
> +	BUG_ON(cmd->data_length % attr->block_size);
> +	BUG_ON(sectors > attr->max_sectors);
>  
> -	/*
> -	 * If we need just a single task reuse the SG list in the command
> -	 * and avoid a lot of work.
> -	 */
> -	if (task_count == 1) {
> -		struct se_task *task;
> -		unsigned long flags;
> -
> -		task = transport_generic_get_task(cmd, data_direction);
> -		if (!task)
> -			return -ENOMEM;
> -
> -		task->task_sg = cmd_sg;
> -		task->task_sg_nents = sgl_nents;
> -
> -		task->task_lba = lba;
> -		task->task_sectors = sectors;
> -		task->task_size = task->task_sectors * sector_size;
> -
> -		spin_lock_irqsave(&cmd->t_state_lock, flags);
> -		list_add_tail(&task->t_list, &cmd->t_task_list);
> -		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
> -
> -		return task_count;
> -	}
> -
> -	for (i = 0; i < task_count; i++) {
> -		struct se_task *task;
> -		unsigned int task_size, task_sg_nents_padded;
> -		struct scatterlist *sg;
> -		unsigned long flags;
> -		int count;
> -
> -		task = transport_generic_get_task(cmd, data_direction);
> -		if (!task)
> -			return -ENOMEM;
> -
> -		task->task_lba = lba;
> -		task->task_sectors = min(sectors, dev_max_sectors);
> -		task->task_size = task->task_sectors * sector_size;
> -		/*
> -		 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
> -		 * in order to calculate the number per task SGL entries
> -		 */
> -		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
> -		/*
> -		 * Check if the fabric module driver is requesting that all
> -		 * struct se_task->task_sg[] be chained together..  If so,
> -		 * then allocate an extra padding SG entry for linking and
> -		 * marking the end of the chained SGL for every task except
> -		 * the last one for (task_count > 1) operation, or skipping
> -		 * the extra padding for the (task_count == 1) case.
> -		 */
> -		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
> -			task_sg_nents_padded = (task->task_sg_nents + 1);
> -		} else
> -			task_sg_nents_padded = task->task_sg_nents;
> -
> -		task->task_sg = kmalloc(sizeof(struct scatterlist) *
> -					task_sg_nents_padded, GFP_KERNEL);
> -		if (!task->task_sg) {
> -			cmd->se_dev->transport->free_task(task);
> -			return -ENOMEM;
> -		}
> -
> -		sg_init_table(task->task_sg, task_sg_nents_padded);
> -
> -		task_size = task->task_size;
> -
> -		/* Build new sgl, only up to task_size */
> -		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
> -			if (cmd_sg->length > task_size)
> -				break;
> +	task = transport_generic_get_task(cmd, data_direction);
> +	if (!task)
> +		return -ENOMEM;
>  
> -			*sg = *cmd_sg;
> -			task_size -= cmd_sg->length;
> -			cmd_sg = sg_next(cmd_sg);
> -		}
> +	task->task_sg = cmd_sg;
> +	task->task_sg_nents = sgl_nents;
> +	task->task_size = cmd->data_length;
>  
> -		lba += task->task_sectors;
> -		sectors -= task->task_sectors;
> +	task->task_lba = cmd->t_task_lba;
> +	task->task_sectors = sectors;
>  
> -		spin_lock_irqsave(&cmd->t_state_lock, flags);
> -		list_add_tail(&task->t_list, &cmd->t_task_list);
> -		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
> -	}
> +	spin_lock_irqsave(&cmd->t_state_lock, flags);
> +	list_add_tail(&task->t_list, &cmd->t_task_list);
> +	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
>  
> -	return task_count;
> +	return 1;
>  }
>  
>  static int
> Index: lio-core/drivers/target/target_core_cdb.c
> ===================================================================
> --- lio-core.orig/drivers/target/target_core_cdb.c	2012-03-26 23:15:13.680000514 +0200
> +++ lio-core/drivers/target/target_core_cdb.c	2012-03-26 23:15:23.519947207 +0200
> @@ -432,6 +432,7 @@ static int
>  target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
>  {
>  	struct se_device *dev = cmd->se_dev;
> +	u32 max_sectors;
>  	int have_tp = 0;
>  
>  	/*
> @@ -456,7 +457,9 @@ target_emulate_evpd_b0(struct se_cmd *cm
>  	/*
>  	 * Set MAXIMUM TRANSFER LENGTH
>  	 */
> -	put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors, &buf[8]);
> +	max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
> +			  dev->se_sub_dev->se_dev_attrib.max_sectors);
> +	put_unaligned_be32(max_sectors, &buf[8]);
>  
>  	/*
>  	 * Set OPTIMAL TRANSFER LENGTH
---end quoted text---