Re: [PATCH v3 15/18] ibmvfc: send Cancel MAD down each hw scsi channel

On 12/2/20 8:08 PM, Tyrel Datwyler wrote:
> In general the client needs to send Cancel MADs and task management
> commands down the same channel as the command(s) intended to cancel or
> abort. The client assigns cancel keys per LUN and thus must send a
> Cancel down each channel commands were submitted for that LUN. Further,
> the client then must wait for those cancel completions prior to
> submitting a LUN RESET or ABORT TASK SET.
> 
> Add a cancel event pointer and cancel rsp iu storage to the
> ibmvfc_sub_queue struct such that the cancel routine can assign a cancel
> event to each applicable queue. When in legacy CRQ mode we fake treating
> it as a subqueue by using a subqueue struct allocated on the stack. Wait
> for completion of each submitted cancel.
> 
> Signed-off-by: Tyrel Datwyler <tyreld@xxxxxxxxxxxxx>
> ---
>  drivers/scsi/ibmvscsi/ibmvfc.c | 104 ++++++++++++++++++++++-----------
>  drivers/scsi/ibmvscsi/ibmvfc.h |  38 ++++++------
>  2 files changed, 90 insertions(+), 52 deletions(-)
> 
> diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
> index ec3db5a6baf3..e353b9e88104 100644
> --- a/drivers/scsi/ibmvscsi/ibmvfc.c
> +++ b/drivers/scsi/ibmvscsi/ibmvfc.c
> @@ -2339,67 +2339,103 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
>  {
>  	struct ibmvfc_host *vhost = shost_priv(sdev->host);
>  	struct ibmvfc_event *evt, *found_evt;
> -	union ibmvfc_iu rsp;
> -	int rsp_rc = -EBUSY;
> +	struct ibmvfc_sub_queue *scrqs;
> +	struct ibmvfc_sub_queue legacy_crq;
> +	int rsp_rc = 0;
>  	unsigned long flags;
>  	u16 status;
> +	int cancel_cnt = 0;
> +	int num_hwq;
> +	int ret = 0;
> +	int i;
>  
>  	ENTER;
>  	spin_lock_irqsave(vhost->host->host_lock, flags);
> -	found_evt = NULL;
> -	list_for_each_entry(evt, &vhost->sent, queue) {
> -		if (evt->cmnd && evt->cmnd->device == sdev) {
> -			found_evt = evt;
> +	if (vhost->using_channels && vhost->scsi_scrqs.active_queues) {
> +		num_hwq = vhost->scsi_scrqs.active_queues;
> +		scrqs = vhost->scsi_scrqs.scrqs;
> +	} else {
> +		/* Use ibmvfc_sub_queue on the stack to fake legacy CRQ as a subqueue */
> +		num_hwq = 1;
> +		scrqs = &legacy_crq;
> +	}
> +
> +	for (i = 0; i < num_hwq; i++) {
> +		scrqs[i].cancel_event = NULL;
> +		found_evt = NULL;
> +		list_for_each_entry(evt, &vhost->sent, queue) {
> +			if (evt->cmnd && evt->cmnd->device == sdev && evt->hwq == i) {
> +				found_evt = evt;
> +				cancel_cnt++;
> +				break;
> +			}
> +		}
> +
> +		if (!found_evt)
> +			continue;
> +
> +		if (vhost->logged_in) {
> +			scrqs[i].cancel_event = ibmvfc_init_tmf(vhost, sdev, type);
> +			scrqs[i].cancel_event->hwq = i;
> +			scrqs[i].cancel_event->sync_iu = &scrqs[i].cancel_rsp;
> +			rsp_rc = ibmvfc_send_event(scrqs[i].cancel_event, vhost, default_timeout);
> +			if (rsp_rc)
> +				break;

It looks like if you have two outstanding commands on two different hwqs,
and you succeed in sending a cancel for the first hwq but fail to send it
for the second hwq, due to something like a transport event, then you fall
down into free_events and call ibmvfc_free_event. That does a list_add_tail
to put the event on the free list without ever having pulled it off the
sent list, so the free list and the sent list end up intermingled and you
get list corruption. It would probably be better to only free the events
if you never sent them, or if you are sure they completed. So you might
need to wait for the completion of the cancel events that did get sent,
which would likely be completed via purge_all.
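
Something like this untested sketch is what I have in mind. It assumes
ibmvfc_send_event cleans up an event it fails to send, so on failure the
caller just drops its reference, and the error path waits for the cancels
that did go out before free_events touches anything:

		rsp_rc = ibmvfc_send_event(scrqs[i].cancel_event, vhost, default_timeout);
		if (rsp_rc) {
			/* the send path cleans up the failed event itself,
			 * so just drop our reference and stop submitting */
			scrqs[i].cancel_event = NULL;
			break;
		}

	...

	/* error path: drop the host lock, then wait for every cancel that
	 * did make it out (purge_all would complete these on a reset)
	 * before reacquiring the lock and freeing them */
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	for (i = 0; i < num_hwq; i++)
		if (scrqs[i].cancel_event)
			wait_for_completion(&scrqs[i].cancel_event->comp);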


> +		} else {
> +			rsp_rc = -EBUSY;
>  			break;
>  		}
>  	}
>  
> -	if (!found_evt) {
> +	spin_unlock_irqrestore(vhost->host->host_lock, flags);
> +
> +	if (!cancel_cnt) {
>  		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
>  			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
> -		spin_unlock_irqrestore(vhost->host->host_lock, flags);
>  		return 0;
>  	}
>  
> -	if (vhost->logged_in) {
> -		evt = ibmvfc_init_tmf(vhost, sdev, type);
> -		evt->sync_iu = &rsp;
> -		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
> -	}
> -
> -	spin_unlock_irqrestore(vhost->host->host_lock, flags);
> -
>  	if (rsp_rc != 0) {
>  		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
>  		/* If failure is received, the host adapter is most likely going
>  		 through reset, return success so the caller will wait for the command
>  		 being cancelled to get returned */
> -		return 0;
> +		goto free_events;
>  	}
>  
>  	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
>  
> -	wait_for_completion(&evt->comp);
> -	status = be16_to_cpu(rsp.mad_common.status);
> -	spin_lock_irqsave(vhost->host->host_lock, flags);
> -	ibmvfc_free_event(evt);
> -	spin_unlock_irqrestore(vhost->host->host_lock, flags);
> +	for (i = 0; i < num_hwq; i++) {
> +		if (!scrqs[i].cancel_event)
> +			continue;
>  
> -	if (status != IBMVFC_MAD_SUCCESS) {
> -		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
> -		switch (status) {
> -		case IBMVFC_MAD_DRIVER_FAILED:
> -		case IBMVFC_MAD_CRQ_ERROR:
> -			/* Host adapter most likely going through reset, return success to
> -			 the caller will wait for the command being cancelled to get returned */
> -			return 0;
> -		default:
> -			return -EIO;
> -		};
> +		wait_for_completion(&scrqs[i].cancel_event->comp);
> +		status = be16_to_cpu(scrqs[i].cancel_rsp.mad_common.status);
> +
> +		if (status != IBMVFC_MAD_SUCCESS) {
> +			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
> +			switch (status) {
> +			case IBMVFC_MAD_DRIVER_FAILED:
> +			case IBMVFC_MAD_CRQ_ERROR:
> +				/* Host adapter most likely going through reset, return success to
> +				 the caller will wait for the command being cancelled to get returned */
> +				goto free_events;

Similar comment here... What about the rest of the outstanding cancel commands? Do you need
to wait for those to complete before freeing them?
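
One untested way to structure that: run the wait loop to completion
unconditionally and only settle the return code afterwards, so that
free_events never runs against an event still sitting on the sent list:

	for (i = 0; i < num_hwq; i++) {
		if (!scrqs[i].cancel_event)
			continue;

		wait_for_completion(&scrqs[i].cancel_event->comp);
		status = be16_to_cpu(scrqs[i].cancel_rsp.mad_common.status);

		if (status != IBMVFC_MAD_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
			switch (status) {
			case IBMVFC_MAD_DRIVER_FAILED:
			case IBMVFC_MAD_CRQ_ERROR:
				/* adapter resetting; still return success */
				break;
			default:
				ret = -EIO;
				break;
			}
		}
	}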

> +			default:
> +				ret = -EIO;
> +				goto free_events;
> +			};
> +		}
>  	}
>  
>  	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
> -	return 0;
> +free_events:
> +	spin_lock_irqsave(vhost->host->host_lock, flags);
> +	for (i = 0; i < num_hwq; i++)
> +		if (scrqs[i].cancel_event)
> +			ibmvfc_free_event(scrqs[i].cancel_event);
> +	spin_unlock_irqrestore(vhost->host->host_lock, flags);
> +
> +	return ret;
>  }
>  
>  /**



-- 
Brian King
Power Linux I/O
IBM Linux Technology Center



