Follow the normal path for requests and queue through the block layer.
We hold the qc pointer in the scmd host scribble, which is less than
ideal. In future we can hold the ata_queued_cmd in the scmd priv_data,
allowing for easy lookup.
We need to use the shost sdev to queue these requests. That is because we
initially do not have the target disk scsi_device allocated yet when
queueing internal commands. This is less than ideal, and makes it hard for
libsas to know when queueing an internal command whether it is an ATA
internal command or not.
Also make ata_exec_internal_sg() static - there are no users outside
libata-core.c
Signed-off-by: John Garry <john.garry@xxxxxxxxxx>
---
drivers/ata/libata-core.c | 137 +++++++++++++++++++++-----------------
drivers/ata/libata-sata.c | 5 +-
drivers/ata/libata.h | 4 --
3 files changed, 80 insertions(+), 66 deletions(-)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 40e816419f48..6b1aaeccb253 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1438,9 +1438,18 @@ unsigned long ata_id_xfermask(const u16 *id)
}
EXPORT_SYMBOL_GPL(ata_id_xfermask);
-static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
+void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
- struct completion *waiting = qc->private_data;
+ struct scsi_cmnd *scmd = qc->scsicmd;
+
+ scsi_done(scmd);
+}
+
+static void ata_internal_end_rq(struct request *rq, blk_status_t error)
+{
+ struct completion *waiting = rq->end_io_data;
+
+ rq->end_io_data = (void *)(uintptr_t)error;
complete(waiting);
}
@@ -1467,52 +1476,74 @@ static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
* RETURNS:
* Zero on success, AC_ERR_* mask on failure
*/
-unsigned ata_exec_internal_sg(struct ata_device *dev,
+static unsigned ata_exec_internal_sg(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
- int dma_dir, struct scatterlist *sgl,
- unsigned int n_elem, unsigned long timeout)
+ int dma_dir, void *buf, unsigned int buflen,
+ unsigned long timeout)
{
struct ata_link *link = dev->link;
struct ata_port *ap = link->ap;
+ struct Scsi_Host *scsi_host = ap->scsi_host;
+ struct scsi_device *sdev = scsi_host->sdev;
u8 command = tf->command;
int auto_timeout = 0;
struct ata_queued_cmd *qc;
- unsigned int preempted_tag;
- u32 preempted_sactive;
- u64 preempted_qc_active;
- int preempted_nr_active_links;
DECLARE_COMPLETION_ONSTACK(wait);
unsigned long flags;
unsigned int err_mask;
+ struct scsi_cmnd *scmd;
+ struct request *req;
int rc;
- spin_lock_irqsave(ap->lock, flags);
+ /*
+ * We only support a single reserved command, so this guarantees
+ * serialization. However the code already assumed that (we are
+ * serialized here per-port).
+ */
+ req = scsi_alloc_request(sdev->request_queue,
+ dma_dir == DMA_TO_DEVICE ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+ BLK_MQ_REQ_RESERVED);
+ if (IS_ERR(req))
+ return AC_ERR_OTHER;
- /* no internal command while frozen */
- if (ap->pflags & ATA_PFLAG_FROZEN) {
- spin_unlock_irqrestore(ap->lock, flags);
- return AC_ERR_SYSTEM;
+
+ if (!timeout) {
+ if (ata_probe_timeout)
+ timeout = ata_probe_timeout * 1000;
+ else {
+ timeout = ata_internal_cmd_timeout(dev, command);
+ auto_timeout = 1;
+ }
}
- /* initialize internal qc */
+ scmd = blk_mq_rq_to_pdu(req);
+ scmd->allowed = 0;
+ req->timeout = timeout;
+ //TODO: Hook up timeout handler
+ req->rq_flags |= RQF_QUIET;
+ scmd->device = sdev;
qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
+ /* Do this until we can hold ata_queued_cmd in the SCMD priv data */
+ scmd->host_scribble = (unsigned char *)qc;
+
+ if (buflen) {
+ int ret = blk_rq_map_kern(sdev->request_queue, req,
+ buf, buflen, GFP_NOIO);
+ if (ret) {
+ blk_mq_free_request(req);
+ return AC_ERR_OTHER;
+ }
+ }
+
qc->tag = ATA_TAG_INTERNAL;
qc->hw_tag = 0;
- qc->scsicmd = NULL;
+ qc->scsicmd = scmd;
qc->ap = ap;
qc->dev = dev;
ata_qc_reinit(qc);
- preempted_tag = link->active_tag;
- preempted_sactive = link->sactive;
- preempted_qc_active = ap->qc_active;
- preempted_nr_active_links = ap->nr_active_links;
- link->active_tag = ATA_TAG_POISON;
- link->sactive = 0;
- ap->qc_active = 0;
- ap->nr_active_links = 0;
-
/* prepare & issue qc */
qc->tf = *tf;
if (cdb)
@@ -1525,32 +1556,14 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
qc->flags |= ATA_QCFLAG_RESULT_TF;
qc->dma_dir = dma_dir;
- if (dma_dir != DMA_NONE) {
- unsigned int i, buflen = 0;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, n_elem, i)
- buflen += sg->length;
-
- ata_sg_init(qc, sgl, n_elem);
- qc->nbytes = buflen;
- }
- qc->private_data = &wait;
+ qc->private_data = ap;
qc->complete_fn = ata_qc_complete_internal;
- ata_qc_issue(qc);
-
- spin_unlock_irqrestore(ap->lock, flags);
+ req->end_io_data = &wait;
+ req->end_io = ata_internal_end_rq;
- if (!timeout) {
- if (ata_probe_timeout)
- timeout = ata_probe_timeout * 1000;
- else {
- timeout = ata_internal_cmd_timeout(dev, command);
- auto_timeout = 1;
- }
- }