[PATCH 1/2] mmc_block: factor out the mmc request handling

From: Pierre Ossman <drzeus@xxxxxxxxx>

Move the handling of the actual MMC request to its own function so
that the main request handler can be extended to handle request
types other than simple reads and writes.

Signed-off-by: Pierre Ossman <drzeus@xxxxxxxxx>
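To illustrate where this is heading (a sketch only, not part of this
patch): once the transfer path sits in its own helper, the top-level
handler can become a dispatcher on the request type. blk_fs_request()
is the usual test; the mmc_blk_issue_*() helpers below are placeholder
names, not existing functions.

/*
 * Hypothetical sketch, not part of this patch: with the transfer
 * path factored out into mmc_blk_xfer_rq(), the top-level handler
 * could dispatch on the request type.  mmc_blk_issue_special_rq()
 * and mmc_blk_issue_fs_rq() are placeholder names.
 */
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	if (!blk_fs_request(req))
		return mmc_blk_issue_special_rq(mq, req);

	/*
	 * ...otherwise run the read/write loop built around
	 * mmc_blk_xfer_rq(), exactly as in the patch below.
	 */
	return mmc_blk_issue_fs_rq(mq, req);
}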

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 86dbb36..3140e92 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -208,195 +208,215 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	return blocks;
 }
 
-static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+static int mmc_blk_xfer_rq(struct mmc_blk_data *md,
+	struct request *req, unsigned int *bytes_xfered)
 {
-	struct mmc_blk_data *md = mq->data;
-	struct mmc_card *card = md->queue.card;
+	struct mmc_card *card;
+
 	struct mmc_blk_request brq;
-	int ret = 1, data_size, i;
+	int ret, data_size, i;
 	struct scatterlist *sg;
+	u32 readcmd, writecmd;
 
-	mmc_claim_host(card->host);
+	BUG_ON(!bytes_xfered);
 
-	do {
-		struct mmc_command cmd;
-		u32 readcmd, writecmd;
-
-		memset(&brq, 0, sizeof(struct mmc_blk_request));
-		brq.mrq.cmd = &brq.cmd;
-		brq.mrq.data = &brq.data;
-
-		brq.cmd.arg = req->sector;
-		if (!mmc_card_blockaddr(card))
-			brq.cmd.arg <<= 9;
-		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-		brq.data.blksz = 1 << md->block_bits;
-		brq.stop.opcode = MMC_STOP_TRANSMISSION;
-		brq.stop.arg = 0;
-		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
-		if (brq.data.blocks > card->host->max_blk_count)
-			brq.data.blocks = card->host->max_blk_count;
-
-		if (brq.data.blocks > 1) {
-			/* SPI multiblock writes terminate using a special
-			 * token, not a STOP_TRANSMISSION request.
-			 */
-			if (!mmc_host_is_spi(card->host)
-					|| rq_data_dir(req) == READ)
-				brq.mrq.stop = &brq.stop;
-			readcmd = MMC_READ_MULTIPLE_BLOCK;
-			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
-		} else {
-			brq.mrq.stop = NULL;
-			readcmd = MMC_READ_SINGLE_BLOCK;
-			writecmd = MMC_WRITE_BLOCK;
-		}
-
-		if (rq_data_dir(req) == READ) {
-			brq.cmd.opcode = readcmd;
-			brq.data.flags |= MMC_DATA_READ;
-		} else {
-			brq.cmd.opcode = writecmd;
-			brq.data.flags |= MMC_DATA_WRITE;
-		}
+	card = md->queue.card;
 
-		mmc_set_data_timeout(&brq.data, card);
+	memset(&brq, 0, sizeof(struct mmc_blk_request));
+	brq.mrq.cmd = &brq.cmd;
+	brq.mrq.data = &brq.data;
 
-		brq.data.sg = mq->sg;
-		brq.data.sg_len = mmc_queue_map_sg(mq);
-
-		mmc_queue_bounce_pre(mq);
+	brq.cmd.arg = req->sector;
+	if (!mmc_card_blockaddr(card))
+		brq.cmd.arg <<= 9;
+	brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+	brq.data.blksz = 1 << md->block_bits;
+	brq.stop.opcode = MMC_STOP_TRANSMISSION;
+	brq.stop.arg = 0;
+	brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
+	if (brq.data.blocks > card->host->max_blk_count)
+		brq.data.blocks = card->host->max_blk_count;
 
+	if (brq.data.blocks > 1) {
 		/*
-		 * Adjust the sg list so it is the same size as the
-		 * request.
+		 * SPI multiblock writes terminate using a special
+		 * token, not a STOP_TRANSMISSION request.
 		 */
-		if (brq.data.blocks !=
-		    (req->nr_sectors >> (md->block_bits - 9))) {
-			data_size = brq.data.blocks * brq.data.blksz;
-			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-				data_size -= sg->length;
-				if (data_size <= 0) {
-					sg->length += data_size;
-					i++;
-					break;
-				}
-			}
-			brq.data.sg_len = i;
-		}
+		if (!mmc_host_is_spi(card->host)
+				|| rq_data_dir(req) == READ)
+			brq.mrq.stop = &brq.stop;
+		readcmd = MMC_READ_MULTIPLE_BLOCK;
+		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+	} else {
+		brq.mrq.stop = NULL;
+		readcmd = MMC_READ_SINGLE_BLOCK;
+		writecmd = MMC_WRITE_BLOCK;
+	}
 
-		mmc_wait_for_req(card->host, &brq.mrq);
+	if (rq_data_dir(req) == READ) {
+		brq.cmd.opcode = readcmd;
+		brq.data.flags |= MMC_DATA_READ;
+	} else {
+		brq.cmd.opcode = writecmd;
+		brq.data.flags |= MMC_DATA_WRITE;
+	}
 
-		mmc_queue_bounce_post(mq);
+	mmc_set_data_timeout(&brq.data, card);
 
-		/*
-		 * Check for errors here, but don't jump to cmd_err
-		 * until later as we need to wait for the card to leave
-		 * programming mode even when things go wrong.
-		 */
-		if (brq.cmd.error) {
-			printk(KERN_ERR "%s: error %d sending read/write command\n",
-			       req->rq_disk->disk_name, brq.cmd.error);
-		}
+	brq.data.sg = md->queue.sg;
+	brq.data.sg_len = mmc_queue_map_sg(&md->queue);
 
-		if (brq.data.error) {
-			printk(KERN_ERR "%s: error %d transferring data\n",
-			       req->rq_disk->disk_name, brq.data.error);
-		}
+	mmc_queue_bounce_pre(&md->queue);
 
-		if (brq.stop.error) {
-			printk(KERN_ERR "%s: error %d sending stop command\n",
-			       req->rq_disk->disk_name, brq.stop.error);
+	/*
+	 * Adjust the sg list so it is the same size as the
+	 * request.
+	 */
+	if (brq.data.blocks !=
+	    (req->nr_sectors >> (md->block_bits - 9))) {
+		data_size = brq.data.blocks * brq.data.blksz;
+		for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
+			data_size -= sg->length;
+			if (data_size <= 0) {
+				sg->length += data_size;
+				i++;
+				break;
+			}
 		}
+		brq.data.sg_len = i;
+	}
 
-		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
-			do {
-				int err;
-
-				cmd.opcode = MMC_SEND_STATUS;
-				cmd.arg = card->rca << 16;
-				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
-				err = mmc_wait_for_cmd(card->host, &cmd, 5);
-				if (err) {
-					printk(KERN_ERR "%s: error %d requesting status\n",
-					       req->rq_disk->disk_name, err);
-					goto cmd_err;
-				}
-				/*
-				 * Some cards mishandle the status bits,
-				 * so make sure to check both the busy
-				 * indication and the card state.
-				 */
-			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
-				(R1_CURRENT_STATE(cmd.resp[0]) == 7));
-
-#if 0
-			if (cmd.resp[0] & ~0x00000900)
-				printk(KERN_ERR "%s: status = %08x\n",
-				       req->rq_disk->disk_name, cmd.resp[0]);
-			if (mmc_decode_status(cmd.resp))
-				goto cmd_err;
-#endif
-		}
+	mmc_wait_for_req(card->host, &brq.mrq);
 
-		if (brq.cmd.error || brq.data.error || brq.stop.error)
-			goto cmd_err;
+	mmc_queue_bounce_post(&md->queue);
 
-		/*
-		 * A block was successfully transferred.
-		 */
-		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
-		spin_unlock_irq(&md->lock);
-	} while (ret);
+	ret = 0;
+	*bytes_xfered = brq.data.bytes_xfered;
 
-	mmc_release_host(card->host);
+	if (brq.cmd.error) {
+		ret = brq.cmd.error;
+		printk(KERN_ERR "%s: error %d sending read/write command\n",
+		       req->rq_disk->disk_name, brq.cmd.error);
+	}
 
-	return 1;
+	if (brq.data.error) {
+		ret = brq.data.error;
+		printk(KERN_ERR "%s: error %d transferring data\n",
+		       req->rq_disk->disk_name, brq.data.error);
+	}
 
- cmd_err:
- 	/*
- 	 * If this is an SD card and we're writing, we can first
- 	 * mark the known good sectors as ok.
- 	 *
-	 * If the card is not SD, we can still ok written sectors
-	 * as reported by the controller (which might be less than
-	 * the real number of written sectors, but never more).
-	 *
-	 * For reads we just fail the entire chunk as that should
-	 * be safe in all cases.
+	if (brq.stop.error) {
+		ret = brq.stop.error;
+		printk(KERN_ERR "%s: error %d sending stop command\n",
+		       req->rq_disk->disk_name, brq.stop.error);
+	}
+
+	/*
+	 * We need to wait for the card to leave programming mode
+	 * even when things go wrong.
 	 */
-	if (rq_data_dir(req) != READ) {
-		if (mmc_card_sd(card)) {
+	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+		int cmd_ret;
+		struct mmc_command cmd;
+
+		do {
+			cmd.opcode = MMC_SEND_STATUS;
+			cmd.arg = card->rca << 16;
+			cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+			cmd_ret = mmc_wait_for_cmd(card->host, &cmd, 5);
+			if (cmd_ret) {
+				printk(KERN_ERR "%s: error %d requesting status\n",
+				       req->rq_disk->disk_name, cmd_ret);
+				ret = cmd_ret;
+				break;
+			}
+			/*
+			 * Some cards mishandle the status bits,
+			 * so make sure to check both the busy
+			 * indication and the card state.
+			 */
+		} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
+			(R1_CURRENT_STATE(cmd.resp[0]) == 7));
+	}
+
+	/*
+	 * Adjust the number of bytes transferred if there has been
+	 * an error...
+	 */
+	if (ret) {
+		/*
+		 * For reads we just fail the entire chunk as that should
+		 * be safe in all cases.
+		 *
+		 * If this is an SD card and we're writing, we can ask the
+		 * card for known good sectors.
+		 *
+		 * If the card is not SD, we can still ok written sectors
+		 * as reported by the controller (which might be less than
+		 * the real number of written sectors, but never more).
+		 */
+		if (rq_data_dir(req) == READ)
+			*bytes_xfered = 0;
+		else if (mmc_card_sd(card)) {
 			u32 blocks;
-			unsigned int bytes;
 
 			blocks = mmc_sd_num_wr_blocks(card);
-			if (blocks != (u32)-1) {
+			if (blocks == (u32)-1)
+				*bytes_xfered = 0;
+			else {
 				if (card->csd.write_partial)
-					bytes = blocks << md->block_bits;
+					*bytes_xfered = blocks << md->block_bits;
 				else
-					bytes = blocks << 9;
-				spin_lock_irq(&md->lock);
-				ret = __blk_end_request(req, 0, bytes);
-				spin_unlock_irq(&md->lock);
+					*bytes_xfered = blocks << 9;
 			}
-		} else {
+		}
+	}
+
+	return ret;
+}
+
+static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	unsigned int bytes_xfered;
+	int ret, err;
+
+	mmc_claim_host(card->host);
+
+	do {
+		err = mmc_blk_xfer_rq(md, req, &bytes_xfered);
+
+		/*
+		 * First handle the sectors that got transferred
+		 * successfully...
+		 */
+		spin_lock_irq(&md->lock);
+		ret = __blk_end_request(req, 0, bytes_xfered);
+		spin_unlock_irq(&md->lock);
+
+		/*
+		 * ...then check if things went south.
+		 */
+		if (err) {
+			mmc_release_host(card->host);
+
+			/*
+			 * Kill off the rest of the request...
+			 */
 			spin_lock_irq(&md->lock);
-			ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+			while (ret)
+				ret = __blk_end_request(req, -EIO,
+					blk_rq_cur_bytes(req));
 			spin_unlock_irq(&md->lock);
+
+			return 0;
 		}
-	}
+	} while (ret);
 
 	mmc_release_host(card->host);
 
-	spin_lock_irq(&md->lock);
-	while (ret)
-		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
-	spin_unlock_irq(&md->lock);
-
-	return 0;
+	return 1;
 }
 

-- 
     -- Pierre Ossman

  Linux kernel, MMC maintainer        http://www.kernel.org
  rdesktop, core developer          http://www.rdesktop.org

  WARNING: This correspondence is being monitored by the
  Swedish government. Make sure your server uses encryption
  for SMTP traffic and consider using PGP for end-to-end
  encryption.