Re: [PATCH 07/14] block: change plugging to use a singly linked list

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Sat, Oct 16, 2021 at 07:37:41PM -0600, Jens Axboe wrote:
> Use a singly linked list for the blk_plug. This saves 8 bytes in the
> blk_plug struct, and makes for faster list manipulations than doubly
> linked lists. As we don't use the doubly linked lists for anything,
> singly linked is just fine.

This patch also does a few other things, like changing the same_queue_rq
type and adding a has_elevator flag to the plug.  Can you please split
this out and document the changes better?

>static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
>  {
> +	struct blk_mq_hw_ctx *hctx = NULL;
> +	int queued = 0;
> +	int errors = 0;
> +
> +	while (!rq_list_empty(plug->mq_list)) {
> +		struct request *rq;
> +		blk_status_t ret;
> +
> +		rq = rq_list_pop(&plug->mq_list);
>  
> +		if (!hctx)
> +			hctx = rq->mq_hctx;
> +		else if (hctx != rq->mq_hctx && hctx->queue->mq_ops->commit_rqs) {
> +			trace_block_unplug(hctx->queue, queued, !from_schedule);
> +			hctx->queue->mq_ops->commit_rqs(hctx);
> +			queued = 0;
> +			hctx = rq->mq_hctx;
> +		}
> +
> +		ret = blk_mq_request_issue_directly(rq,
> +						rq_list_empty(plug->mq_list));
> +		if (ret != BLK_STS_OK) {
> +			if (ret == BLK_STS_RESOURCE ||
> +					ret == BLK_STS_DEV_RESOURCE) {
> +				blk_mq_request_bypass_insert(rq, false,
> +						rq_list_empty(plug->mq_list));
> +				break;
> +			}
> +			blk_mq_end_request(rq, ret);
> +			errors++;
> +		} else
> +			queued++;
> +	}

This all looks a bit messy to me.  I'd suggest reordering this a bit
including a new helper:

/*
 * Tell the driver the current batch is complete (if it implements
 * ->commit_rqs) and reset the caller's per-batch counter.
 */
static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
		bool from_schedule)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->commit_rqs) {
		trace_block_unplug(q, *queued, !from_schedule);
		q->mq_ops->commit_rqs(hctx);
	}
	*queued = 0;
}

/*
 * Issue all requests on the plug list directly, batching per hardware
 * context: whenever the hctx changes, commit the previous batch so the
 * driver's ->commit_rqs hook sees an accurate count.
 */
static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx = NULL;
	struct request *rq;	/* was missing: rq used but never declared */
	int queued = 0;
	int errors = 0;

	while ((rq = rq_list_pop(&plug->mq_list))) {
		bool last = rq_list_empty(plug->mq_list);
		blk_status_t ret;

		if (hctx != rq->mq_hctx) {
			/* switching hw context: flush the batch queued so far */
			if (hctx)
				blk_mq_commit_rqs(hctx, &queued, from_schedule);
			hctx = rq->mq_hctx;
		}

		ret = blk_mq_request_issue_directly(rq, last);
		switch (ret) {
		case BLK_STS_OK:
			queued++;
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			/* out of resources: requeue and stop issuing */
			blk_mq_request_bypass_insert(rq, false, last);
			blk_mq_commit_rqs(hctx, &queued, from_schedule);
			return;
		default:
			blk_mq_end_request(rq, ret);
			errors++;
			break;
		}
	}

	/*
	 * If we didn't flush the entire list, we could have told the driver
	 * there was more coming, but that turned out to be a lie.
	 */
	if (errors)
		blk_mq_commit_rqs(hctx, &queued, from_schedule);
}



[Index of Archives]     [Linux RAID]     [Linux SCSI]     [Linux ATA RAID]     [IDE]     [Linux Wireless]     [Linux Kernel]     [ATH6KL]     [Linux Bluetooth]     [Linux Netdev]     [Kernel Newbies]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Device Mapper]

  Powered by Linux