Commit 6e94e7fd authored by Yankun Li's avatar Yankun Li Committed by Tomasz Zawadzki
Browse files

lib/reduce: merge consecutive IO requests



If chunk_size is 16KB and backing_io_unit is 4KB, then reading
or writing 16KB of data is split into four small IO
requests.
With this patch, when the accessed areas are adjacent, the small
IOs are combined into a single IO request, reducing IO latency.

Change-Id: Id8c91854679fcb7f1706a3314dbbe168b4d4f501
Signed-off-by: default avatarYankun Li <845245370@qq.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/23970


Reviewed-by: default avatarJim Harris <jim.harris@samsung.com>
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarAleksey Marchuk <alexeymar@nvidia.com>
Community-CI: Mellanox Build Bot
parent dfaa72f9
Loading
Loading
Loading
Loading
+80 −2
Original line number Diff line number Diff line
@@ -1068,8 +1068,13 @@ _write_write_done(void *_req, int reduce_errno)
	_reduce_vol_complete_req(req, 0);
}

/* Describes one merged backing I/O: a run of backing io units with
 * consecutive indices that is issued as a single read or write to the
 * backing device instead of one request per io unit.
 */
struct reduce_merged_io_desc {
	uint64_t io_unit_index;		/* index of the first backing io unit in the run */
	uint32_t num_io_units;		/* number of consecutive io units covered by this op */
};

static void
_issue_backing_ops(struct spdk_reduce_vol_request *req, struct spdk_reduce_vol *vol,
_issue_backing_ops_without_merge(struct spdk_reduce_vol_request *req, struct spdk_reduce_vol *vol,
				 reduce_request_fn next_fn, bool is_write)
{
	struct iovec *iov;
@@ -1102,6 +1107,79 @@ _issue_backing_ops(struct spdk_reduce_vol_request *req, struct spdk_reduce_vol *
	}
}

/* Issue the backing read/write operations for @req, merging backing io
 * units whose indices are consecutive into a single larger operation to
 * reduce the number of requests sent to the backing device.
 *
 * @req     request whose chunk io units are read from / written to
 * @vol     reduce volume providing backing dev and io unit geometry
 * @next_fn completion callback invoked once all backing ops finish
 * @is_write true to write the (de)comp buffer out, false to read into it
 */
static void
_issue_backing_ops(struct spdk_reduce_vol_request *req, struct spdk_reduce_vol *vol,
		   reduce_request_fn next_fn, bool is_write)
{
	/* Capacity of the on-stack merged descriptor table.  Merging is only
	 * attempted when every io unit of the chunk fits in this table; the
	 * same named constant guards the fallback check below so the two
	 * cannot drift apart.
	 */
	enum { REDUCE_MAX_MERGED_IO_UNITS = 4 };
	struct reduce_merged_io_desc merged_io_desc[REDUCE_MAX_MERGED_IO_UNITS];
	struct iovec *iov;
	uint8_t *buf;
	uint32_t num_io = 0;
	uint32_t io_unit_counts = 0;
	uint32_t i;

	/* If the chunk spans more backing io units than the descriptor table
	 * can hold, don't merge - issue one backing op per io unit instead.
	 */
	if (vol->backing_io_units_per_chunk > REDUCE_MAX_MERGED_IO_UNITS) {
		_issue_backing_ops_without_merge(req, vol, next_fn, is_write);
		return;
	}

	if (req->chunk_is_compressed) {
		iov = req->comp_buf_iov;
		buf = req->comp_buf;
	} else {
		iov = req->decomp_buf_iov;
		buf = req->decomp_buf;
	}

	/* Coalesce each run of consecutive io unit indices into one
	 * descriptor.  The outer loop starts a new run; the inner loop
	 * extends it while the next index is adjacent.
	 */
	i = 0;
	while (i < req->num_io_units) {
		merged_io_desc[num_io].io_unit_index = req->chunk->io_unit_index[i];
		merged_io_desc[num_io].num_io_units = 1;
		while (i + 1 < req->num_io_units &&
		       req->chunk->io_unit_index[i] + 1 == req->chunk->io_unit_index[i + 1]) {
			merged_io_desc[num_io].num_io_units++;
			i++;
		}
		num_io++;
		i++;
	}

	req->num_backing_ops = num_io;
	req->backing_cb_args.cb_fn = next_fn;
	req->backing_cb_args.cb_arg = req;
	for (i = 0; i < num_io; i++) {
		/* Each merged op covers num_io_units contiguous io units both
		 * in the staging buffer and on the backing device.
		 */
		iov[i].iov_base = buf + io_unit_counts * vol->params.backing_io_unit_size;
		iov[i].iov_len = vol->params.backing_io_unit_size * merged_io_desc[i].num_io_units;
		if (is_write) {
			vol->backing_dev->writev(vol->backing_dev, &iov[i], 1,
						 merged_io_desc[i].io_unit_index * vol->backing_lba_per_io_unit,
						 vol->backing_lba_per_io_unit * merged_io_desc[i].num_io_units,
						 &req->backing_cb_args);
		} else {
			vol->backing_dev->readv(vol->backing_dev, &iov[i], 1,
						merged_io_desc[i].io_unit_index * vol->backing_lba_per_io_unit,
						vol->backing_lba_per_io_unit * merged_io_desc[i].num_io_units,
						&req->backing_cb_args);
		}

		/* Advance past the io units consumed by this merged op. */
		io_unit_counts += merged_io_desc[i].num_io_units;
	}
}

static void
_reduce_vol_write_chunk(struct spdk_reduce_vol_request *req, reduce_request_fn next_fn,
			uint32_t compressed_size)