Commit 3616be85 authored by Jim Harris, committed by Konrad Sztyber
Browse files

examples/nvme/perf: connect io qpairs asynchronously



This significantly speeds up testing with high connection
workloads (i.e. -P 64) with TCP especially.  We already
set async_mode=true all of the time for the bdev/nvme
module, so there's no reason we shouldn't do it in
perf too.

After allocating all of the IO qpairs, busy poll the
poll group, using the new spdk_nvme_poll_group_all_connected()
API to ensure the qpairs are all connected before proceeding
with I/O.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: If0c3c944cd5f3d87170a5bbf7d766ac1a4dcef7c
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17578


Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
parent 366aabdf
Loading
Loading
Loading
Loading
+19 −2
Original line number Diff line number Diff line
@@ -983,7 +983,8 @@ nvme_init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
	struct ns_entry *entry = ns_ctx->entry;
	struct spdk_nvme_poll_group *group;
	struct spdk_nvme_qpair *qpair;
	int i;
	uint64_t poll_timeout_tsc;
	int i, rc;

	ns_ctx->u.nvme.num_active_qpairs = g_nr_io_queues_per_ns;
	ns_ctx->u.nvme.num_all_qpairs = g_nr_io_queues_per_ns + g_nr_unused_io_queues;
@@ -998,6 +999,7 @@ nvme_init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
	}
	opts.delay_cmd_submit = true;
	opts.create_only = true;
	opts.async_mode = true;

	ns_ctx->u.nvme.group = spdk_nvme_poll_group_create(NULL, NULL);
	if (ns_ctx->u.nvme.group == NULL) {
@@ -1027,7 +1029,22 @@ nvme_init_ns_worker_ctx(struct ns_worker_ctx *ns_ctx)
		}
	}

	/* Busy poll here until all qpairs are connected - this ensures once we start
	 * I/O we aren't still waiting for some qpairs to connect. Limit the poll to
	 * 10 seconds though.
	 */
	poll_timeout_tsc = spdk_get_ticks() + 10 * spdk_get_ticks_hz();
	rc = -EAGAIN;
	while (spdk_get_ticks() < poll_timeout_tsc && rc == -EAGAIN) {
		spdk_nvme_poll_group_process_completions(group, 0, perf_disconnect_cb);
		rc = spdk_nvme_poll_group_all_connected(group);
		if (rc == 0) {
			return 0;
		}
	}

	/* If we reach here, it means we either timed out, or some connection failed. */
	assert(spdk_get_ticks() > poll_timeout_tsc || rc == -EIO);

qpair_failed:
	for (; i > 0; --i) {