Commit 3580546b authored by Ben Walker's avatar Ben Walker Committed by Jim Harris
Browse files

nvmf: Implement the poll group as an io_channel



The poll group has the same uniqueness properties as an
io_channel (one per thread per target), so implement it as
an io_channel to take advantage of the message-passing
infrastructure already available.

Change-Id: I1777b91f0597a5a43ac0d0bbfdf878e874eb04f3
Signed-off-by: default avatarBen Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/388291


Reviewed-by: default avatarDaniel Verkamp <daniel.verkamp@intel.com>
Tested-by: default avatarSPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
parent 7b57e9f9
Loading
Loading
Loading
Loading
+123 −46
Original line number Diff line number Diff line
@@ -61,6 +61,53 @@ spdk_nvmf_tgt_opts_init(struct spdk_nvmf_tgt_opts *opts)
	opts->max_io_size = SPDK_NVMF_DEFAULT_MAX_IO_SIZE;
}

/*
 * Poller callback for a poll group.  Gives each transport-specific
 * poll group one opportunity to make progress.  If any transport
 * reports an error (negative return), this polling pass stops early;
 * the poller will run again on the next tick.
 */
static void
spdk_nvmf_poll_group_poll(void *ctx)
{
	struct spdk_nvmf_poll_group *group = ctx;
	struct spdk_nvmf_transport_poll_group *tgroup;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		if (spdk_nvmf_transport_poll_group_poll(tgroup) < 0) {
			return;
		}
	}
}

/*
 * io_channel create callback for the nvmf target.  Initializes the
 * per-thread poll group: creates one transport poll group for every
 * transport currently attached to the target, then registers the
 * group's poller.
 *
 * Returns 0 on success or a negative value if any transport poll
 * group could not be created (the io_channel framework treats a
 * nonzero return as failure).
 */
static int
spdk_nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_tgt *tgt = io_device;
	struct spdk_nvmf_poll_group *group = ctx_buf;
	struct spdk_nvmf_transport *transport;
	int rc;

	TAILQ_INIT(&group->tgroups);

	TAILQ_FOREACH(transport, &tgt->transports, link) {
		rc = spdk_nvmf_poll_group_add_transport(group, transport);
		if (rc != 0) {
			/* Previously this return value was ignored, leaving a
			 * partially-initialized poll group on failure.  Unwind
			 * any transport poll groups created so far and report
			 * the error to the io_channel framework.
			 */
			struct spdk_nvmf_transport_poll_group *tgroup, *tmp;

			TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
				TAILQ_REMOVE(&group->tgroups, tgroup, link);
				spdk_nvmf_transport_poll_group_destroy(tgroup);
			}
			return rc;
		}
	}

	group->poller = spdk_poller_register(spdk_nvmf_poll_group_poll, group, 0);

	return 0;
}

static void
spdk_nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf)
{
	struct spdk_nvmf_poll_group *group = ctx_buf;
	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;

	spdk_poller_unregister(&group->poller);

	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
		TAILQ_REMOVE(&group->tgroups, tgroup, link);
		spdk_nvmf_transport_poll_group_destroy(tgroup);
	}
}

struct spdk_nvmf_tgt *
spdk_nvmf_tgt_create(struct spdk_nvmf_tgt_opts *opts)
{
@@ -91,6 +138,11 @@ spdk_nvmf_tgt_create(struct spdk_nvmf_tgt_opts *opts)
	tgt->max_sid = 0;
	TAILQ_INIT(&tgt->transports);

	spdk_io_device_register(tgt,
				spdk_nvmf_tgt_create_poll_group,
				spdk_nvmf_tgt_destroy_poll_group,
				sizeof(struct spdk_nvmf_poll_group));

	SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "Max Queue Pairs Per Controller: %d\n",
		      tgt->opts.max_qpairs_per_ctrlr);
	SPDK_DEBUGLOG(SPDK_TRACE_NVMF, "Max Queue Depth: %d\n", tgt->opts.max_queue_depth);
@@ -122,6 +174,30 @@ spdk_nvmf_tgt_destroy(struct spdk_nvmf_tgt *tgt)
	free(tgt);
}

/* Context handed to spdk_for_each_channel() by spdk_nvmf_tgt_listen()
 * when a brand-new transport is created: carries the transport (and
 * the trid it was created for) to every poll group.  Allocated in
 * spdk_nvmf_tgt_listen() and freed in spdk_nvmf_tgt_listen_done().
 */
struct spdk_nvmf_tgt_listen_ctx {
	struct spdk_nvmf_transport *transport;
	struct spdk_nvme_transport_id trid;
};

/*
 * spdk_for_each_channel() completion callback: all poll groups have
 * been told about the new transport, so release the listen context
 * allocated in spdk_nvmf_tgt_listen().  The status is not inspected.
 */
static void
spdk_nvmf_tgt_listen_done(void *io_device, void *c, int status)
{
	struct spdk_nvmf_tgt_listen_ctx *ctx = c;

	free(ctx);
}

/*
 * spdk_for_each_channel() iteration callback: attach the newly
 * created transport (carried in the listen context) to this
 * channel's poll group.  Returns the result of the add operation;
 * nonzero aborts the iteration.
 */
static int
spdk_nvmf_tgt_listen_add_transport(void *io_device,
				   struct spdk_io_channel *ch,
				   void *c)
{
	struct spdk_nvmf_tgt_listen_ctx *ctx = c;

	return spdk_nvmf_poll_group_add_transport(spdk_io_channel_get_ctx(ch),
						  ctx->transport);
}

int
spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt,
		     struct spdk_nvme_transport_id *trid)
@@ -131,19 +207,39 @@ spdk_nvmf_tgt_listen(struct spdk_nvmf_tgt *tgt,

	transport = spdk_nvmf_tgt_get_transport(tgt, trid->trtype);
	if (!transport) {
		struct spdk_nvmf_tgt_listen_ctx *ctx;

		transport = spdk_nvmf_transport_create(tgt, trid->trtype);
		if (!transport) {
			SPDK_ERRLOG("Transport initialization failed\n");
			return -EINVAL;
		}
		TAILQ_INSERT_TAIL(&tgt->transports, transport, link);

		ctx = calloc(1, sizeof(*ctx));
		if (!ctx) {
			return -ENOMEM;
		}

		ctx->trid = *trid;
		ctx->transport = transport;

		/* Send a message to each poll group to notify it that a new transport
		 * is available.
		 * TODO: This call does not currently allow the user to wait for these
		 * messages to propagate. It also does not protect against two calls
		 * to this function overlapping
		 */
		spdk_for_each_channel(tgt,
				      spdk_nvmf_tgt_listen_add_transport,
				      ctx,
				      spdk_nvmf_tgt_listen_done);
	}

	rc = spdk_nvmf_transport_listen(transport, trid);
	if (rc < 0) {
		SPDK_ERRLOG("Unable to listen on address '%s'\n", trid->traddr);
		return -EINVAL;
		return rc;
	}

	tgt->discovery_genctr++;
@@ -199,63 +295,27 @@ spdk_nvmf_tgt_accept(struct spdk_nvmf_tgt *tgt)
	}
}

/* NOTE(review): these are the deletion lines of the diff — this is
 * the pre-existing definition of spdk_nvmf_poll_group_poll being
 * removed by this commit.  The identical function is re-added earlier
 * in the file, next to the new io_channel create/destroy callbacks.
 */
static void
spdk_nvmf_poll_group_poll(void *ctx)
{
	struct spdk_nvmf_poll_group *group = ctx;
	int rc;
	struct spdk_nvmf_transport_poll_group *tgroup;

	/* Poll each transport poll group; stop this pass on the first error. */
	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		rc = spdk_nvmf_transport_poll_group_poll(tgroup);
		if (rc < 0) {
			return;
		}
	}
}

/* NOTE(review): this span is rendered diff output with the removed
 * (old) and added (new) lines interleaved — it is NOT valid C as it
 * stands.  Old version: calloc a group, build transport poll groups,
 * register a poller.  New version: simply get the target's io_channel
 * and return its context (the channel create callback now does the
 * setup).  Left byte-identical; resolve against the real source tree.
 */
struct spdk_nvmf_poll_group *
spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt)
{
	struct spdk_nvmf_poll_group *group;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *tgroup;
	struct spdk_io_channel *ch;

	group = calloc(1, sizeof(*group));
	if (!group) {
	ch = spdk_get_io_channel(tgt);
	if (!ch) {
		SPDK_ERRLOG("Unable to get I/O channel for target\n");
		return NULL;
	}

	TAILQ_INIT(&group->tgroups);

	TAILQ_FOREACH(transport, &tgt->transports, link) {
		tgroup = spdk_nvmf_transport_poll_group_create(transport);
		if (!tgroup) {
			SPDK_ERRLOG("Unable to create poll group for transport\n");
			continue;
		}

		TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);
	}

	group->poller = spdk_poller_register(spdk_nvmf_poll_group_poll, group, 0);

	return group;
	return spdk_io_channel_get_ctx(ch);
}

/* NOTE(review): rendered diff output with removed and added lines
 * interleaved — not valid C as it stands.  Old version: unregister
 * the poller, destroy each transport poll group, free the group.
 * New version: just put the io_channel; the channel destroy callback
 * (spdk_nvmf_tgt_destroy_poll_group) performs the teardown.  Left
 * byte-identical; resolve against the real source tree.
 */
void
spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group)
{
	struct spdk_nvmf_transport_poll_group *tgroup, *tmp;

	spdk_poller_unregister(&group->poller);

	TAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp) {
		TAILQ_REMOVE(&group->tgroups, tgroup, link);
		spdk_nvmf_transport_poll_group_destroy(tgroup);
	}
	struct spdk_io_channel *ch;

	free(group);
	ch = spdk_io_channel_from_ctx(group);
	spdk_put_io_channel(ch);
}

int
@@ -292,6 +352,23 @@ spdk_nvmf_poll_group_remove(struct spdk_nvmf_poll_group *group,
	return rc;
}

/**
 * Create a transport-specific poll group for \c transport and attach
 * it to \c group.
 *
 * \param group The poll group to extend.
 * \param transport The transport to create a poll group for.
 * \return 0 on success, negated errno on failure.  Previously this
 * returned a bare -1; a negated errno matches the error convention
 * used elsewhere in this file (-EINVAL, -ENOMEM) and remains
 * backward-compatible with callers checking for a nonzero/negative
 * result.
 */
int
spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
				   struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	tgroup = spdk_nvmf_transport_poll_group_create(transport);
	if (!tgroup) {
		SPDK_ERRLOG("Unable to create poll group for transport\n");
		return -ENOMEM;
	}

	TAILQ_INSERT_TAIL(&group->tgroups, tgroup, link);

	return 0;
}

SPDK_TRACE_REGISTER_FN(nvmf_trace)
{
	spdk_trace_register_object(OBJECT_NVMF_IO, 'r');
+2 −0
Original line number Diff line number Diff line
@@ -205,6 +205,8 @@ int spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
			     struct spdk_nvmf_qpair *qpair);
int spdk_nvmf_poll_group_remove(struct spdk_nvmf_poll_group *group,
				struct spdk_nvmf_qpair *qpair);
int spdk_nvmf_poll_group_add_transport(struct spdk_nvmf_poll_group *group,
				       struct spdk_nvmf_transport *transport);

void spdk_nvmf_request_exec(struct spdk_nvmf_request *req);
int spdk_nvmf_request_complete(struct spdk_nvmf_request *req);