Commit aac967c0 authored by Jim Harris
Browse files

lib/nvmf: create pollers for each transport poll group



Having more granular pollers makes it easier to detect and report
whether any given poller has pending work in progress.

Signed-off-by: Jim Harris <jim.harris@samsung.com>
Change-Id: I640eca2c702ac07eec1b84e3f541564fd0d44a12
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/25184


Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Community-CI: Community CI Samsung <spdk.community.ci.samsung@gmail.com>
Reviewed-by: Ben Walker <ben@nvidia.com>
parent b8c964e2
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -195,12 +195,12 @@ struct spdk_nvmf_transport_poll_group {
	STAILQ_HEAD(, spdk_nvmf_request)				pending_buf_queue;
	struct spdk_iobuf_channel					*buf_cache;
	struct spdk_nvmf_poll_group					*group;
	struct spdk_poller						*poller;
	TAILQ_ENTRY(spdk_nvmf_transport_poll_group)			link;
};

struct spdk_nvmf_poll_group {
	struct spdk_thread				*thread;
	struct spdk_poller				*poller;

	TAILQ_HEAD(, spdk_nvmf_transport_poll_group)	tgroups;

+8 −27
Original line number Diff line number Diff line
@@ -145,25 +145,6 @@ nvmf_qpair_set_state(struct spdk_nvmf_qpair *qpair,
	qpair->state = state;
}

/*
 * Poller callback for an nvmf poll group: drives every transport
 * poll group attached to it.  Returns SPDK_POLLER_BUSY when any
 * transport did work (or reported an error), SPDK_POLLER_IDLE
 * otherwise.
 */
static int
nvmf_poll_group_poll(void *ctx)
{
	struct spdk_nvmf_poll_group *group = ctx;
	struct spdk_nvmf_transport_poll_group *tgroup;
	int total = 0;

	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		int rc = nvmf_transport_poll_group_poll(tgroup);

		/* A negative rc indicates a transport-level error; report the
		 * poller as busy so the thread keeps servicing the group.
		 */
		if (rc < 0) {
			return SPDK_POLLER_BUSY;
		}

		total += rc;
	}

	if (total == 0) {
		return SPDK_POLLER_IDLE;
	}

	return SPDK_POLLER_BUSY;
}

/*
 * Reset and clean up the poll group (I/O channel code will actually free the
 * group).
@@ -197,8 +178,6 @@ nvmf_tgt_cleanup_poll_group(struct spdk_nvmf_poll_group *group)

	free(group->sgroups);

	spdk_poller_unregister(&group->poller);

	if (group->destroy_cb_fn) {
		group->destroy_cb_fn(group->destroy_cb_arg, 0);
	}
@@ -266,9 +245,6 @@ nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf)
	group->thread = thread;
	pthread_mutex_init(&group->mutex, NULL);

	group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0);
	spdk_poller_register_interrupt(group->poller, NULL, NULL);

	SPDK_DTRACE_PROBE1_TICKS(nvmf_create_poll_group, spdk_thread_get_id(thread));

	TAILQ_FOREACH(transport, &tgt->transports, link) {
@@ -996,8 +972,11 @@ _nvmf_tgt_pause_polling(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	struct spdk_nvmf_transport_poll_group *tgroup;

	spdk_poller_unregister(&group->poller);
	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		nvmf_transport_poll_group_pause(tgroup);
	}

	spdk_for_each_channel_continue(i, 0);
}
@@ -1055,9 +1034,11 @@ _nvmf_tgt_resume_polling(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_nvmf_poll_group *group = spdk_io_channel_get_ctx(ch);
	struct spdk_nvmf_transport_poll_group *tgroup;

	assert(group->poller == NULL);
	group->poller = SPDK_POLLER_REGISTER(nvmf_poll_group_poll, group, 0);
	TAILQ_FOREACH(tgroup, &group->tgroups, link) {
		nvmf_transport_poll_group_resume(tgroup);
	}

	spdk_for_each_channel_continue(i, 0);
}
+35 −0
Original line number Diff line number Diff line
@@ -571,6 +571,26 @@ nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
	transport->ops->listener_discover(transport, trid, entry);
}

/*
 * Per-transport poll group poller callback.  Busy when the transport
 * processed any completions; an error (rc < 0) is also reported as
 * busy, matching the poll-group-wide poller's behavior.
 */
static int
nvmf_tgroup_poll(void *arg)
{
	struct spdk_nvmf_transport_poll_group *tgroup = arg;

	if (nvmf_transport_poll_group_poll(tgroup) == 0) {
		return SPDK_POLLER_IDLE;
	}

	return SPDK_POLLER_BUSY;
}

/*
 * Register a named poller for one transport poll group.  The poller is
 * named "nvmf_<transport>" so per-transport activity shows up
 * individually in poller statistics.
 *
 * NOTE(review): the result of spdk_poller_register_named() is not
 * checked before being handed to spdk_poller_register_interrupt() —
 * presumably registration cannot fail here; confirm.
 */
static void
nvmf_transport_poll_group_create_poller(struct spdk_nvmf_transport_poll_group *tgroup)
{
	struct spdk_poller *poller;
	char name[SPDK_NVMF_TRSTRING_MAX_LEN + 32];

	snprintf(name, sizeof(name), "nvmf_%s", tgroup->transport->ops->name);
	poller = spdk_poller_register_named(nvmf_tgroup_poll, tgroup, 0, name);
	spdk_poller_register_interrupt(poller, NULL, NULL);
	tgroup->poller = poller;
}

struct spdk_nvmf_transport_poll_group *
nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport,
				 struct spdk_nvmf_poll_group *group)
@@ -587,6 +607,7 @@ nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport,
		return NULL;
	}
	tgroup->transport = transport;
	nvmf_transport_poll_group_create_poller(tgroup);

	STAILQ_INIT(&tgroup->pending_buf_queue);

@@ -672,6 +693,8 @@ nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)

	transport = group->transport;

	spdk_poller_unregister(&group->poller);

	if (!STAILQ_EMPTY(&group->pending_buf_queue)) {
		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
	}
@@ -693,6 +716,18 @@ nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
	}
}

/*
 * Stop polling this transport poll group by unregistering its poller.
 * spdk_poller_unregister() also NULLs tgroup->poller, so a later
 * resume can re-create it.
 */
void
nvmf_transport_poll_group_pause(struct spdk_nvmf_transport_poll_group *tgroup)
{
	spdk_poller_unregister(&tgroup->poller);
}

/*
 * Resume polling a previously paused transport poll group by
 * registering a fresh per-transport poller.
 */
void
nvmf_transport_poll_group_resume(struct spdk_nvmf_transport_poll_group *tgroup)
{
	nvmf_transport_poll_group_create_poller(tgroup);
}

int
nvmf_transport_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_qpair *qpair)
+3 −0
Original line number Diff line number Diff line
@@ -23,6 +23,9 @@ struct spdk_nvmf_transport_poll_group *nvmf_transport_get_optimal_poll_group(

void nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group);

void nvmf_transport_poll_group_pause(struct spdk_nvmf_transport_poll_group *group);
void nvmf_transport_poll_group_resume(struct spdk_nvmf_transport_poll_group *group);

int nvmf_transport_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
				  struct spdk_nvmf_qpair *qpair);

+0 −1
Original line number Diff line number Diff line
@@ -208,7 +208,6 @@ test_nvmf_tgt_create_poll_group(void)
	CU_ASSERT(TAILQ_FIRST(&tgt.poll_groups) == &group);
	CU_ASSERT(tgt.num_poll_groups == 1);
	CU_ASSERT(group.thread == thread);
	CU_ASSERT(group.poller != NULL);

	nvmf_tgt_destroy_poll_group((void *)&tgt, (void *)&group);
	CU_ASSERT(TAILQ_EMPTY(&tgt.poll_groups));
Loading