Commit 7ea544b4 authored by Daniel Verkamp, committed by Jim Harris
Browse files

nvmf: move poll groups to public API



The end goal is to have the application create one poll group per core.
Then each poll group will have a single CQ per network device and an I/O
channel per back-end storage device to poll.

This is just the first step toward that, which is to wire up the
creation of the per-core poll groups in the application.

Note that the app poll groups don't do anything yet.  We'll need
additional library API changes to make the library use the existing poll
groups, rather than creating a new poll group per subsystem as we do
right now.

Change-Id: I2d4e2a5e5aa354d37714750f1d5b1d1e4ab9edce
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-on: https://review.gerrithub.io/381887


Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 6a0e6abc
Loading
Loading
Loading
Loading
+80 −1
Original line number Diff line number Diff line
@@ -39,9 +39,19 @@
#include "spdk/event.h"
#include "spdk/log.h"
#include "spdk/nvme.h"
#include "spdk/util.h"

/* Per-core application poll group: pairs a library-created poll group with
 * the application poller that drives it on its owning core. */
struct nvmf_tgt_poll_group {
	struct spdk_nvmf_poll_group *group;	/* poll group created via spdk_nvmf_poll_group_create() */
	struct spdk_poller *poller;		/* poller running nvmf_tgt_poll_group_poll() on this core */
};

/* The single NVMe-oF target instance for this application. */
struct spdk_nvmf_tgt *g_tgt = NULL;

/* Array of per-core poll groups, indexed by core number; allocated in
 * spdk_nvmf_startup(). */
static struct nvmf_tgt_poll_group *g_poll_groups = NULL;
/* Length of g_poll_groups: highest core number in the app core mask + 1. */
static size_t g_num_poll_groups = 0;
/* Number of poll group pollers still running; counted down during shutdown. */
static size_t g_active_poll_groups = 0;

/* Poller that accepts new connections via spdk_nvmf_tgt_accept(). */
static struct spdk_poller *g_acceptor_poller = NULL;

/* List of NVMe-oF subsystems owned by the application. */
static TAILQ_HEAD(, nvmf_tgt_subsystem) g_subsystems = TAILQ_HEAD_INITIALIZER(g_subsystems);
@@ -97,10 +107,41 @@ shutdown_subsystems(void)
}

/*
 * Event callback: one per-core poll group poller has finished unregistering
 * (scheduled from acceptor_poller_unregistered_event()).  When the last one
 * fires, every poller is stopped, so it is safe to destroy all poll groups
 * and continue shutdown with the subsystems.
 *
 * NOTE(review): the duplicated signature line below is the removed side of a
 * rename hunk in this diff (old name acceptor_poller_unregistered_event).
 */
static void
acceptor_poller_unregistered_event(void *arg1, void *arg2)
nvmf_tgt_poll_group_stopped_event(void *arg1, void *arg2)
{
	uint32_t core;

	g_active_poll_groups--;

	if (g_active_poll_groups == 0) {
		/* All of the poll group pollers are stopped, so we can now delete the poll groups safely. */
		SPDK_ENV_FOREACH_CORE(core) {
			struct nvmf_tgt_poll_group *app_poll_group = &g_poll_groups[core];

			spdk_nvmf_poll_group_destroy(app_poll_group->group);
		}

		shutdown_subsystems();
	}
}

/*
 * Event callback: the acceptor poller has been unregistered, so no new
 * connections will arrive.  Request unregistration of the poll group poller
 * on every core; each completion is delivered back to the current core as a
 * nvmf_tgt_poll_group_stopped_event, which counts down g_active_poll_groups.
 */
static void
acceptor_poller_unregistered_event(void *arg1, void *arg2)
{
	struct nvmf_tgt_poll_group *app_poll_group;
	struct spdk_event *event;
	uint32_t core;

	/* Stop poll group pollers on all cores */
	SPDK_ENV_FOREACH_CORE(core) {
		app_poll_group = &g_poll_groups[core];
		/* Completion events all target this core, so the countdown in
		 * nvmf_tgt_poll_group_stopped_event() is presumably serialized
		 * there — verify against SPDK event framework semantics. */
		event = spdk_event_allocate(spdk_env_get_current_core(),
					    nvmf_tgt_poll_group_stopped_event,
					    NULL, NULL);

		spdk_poller_unregister(&app_poll_group->poller, event);
	}
}

static void
spdk_nvmf_shutdown_cb(void)
@@ -231,9 +272,18 @@ acceptor_poll(void *arg)
	spdk_nvmf_tgt_accept(tgt);
}

/*
 * Poller callback: check this core's poll group for work completions.
 *
 * arg is the struct nvmf_tgt_poll_group registered with this poller in
 * spdk_nvmf_startup().
 */
static void
nvmf_tgt_poll_group_poll(void *arg)
{
	struct nvmf_tgt_poll_group *app_poll_group = arg;

	spdk_nvmf_poll_group_poll(app_poll_group->group);
}

static void
spdk_nvmf_startup(void *arg1, void *arg2)
{
	uint32_t core;
	int rc;

	rc = spdk_nvmf_parse_conf();
@@ -247,6 +297,35 @@ spdk_nvmf_startup(void *arg1, void *arg2)
		goto initialize_error;
	}

	/* Find the maximum core number */
	SPDK_ENV_FOREACH_CORE(core) {
		g_num_poll_groups = spdk_max(g_num_poll_groups, core + 1);
	}

	assert(g_num_poll_groups > 0);

	g_poll_groups = calloc(g_num_poll_groups, sizeof(*g_poll_groups));
	if (g_poll_groups == NULL) {
		goto initialize_error;
	}

	/* Create a poll group on each core in the app core mask. */
	g_active_poll_groups = 0;
	SPDK_ENV_FOREACH_CORE(core) {
		struct nvmf_tgt_poll_group *app_poll_group = &g_poll_groups[core];

		app_poll_group->group = spdk_nvmf_poll_group_create(g_tgt);
		if (app_poll_group->group == NULL) {
			SPDK_ERRLOG("Failed to create poll group for core %u\n", core);
			goto initialize_error;
		}

		spdk_poller_register(&app_poll_group->poller,
				     nvmf_tgt_poll_group_poll, app_poll_group,
				     core, 0);
		g_active_poll_groups++;
	}

	spdk_poller_register(&g_acceptor_poller, acceptor_poll, g_tgt,
			     g_spdk_nvmf_tgt_conf.acceptor_lcore,
			     g_spdk_nvmf_tgt_conf.acceptor_poll_rate);
+16 −0
Original line number Diff line number Diff line
@@ -99,6 +99,22 @@ struct spdk_bdev;
struct spdk_nvmf_request;
struct spdk_nvmf_host;
struct spdk_nvmf_listener;
struct spdk_nvmf_poll_group;

/**
 * Create a poll group on the given NVMe-oF target.
 *
 * \param tgt Target to create the poll group on.
 * \return new poll group, or NULL on failure.
 */
struct spdk_nvmf_poll_group *spdk_nvmf_poll_group_create(struct spdk_nvmf_tgt *tgt);

/**
 * Destroy a poll group previously created with spdk_nvmf_poll_group_create().
 *
 * \param group Poll group to destroy.
 */
void spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group);

/**
 * Check a poll group for work completions.
 *
 * \param group Poll group to poll.
 * \return 0 on success, negative on error.
 */
int spdk_nvmf_poll_group_poll(struct spdk_nvmf_poll_group *group);

/*
 * The NVMf subsystem, as indicated in the specification, is a collection
+0 −4
Original line number Diff line number Diff line
@@ -197,14 +197,10 @@ struct spdk_nvmf_subsystem {
struct spdk_nvmf_transport *spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt,
		enum spdk_nvme_transport_type);

struct spdk_nvmf_poll_group *spdk_nvmf_poll_group_create(
	struct spdk_nvmf_tgt *tgt);
void spdk_nvmf_poll_group_destroy(struct spdk_nvmf_poll_group *group);
int spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
			     struct spdk_nvmf_qpair *qpair);
int spdk_nvmf_poll_group_remove(struct spdk_nvmf_poll_group *group,
				struct spdk_nvmf_qpair *qpair);
int spdk_nvmf_poll_group_poll(struct spdk_nvmf_poll_group *group);

void spdk_nvmf_request_exec(struct spdk_nvmf_request *req);
int spdk_nvmf_request_complete(struct spdk_nvmf_request *req);