Commit 2881f7f6 authored by Konrad Sztyber's avatar Konrad Sztyber Committed by Jim Harris
Browse files

nvmf: queue subsystem state change requests



When a user requests a subsystem state change while the subsystem is
already changing state, that request will now get queued and will be
serviced after the previous state change is completed.

Fixes #3079.

Suggested-by: Jim Harris <jim.harris@samsung.com>
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: Iff1d5cf73d641f0633735bffc8f77d5eefc92d55
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/22942


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <jim.harris@samsung.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
parent ca48edfe
Loading
Loading
Loading
Loading
+11 −8
Original line number Diff line number Diff line
@@ -266,6 +266,7 @@ struct nvmf_subsystem_state_change_ctx {

	spdk_nvmf_subsystem_state_change_done		cb_fn;
	void						*cb_arg;
	TAILQ_ENTRY(nvmf_subsystem_state_change_ctx)	link;
};

struct spdk_nvmf_subsystem {
@@ -335,6 +336,8 @@ struct spdk_nvmf_subsystem {
	 * It will be enough for ANA group to use the same size as namespaces.
	 */
	uint32_t					*ana_group;
	/* Queue of a state change requests */
	TAILQ_HEAD(, nvmf_subsystem_state_change_ctx)	state_changes;
};

static int
+31 −6
Original line number Diff line number Diff line
@@ -270,6 +270,7 @@ spdk_nvmf_subsystem_create(struct spdk_nvmf_tgt *tgt,
	TAILQ_INIT(&subsystem->listeners);
	TAILQ_INIT(&subsystem->hosts);
	TAILQ_INIT(&subsystem->ctrlrs);
	TAILQ_INIT(&subsystem->state_changes);
	subsystem->used_listener_ids = spdk_bit_array_create(NVMF_MAX_LISTENERS_PER_SUBSYSTEM);
	if (subsystem->used_listener_ids == NULL) {
		pthread_mutex_destroy(&subsystem->mutex);
@@ -359,6 +360,7 @@ _nvmf_subsystem_destroy_msg(void *cb_arg)
static int
_nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem)
{
	struct nvmf_subsystem_state_change_ctx *ctx;
	struct spdk_nvmf_ns		*ns;
	nvmf_subsystem_destroy_cb	async_destroy_cb = NULL;
	void				*async_destroy_cb_arg = NULL;
@@ -384,6 +386,15 @@ _nvmf_subsystem_destroy(struct spdk_nvmf_subsystem *subsystem)
		ns = next_ns;
	}

	while ((ctx = TAILQ_FIRST(&subsystem->state_changes))) {
		SPDK_WARNLOG("subsystem %s has pending state change requests\n", subsystem->subnqn);
		TAILQ_REMOVE(&subsystem->state_changes, ctx, link);
		if (ctx->cb_fn != NULL) {
			ctx->cb_fn(subsystem, ctx->cb_arg, -ECANCELED);
		}
		free(ctx);
	}

	free(subsystem->ns);
	free(subsystem->ana_group);

@@ -563,21 +574,31 @@ nvmf_subsystem_set_state(struct spdk_nvmf_subsystem *subsystem,
	return actual_old_state - expected_old_state;
}

static void nvmf_subsystem_do_state_change(struct nvmf_subsystem_state_change_ctx *ctx);

static void
_nvmf_subsystem_state_change_complete(void *_ctx)
{
	struct nvmf_subsystem_state_change_ctx *ctx = _ctx;
	struct nvmf_subsystem_state_change_ctx *next, *ctx = _ctx;
	struct spdk_nvmf_subsystem *subsystem = ctx->subsystem;

	pthread_mutex_lock(&subsystem->mutex);
	assert(TAILQ_FIRST(&subsystem->state_changes) == ctx);
	TAILQ_REMOVE(&subsystem->state_changes, ctx, link);
	next = TAILQ_FIRST(&subsystem->state_changes);
	if (next == NULL) {
		subsystem->changing_state = false;
	}
	pthread_mutex_unlock(&subsystem->mutex);

	if (ctx->cb_fn != NULL) {
		ctx->cb_fn(subsystem, ctx->cb_arg, ctx->status);
	}

	free(ctx);

	if (next != NULL) {
		nvmf_subsystem_do_state_change(next);
	}
}

static void
@@ -725,7 +746,7 @@ nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem,
			    spdk_nvmf_subsystem_state_change_done cb_fn,
			    void *cb_arg)
{
	struct nvmf_subsystem_state_change_ctx *ctx;
	struct nvmf_subsystem_state_change_ctx *ctx, *prev __attribute__((unused));
	struct spdk_thread *thread;

	thread = spdk_get_thread();
@@ -746,12 +767,16 @@ nvmf_subsystem_state_change(struct spdk_nvmf_subsystem *subsystem,
	ctx->thread = thread;

	pthread_mutex_lock(&subsystem->mutex);
	prev = TAILQ_FIRST(&subsystem->state_changes);
	TAILQ_INSERT_TAIL(&subsystem->state_changes, ctx, link);

	if (subsystem->changing_state) {
		assert(prev != NULL);
		pthread_mutex_unlock(&subsystem->mutex);
		free(ctx);
		return -EBUSY;
		return 0;
	}

	assert(prev == NULL);
	subsystem->changing_state = true;
	pthread_mutex_unlock(&subsystem->mutex);

+12 −3
Original line number Diff line number Diff line
@@ -1582,6 +1582,7 @@ test_spdk_nvmf_ns_event(void)
		.max_nsid = 1024,
		.ns = NULL,
		.tgt = &tgt,
		.state_changes = TAILQ_HEAD_INITIALIZER(subsystem.state_changes),
	};
	struct spdk_nvmf_ctrlr ctrlr = {
		.subsys = &subsystem
@@ -2032,6 +2033,13 @@ test_nvmf_ns_reservation_restore(void)
	CU_ASSERT(TAILQ_EMPTY(&ns.registrants));
}

static void
ut_nvmf_subsystem_paused(struct spdk_nvmf_subsystem *subsystem, void *ctx, int status)
{
	CU_ASSERT_EQUAL(status, 0);
	CU_ASSERT_EQUAL(subsystem->state, SPDK_NVMF_SUBSYSTEM_PAUSED);
}

static void
test_nvmf_subsystem_state_change(void)
{
@@ -2065,12 +2073,13 @@ test_nvmf_subsystem_state_change(void)
	poll_threads();
	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVE);

	rc = spdk_nvmf_subsystem_pause(subsystem, SPDK_NVME_GLOBAL_NS_TAG, NULL, NULL);
	rc = spdk_nvmf_subsystem_pause(subsystem, SPDK_NVME_GLOBAL_NS_TAG,
				       ut_nvmf_subsystem_paused, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_nvmf_subsystem_stop(subsystem, NULL, NULL);
	CU_ASSERT(rc == -EBUSY);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED);
	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);

	rc = spdk_nvmf_subsystem_stop(discovery_subsystem, NULL, NULL);
	CU_ASSERT(rc == 0);