Commit 517e85fc authored by Shuhei Matsumoto's avatar Shuhei Matsumoto Committed by Konrad Sztyber
Browse files

bdev/nvme: Factor out operations under mutex from bdev_nvme_reset_ctrlr()



Factor out the operations performed while the mutex is held from
bdev_nvme_reset_ctrlr() into bdev_nvme_reset_ctrlr_unsafe().

We want to move the pending reset list from per-SPDK-thread scope to a
global one, and then move the enqueue operation into bdev_nvme_reset_ctrlr().

However, bdev_nvme_reset_ctrlr() is shared between JSON RPC and bdev_io.
To keep the code clean, this refactoring is necessary.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: I5a5333594ff19956575e3c4d6398f3a275c0dfc6
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/25288


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Jim Harris <jim.harris@samsung.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Community-CI: Community CI Samsung <spdk.community.ci.samsung@gmail.com>
parent f2f4b5f2
Loading
Loading
Loading
Loading
+19 −11
Original line number Diff line number Diff line
@@ -2586,24 +2586,18 @@ _bdev_nvme_reset_ctrlr(void *ctx)
}

static int
bdev_nvme_reset_ctrlr(struct nvme_ctrlr *nvme_ctrlr)
bdev_nvme_reset_ctrlr_unsafe(struct nvme_ctrlr *nvme_ctrlr, spdk_msg_fn *msg_fn)
{
	spdk_msg_fn msg_fn;

	pthread_mutex_lock(&nvme_ctrlr->mutex);
	if (nvme_ctrlr->destruct) {
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
		return -ENXIO;
	}

	if (nvme_ctrlr->resetting) {
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
		NVME_CTRLR_NOTICELOG(nvme_ctrlr, "Unable to perform reset, already in progress.\n");
		return -EBUSY;
	}

	if (nvme_ctrlr->disabled) {
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
		NVME_CTRLR_NOTICELOG(nvme_ctrlr, "Unable to perform reset. Controller is disabled.\n");
		return -EALREADY;
	}
@@ -2613,19 +2607,33 @@ bdev_nvme_reset_ctrlr(struct nvme_ctrlr *nvme_ctrlr)

	if (nvme_ctrlr->reconnect_is_delayed) {
		NVME_CTRLR_INFOLOG(nvme_ctrlr, "Reconnect is already scheduled.\n");
		msg_fn = bdev_nvme_reconnect_ctrlr_now;
		*msg_fn = bdev_nvme_reconnect_ctrlr_now;
		nvme_ctrlr->reconnect_is_delayed = false;
	} else {
		msg_fn = _bdev_nvme_reset_ctrlr;
		*msg_fn = _bdev_nvme_reset_ctrlr;
		assert(nvme_ctrlr->reset_start_tsc == 0);
	}

	nvme_ctrlr->reset_start_tsc = spdk_get_ticks();

	return 0;
}

/* Start a controller reset. Thread-safe wrapper: takes the ctrlr mutex,
 * validates state and picks the message function via
 * bdev_nvme_reset_ctrlr_unsafe(), then dispatches it to the ctrlr's
 * thread outside the lock.
 *
 * Returns 0 on success; -ENXIO if the ctrlr is being destructed,
 * -EBUSY if a reset is already in progress, -EALREADY if the ctrlr
 * is disabled (error codes come from the unsafe helper).
 */
static int
bdev_nvme_reset_ctrlr(struct nvme_ctrlr *nvme_ctrlr)
{
	spdk_msg_fn msg_fn;
	int rc;

	pthread_mutex_lock(&nvme_ctrlr->mutex);
	rc = bdev_nvme_reset_ctrlr_unsafe(nvme_ctrlr, &msg_fn);
	pthread_mutex_unlock(&nvme_ctrlr->mutex);

	if (rc == 0) {
		/* msg_fn is either the reconnect-now or the normal reset
		 * entry point, selected under the mutex above. No redundant
		 * early return here: rc is already 0 on this path, so the
		 * single return below covers both outcomes.
		 */
		spdk_thread_send_msg(nvme_ctrlr->thread, msg_fn, nvme_ctrlr);
	}

	return rc;
}

static int