Commit a2d4b4b1 authored by Shuhei Matsumoto's avatar Shuhei Matsumoto Committed by Konrad Sztyber
Browse files

bdev/nvme: Add NVME_CTRLR_*LOG() to identify ctrlr



By following the NVMe driver, add NVME_CTRLR_*LOG() to identify ctrlr
uniquely. To cover multipath case, use traddr or subnqn.

Just replace existing logs related to nvme_ctrlr with NVME_CTRLR_*LOG().

Signed-off-by: default avatarShuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: I60ad0c5d4a16da9ff595cae1fc14fe296767d94d
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/25178


Reviewed-by: default avatarAleksey Marchuk <alexeymar@nvidia.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarJim Harris <jim.harris@samsung.com>
Community-CI: Community CI Samsung <spdk.community.ci.samsung@gmail.com>
parent 7251e4c2
Loading
Loading
Loading
Loading
+101 −63
Original line number Diff line number Diff line
@@ -32,6 +32,29 @@
#include "spdk_internal/usdt.h"
#include "spdk_internal/trace_defs.h"

/*
 * Helper macros that prefix bdev_nvme log messages with a string identifying
 * the nvme_ctrlr uniquely.  For fabrics transports the subsystem NQN is used
 * (stable across multipath failover); otherwise (PCIe) the transport address.
 * The macro argument is parenthesized so any pointer expression may be passed.
 */
#define CTRLR_STRING(nvme_ctrlr) \
	(spdk_nvme_trtype_is_fabrics((nvme_ctrlr)->active_path_id->trid.trtype) ? \
	(nvme_ctrlr)->active_path_id->trid.subnqn : (nvme_ctrlr)->active_path_id->trid.traddr)

/*
 * NOTE(review): the original definitions ended with a stray ';' inside the
 * macro body, which expands to a double semicolon at every call site and
 * breaks use in an unbraced if/else (CERT PRE11-C).  The trailing semicolon
 * is dropped so the caller's ';' terminates the statement.
 */
#define NVME_CTRLR_ERRLOG(ctrlr, format, ...) \
	SPDK_ERRLOG("[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__)

#define NVME_CTRLR_WARNLOG(ctrlr, format, ...) \
	SPDK_WARNLOG("[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__)

#define NVME_CTRLR_NOTICELOG(ctrlr, format, ...) \
	SPDK_NOTICELOG("[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__)

#define NVME_CTRLR_INFOLOG(ctrlr, format, ...) \
	SPDK_INFOLOG(bdev_nvme, "[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__)

#ifdef DEBUG
#define NVME_CTRLR_DEBUGLOG(ctrlr, format, ...) \
	SPDK_DEBUGLOG(bdev_nvme, "[%s] " format, CTRLR_STRING(ctrlr), ##__VA_ARGS__)
#else
/* Swallow all arguments when DEBUG is off; do/while(0) keeps it a statement. */
#define NVME_CTRLR_DEBUGLOG(ctrlr, ...) do { } while (0)
#endif

#define SPDK_BDEV_NVME_DEFAULT_DELAY_CMD_SUBMIT true
#define SPDK_BDEV_NVME_DEFAULT_KEEP_ALIVE_TIMEOUT_IN_MS	(10000)

@@ -648,13 +671,13 @@ nvme_ctrlr_delete(struct nvme_ctrlr *nvme_ctrlr)
	nvme_ctrlr->reset_detach_poller = SPDK_POLLER_REGISTER(nvme_detach_poller,
					  nvme_ctrlr, 1000);
	if (nvme_ctrlr->reset_detach_poller == NULL) {
		SPDK_ERRLOG("Failed to register detach poller\n");
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "Failed to register detach poller\n");
		goto error;
	}

	rc = spdk_nvme_detach_async(nvme_ctrlr->ctrlr, &nvme_ctrlr->detach_ctx);
	if (rc != 0) {
		SPDK_ERRLOG("Failed to detach the NVMe controller\n");
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "Failed to detach the NVMe controller\n");
		goto error;
	}

@@ -1691,6 +1714,7 @@ bdev_nvme_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_
{
	struct nvme_poll_group *group = poll_group_ctx;
	struct nvme_qpair *nvme_qpair;
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_ctrlr_channel *ctrlr_ch;
	int status;

@@ -1706,6 +1730,7 @@ bdev_nvme_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_

	_bdev_nvme_clear_io_path_cache(nvme_qpair);

	nvme_ctrlr = nvme_qpair->ctrlr;
	ctrlr_ch = nvme_qpair->ctrlr_ch;

	if (ctrlr_ch != NULL) {
@@ -1713,13 +1738,15 @@ bdev_nvme_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_
			/* We are in a full reset sequence. */
			if (ctrlr_ch->connect_poller != NULL) {
				/* qpair was failed to connect. Abort the reset sequence. */
				SPDK_DEBUGLOG(bdev_nvme, "qpair %p was failed to connect. abort the reset ctrlr sequence.\n",
				NVME_CTRLR_DEBUGLOG(nvme_ctrlr,
						    "qpair %p was failed to connect. abort the reset ctrlr sequence.\n",
						    qpair);
				spdk_poller_unregister(&ctrlr_ch->connect_poller);
				status = -1;
			} else {
				/* qpair was completed to disconnect. Just move to the next ctrlr_channel. */
				SPDK_DEBUGLOG(bdev_nvme, "qpair %p was disconnected and freed in a reset ctrlr sequence.\n",
				NVME_CTRLR_DEBUGLOG(nvme_ctrlr,
						    "qpair %p was disconnected and freed in a reset ctrlr sequence.\n",
						    qpair);
				status = 0;
			}
@@ -1727,12 +1754,13 @@ bdev_nvme_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_
			ctrlr_ch->reset_iter = NULL;
		} else {
			/* qpair was disconnected unexpectedly. Reset controller for recovery. */
			SPDK_NOTICELOG("qpair %p was disconnected and freed. reset controller.\n", qpair);
			bdev_nvme_failover_ctrlr(nvme_qpair->ctrlr);
			NVME_CTRLR_NOTICELOG(nvme_ctrlr, "qpair %p was disconnected and freed. reset controller.\n",
					     qpair);
			bdev_nvme_failover_ctrlr(nvme_ctrlr);
		}
	} else {
		/* In this case, ctrlr_channel is already deleted. */
		SPDK_DEBUGLOG(bdev_nvme, "qpair %p was disconnected and freed. delete nvme_qpair.\n", qpair);
		NVME_CTRLR_DEBUGLOG(nvme_ctrlr, "qpair %p was disconnected and freed. delete nvme_qpair.\n", qpair);
		nvme_qpair_delete(nvme_qpair);
	}
}
@@ -1899,13 +1927,13 @@ bdev_nvme_create_qpair(struct nvme_qpair *nvme_qpair)

	rc = spdk_nvme_poll_group_add(nvme_qpair->group->group, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to begin polling on NVMe Channel.\n");
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "Unable to begin polling on NVMe Channel.\n");
		goto err;
	}

	rc = spdk_nvme_ctrlr_connect_io_qpair(nvme_ctrlr->ctrlr, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to connect I/O qpair.\n");
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "Unable to connect I/O qpair.\n");
		goto err;
	}

@@ -1984,8 +2012,9 @@ bdev_nvme_failover_trid(struct nvme_ctrlr *nvme_ctrlr, bool remove, bool start)

	assert(path_id->trid.trtype != SPDK_NVME_TRANSPORT_PCIE);

	SPDK_NOTICELOG("Start failover from %s:%s to %s:%s\n", path_id->trid.traddr,
		       path_id->trid.trsvcid,	next_path->trid.traddr, next_path->trid.trsvcid);
	NVME_CTRLR_NOTICELOG(nvme_ctrlr, "Start failover from %s:%s to %s:%s\n",
			     path_id->trid.traddr, path_id->trid.trsvcid,
			     next_path->trid.traddr, next_path->trid.trsvcid);

	spdk_nvme_ctrlr_fail(nvme_ctrlr->ctrlr);
	nvme_ctrlr->active_path_id = next_path;
@@ -2178,9 +2207,9 @@ _bdev_nvme_reset_ctrlr_complete(struct nvme_ctrlr *nvme_ctrlr, void *ctx, int st
	nvme_ctrlr->ctrlr_op_cb_arg = NULL;

	if (!success) {
		SPDK_ERRLOG("Resetting controller failed.\n");
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "Resetting controller failed.\n");
	} else {
		SPDK_NOTICELOG("Resetting controller successful.\n");
		NVME_CTRLR_NOTICELOG(nvme_ctrlr, "Resetting controller successful.\n");
	}

	pthread_mutex_lock(&nvme_ctrlr->mutex);
@@ -2482,13 +2511,13 @@ bdev_nvme_reset_ctrlr(struct nvme_ctrlr *nvme_ctrlr)

	if (nvme_ctrlr->resetting) {
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
		SPDK_NOTICELOG("Unable to perform reset, already in progress.\n");
		NVME_CTRLR_NOTICELOG(nvme_ctrlr, "Unable to perform reset, already in progress.\n");
		return -EBUSY;
	}

	if (nvme_ctrlr->disabled) {
		pthread_mutex_unlock(&nvme_ctrlr->mutex);
		SPDK_NOTICELOG("Unable to perform reset. Controller is disabled.\n");
		NVME_CTRLR_NOTICELOG(nvme_ctrlr, "Unable to perform reset. Controller is disabled.\n");
		return -EALREADY;
	}

@@ -2496,7 +2525,7 @@ bdev_nvme_reset_ctrlr(struct nvme_ctrlr *nvme_ctrlr)
	nvme_ctrlr->dont_retry = true;

	if (nvme_ctrlr->reconnect_is_delayed) {
		SPDK_DEBUGLOG(bdev_nvme, "Reconnect is already scheduled.\n");
		NVME_CTRLR_DEBUGLOG(nvme_ctrlr, "Reconnect is already scheduled.\n");
		msg_fn = bdev_nvme_reconnect_ctrlr_now;
		nvme_ctrlr->reconnect_is_delayed = false;
	} else {
@@ -2751,7 +2780,7 @@ nvme_ctrlr_op_rpc(struct nvme_ctrlr *nvme_ctrlr, enum nvme_ctrlr_op op,

	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		SPDK_ERRLOG("Failed to allocate nvme_ctrlr_op_rpc_ctx.\n");
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "Failed to allocate nvme_ctrlr_op_rpc_ctx.\n");
		cb_fn(cb_arg, -ENOMEM);
		return;
	}
@@ -3022,13 +3051,14 @@ bdev_nvme_failover_ctrlr_unsafe(struct nvme_ctrlr *nvme_ctrlr, bool remove)

	if (nvme_ctrlr->resetting) {
		if (!nvme_ctrlr->in_failover) {
			SPDK_NOTICELOG("Reset is already in progress. Defer failover until reset completes.\n");
			NVME_CTRLR_NOTICELOG(nvme_ctrlr,
					     "Reset is already in progress. Defer failover until reset completes.\n");

			/* Defer failover until reset completes. */
			nvme_ctrlr->pending_failover = true;
			return -EINPROGRESS;
		} else {
			SPDK_NOTICELOG("Unable to perform failover, already in progress.\n");
			NVME_CTRLR_NOTICELOG(nvme_ctrlr, "Unable to perform failover, already in progress.\n");
			return -EBUSY;
		}
	}
@@ -3036,14 +3066,14 @@ bdev_nvme_failover_ctrlr_unsafe(struct nvme_ctrlr *nvme_ctrlr, bool remove)
	bdev_nvme_failover_trid(nvme_ctrlr, remove, true);

	if (nvme_ctrlr->reconnect_is_delayed) {
		SPDK_NOTICELOG("Reconnect is already scheduled.\n");
		NVME_CTRLR_NOTICELOG(nvme_ctrlr, "Reconnect is already scheduled.\n");

		/* We rely on the next reconnect for the failover. */
		return -EALREADY;
	}

	if (nvme_ctrlr->disabled) {
		SPDK_NOTICELOG("Controller is disabled.\n");
		NVME_CTRLR_NOTICELOG(nvme_ctrlr, "Controller is disabled.\n");

		/* We rely on the enablement for the failover. */
		return -EALREADY;
@@ -3410,7 +3440,7 @@ nvme_qpair_create(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ctrlr_channel *ctrl

	nvme_qpair = calloc(1, sizeof(*nvme_qpair));
	if (!nvme_qpair) {
		SPDK_ERRLOG("Failed to alloc nvme_qpair.\n");
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "Failed to alloc nvme_qpair.\n");
		return -1;
	}

@@ -4538,11 +4568,11 @@ nvme_abort_cpl(void *ctx, const struct spdk_nvme_cpl *cpl)
	struct nvme_ctrlr *nvme_ctrlr = ctx;

	if (spdk_nvme_cpl_is_error(cpl)) {
		SPDK_WARNLOG("Abort failed. Resetting controller. sc is %u, sct is %u.\n", cpl->status.sc,
			     cpl->status.sct);
		NVME_CTRLR_WARNLOG(nvme_ctrlr, "Abort failed. Resetting controller. sc is %u, sct is %u.\n",
				   cpl->status.sc, cpl->status.sct);
		bdev_nvme_reset_ctrlr(nvme_ctrlr);
	} else if (cpl->cdw0 & 0x1) {
		SPDK_WARNLOG("Specified command could not be aborted.\n");
		NVME_CTRLR_WARNLOG(nvme_ctrlr, "Specified command could not be aborted.\n");
		bdev_nvme_reset_ctrlr(nvme_ctrlr);
	}
}
@@ -4557,7 +4587,8 @@ timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,

	assert(nvme_ctrlr->ctrlr == ctrlr);

	SPDK_WARNLOG("Warning: Detected a timeout. ctrlr=%p qpair=%p cid=%u\n", ctrlr, qpair, cid);
	NVME_CTRLR_WARNLOG(nvme_ctrlr, "Warning: Detected a timeout. ctrlr=%p qpair=%p cid=%u\n",
			   ctrlr, qpair, cid);

	/* Only try to read CSTS if it's a PCIe controller or we have a timeout on an I/O
	 * queue.  (Note: qpair == NULL when there's an admin cmd timeout.)  Otherwise we
@@ -4567,7 +4598,7 @@ timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
	if (nvme_ctrlr->active_path_id->trid.trtype == SPDK_NVME_TRANSPORT_PCIE || qpair != NULL) {
		csts = spdk_nvme_ctrlr_get_regs_csts(ctrlr);
		if (csts.bits.cfs) {
			SPDK_ERRLOG("Controller Fatal Status, reset required\n");
			NVME_CTRLR_ERRLOG(nvme_ctrlr, "Controller Fatal Status, reset required\n");
			bdev_nvme_reset_ctrlr(nvme_ctrlr);
			return;
		}
@@ -4580,7 +4611,7 @@ timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
			pthread_mutex_lock(&nvme_ctrlr->mutex);
			if (!nvme_ctrlr_is_available(nvme_ctrlr)) {
				pthread_mutex_unlock(&nvme_ctrlr->mutex);
				SPDK_NOTICELOG("Quit abort. Ctrlr is not available.\n");
				NVME_CTRLR_NOTICELOG(nvme_ctrlr, "Quit abort. Ctrlr is not available.\n");
				return;
			}
			pthread_mutex_unlock(&nvme_ctrlr->mutex);
@@ -4591,7 +4622,7 @@ timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
				return;
			}

			SPDK_ERRLOG("Unable to send abort. Resetting, rc is %d.\n", rc);
			NVME_CTRLR_ERRLOG(nvme_ctrlr, "Unable to send abort. Resetting, rc is %d.\n", rc);
		}

	/* FALLTHROUGH */
@@ -4599,10 +4630,10 @@ timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
		bdev_nvme_reset_ctrlr(nvme_ctrlr);
		break;
	case SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE:
		SPDK_DEBUGLOG(bdev_nvme, "No action for nvme controller timeout.\n");
		NVME_CTRLR_DEBUGLOG(nvme_ctrlr, "No action for nvme controller timeout.\n");
		break;
	default:
		SPDK_ERRLOG("An invalid timeout action value is found.\n");
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "An invalid timeout action value is found.\n");
		break;
	}
}
@@ -4763,7 +4794,7 @@ nvme_ctrlr_populate_namespace(struct nvme_ctrlr *nvme_ctrlr, struct nvme_ns *nvm

	ns = spdk_nvme_ctrlr_get_ns(nvme_ctrlr->ctrlr, nvme_ns->id);
	if (!ns) {
		SPDK_DEBUGLOG(bdev_nvme, "Invalid NS %d\n", nvme_ns->id);
		NVME_CTRLR_DEBUGLOG(nvme_ctrlr, "Invalid NS %d\n", nvme_ns->id);
		rc = -EINVAL;
		goto done;
	}
@@ -4890,21 +4921,23 @@ nvme_ctrlr_populate_namespaces(struct nvme_ctrlr *nvme_ctrlr,
			if (nvme_ns->ns != ns) {
				assert(nvme_ns->ns == NULL);
				nvme_ns->ns = ns;
				SPDK_DEBUGLOG(bdev_nvme, "NSID %u was added\n", nvme_ns->id);
				NVME_CTRLR_DEBUGLOG(nvme_ctrlr, "NSID %u was added\n", nvme_ns->id);
			}

			num_sectors = spdk_nvme_ns_get_num_sectors(ns);
			bdev = nvme_ns->bdev;
			assert(bdev != NULL);
			if (bdev->disk.blockcnt != num_sectors) {
				SPDK_NOTICELOG("NSID %u is resized: bdev name %s, old size %" PRIu64 ", new size %" PRIu64 "\n",
				NVME_CTRLR_NOTICELOG(nvme_ctrlr,
						     "NSID %u is resized: bdev name %s, old size %" PRIu64 ", new size %" PRIu64 "\n",
						     nvme_ns->id,
						     bdev->disk.name,
						     bdev->disk.blockcnt,
						     num_sectors);
				rc = spdk_bdev_notify_blockcnt_change(&bdev->disk, num_sectors);
				if (rc != 0) {
					SPDK_ERRLOG("Could not change num blocks for nvme bdev: name %s, errno: %d.\n",
					NVME_CTRLR_ERRLOG(nvme_ctrlr,
							  "Could not change num blocks for nvme bdev: name %s, errno: %d.\n",
							  bdev->disk.name, rc);
				}
			}
@@ -4925,7 +4958,7 @@ nvme_ctrlr_populate_namespaces(struct nvme_ctrlr *nvme_ctrlr,
			/* Found a new one */
			nvme_ns = nvme_ns_alloc();
			if (nvme_ns == NULL) {
				SPDK_ERRLOG("Failed to allocate namespace\n");
				NVME_CTRLR_ERRLOG(nvme_ctrlr, "Failed to allocate namespace\n");
				/* This just fails to attach the namespace. It may work on a future attempt. */
				continue;
			}
@@ -5075,7 +5108,8 @@ nvme_ctrlr_read_ana_log_page(struct nvme_ctrlr *nvme_ctrlr)
	ana_log_page_size = nvme_ctrlr_get_ana_log_page_size(nvme_ctrlr);

	if (ana_log_page_size > nvme_ctrlr->max_ana_log_page_size) {
		SPDK_ERRLOG("ANA log page size %" PRIu32 " is larger than allowed %" PRIu32 "\n",
		NVME_CTRLR_ERRLOG(nvme_ctrlr,
				  "ANA log page size %" PRIu32 " is larger than allowed %" PRIu32 "\n",
				  ana_log_page_size, nvme_ctrlr->max_ana_log_page_size);
		return -EINVAL;
	}
@@ -5504,7 +5538,7 @@ nvme_ctrlr_init_ana_log_page(struct nvme_ctrlr *nvme_ctrlr,
	nvme_ctrlr->ana_log_page = spdk_zmalloc(ana_log_page_size, 64, NULL,
						SPDK_ENV_NUMA_ID_ANY, SPDK_MALLOC_DMA);
	if (nvme_ctrlr->ana_log_page == NULL) {
		SPDK_ERRLOG("could not allocate ANA log page buffer\n");
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "could not allocate ANA log page buffer\n");
		return -ENXIO;
	}

@@ -5516,7 +5550,7 @@ nvme_ctrlr_init_ana_log_page(struct nvme_ctrlr *nvme_ctrlr,
	 */
	nvme_ctrlr->copied_ana_desc = calloc(1, ana_log_page_size);
	if (nvme_ctrlr->copied_ana_desc == NULL) {
		SPDK_ERRLOG("could not allocate a buffer to parse ANA descriptor\n");
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "could not allocate a buffer to parse ANA descriptor\n");
		return -ENOMEM;
	}

@@ -5528,7 +5562,7 @@ nvme_ctrlr_init_ana_log_page(struct nvme_ctrlr *nvme_ctrlr,
	ana_log_page_size = nvme_ctrlr_get_ana_log_page_size(nvme_ctrlr);

	if (ana_log_page_size > nvme_ctrlr->max_ana_log_page_size) {
		SPDK_ERRLOG("ANA log page size %" PRIu32 " is larger than allowed %" PRIu32 "\n",
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "ANA log page size %" PRIu32 " is larger than allowed %" PRIu32 "\n",
				  ana_log_page_size, nvme_ctrlr->max_ana_log_page_size);
		return -EINVAL;
	}
@@ -5562,11 +5596,11 @@ bdev_nvme_check_multipath(struct nvme_bdev_ctrlr *nbdev_ctrlr, struct spdk_nvme_
		tmp_cdata = spdk_nvme_ctrlr_get_data(tmp->ctrlr);

		if (!tmp_cdata->cmic.multi_ctrlr) {
			SPDK_ERRLOG("Ctrlr%u does not support multipath.\n", cdata->cntlid);
			NVME_CTRLR_ERRLOG(tmp, "Ctrlr%u does not support multipath.\n", cdata->cntlid);
			return false;
		}
		if (cdata->cntlid == tmp_cdata->cntlid) {
			SPDK_ERRLOG("cntlid %u are duplicated.\n", tmp_cdata->cntlid);
			NVME_CTRLR_ERRLOG(tmp, "cntlid %u are duplicated.\n", tmp_cdata->cntlid);
			return false;
		}
	}
@@ -5604,13 +5638,13 @@ nvme_bdev_ctrlr_create(const char *name, struct nvme_ctrlr *nvme_ctrlr)
	} else {
		nbdev_ctrlr = calloc(1, sizeof(*nbdev_ctrlr));
		if (nbdev_ctrlr == NULL) {
			SPDK_ERRLOG("Failed to allocate nvme_bdev_ctrlr.\n");
			NVME_CTRLR_ERRLOG(nvme_ctrlr, "Failed to allocate nvme_bdev_ctrlr.\n");
			rc = -ENOMEM;
			goto exit;
		}
		nbdev_ctrlr->name = strdup(name);
		if (nbdev_ctrlr->name == NULL) {
			SPDK_ERRLOG("Failed to allocate name of nvme_bdev_ctrlr.\n");
			NVME_CTRLR_ERRLOG(nvme_ctrlr, "Failed to allocate name of nvme_bdev_ctrlr.\n");
			free(nbdev_ctrlr);
			goto exit;
		}
@@ -6050,7 +6084,9 @@ nvme_ctrlr_populate_namespaces_done(struct nvme_ctrlr *nvme_ctrlr,
			ctx->names[j] = nvme_bdev->disk.name;
			j++;
		} else {
			SPDK_ERRLOG("Maximum number of namespaces supported per NVMe controller is %du. Unable to return all names of created bdevs\n",
			NVME_CTRLR_ERRLOG(nvme_ctrlr,
					  "Maximum number of namespaces supported per NVMe controller is %du. "
					  "Unable to return all names of created bdevs\n",
					  ctx->max_bdevs);
			ctx->reported_bdevs = 0;
			populate_namespaces_cb(ctx, -ERANGE);
@@ -6072,13 +6108,14 @@ bdev_nvme_check_secondary_trid(struct nvme_ctrlr *nvme_ctrlr,
	struct nvme_path_id *tmp_trid;

	if (trid->trtype == SPDK_NVME_TRANSPORT_PCIE) {
		SPDK_ERRLOG("PCIe failover is not supported.\n");
		NVME_CTRLR_ERRLOG(nvme_ctrlr, "PCIe failover is not supported.\n");
		return -ENOTSUP;
	}

	/* Currently we only support failover to the same transport type. */
	if (nvme_ctrlr->active_path_id->trid.trtype != trid->trtype) {
		SPDK_WARNLOG("Failover from trtype: %s to a different trtype: %s is not supported currently\n",
		NVME_CTRLR_WARNLOG(nvme_ctrlr,
				   "Failover from trtype: %s to a different trtype: %s is not supported currently\n",
				   spdk_nvme_transport_id_trtype_str(nvme_ctrlr->active_path_id->trid.trtype),
				   spdk_nvme_transport_id_trtype_str(trid->trtype));
		return -EINVAL;
@@ -6087,7 +6124,8 @@ bdev_nvme_check_secondary_trid(struct nvme_ctrlr *nvme_ctrlr,

	/* Currently we only support failover to the same NQN. */
	if (strncmp(trid->subnqn, nvme_ctrlr->active_path_id->trid.subnqn, SPDK_NVMF_NQN_MAX_LEN)) {
		SPDK_WARNLOG("Failover from subnqn: %s to a different subnqn: %s is not supported currently\n",
		NVME_CTRLR_WARNLOG(nvme_ctrlr,
				   "Failover from subnqn: %s to a different subnqn: %s is not supported currently\n",
				   nvme_ctrlr->active_path_id->trid.subnqn, trid->subnqn);
		return -EINVAL;
	}
@@ -6095,8 +6133,8 @@ bdev_nvme_check_secondary_trid(struct nvme_ctrlr *nvme_ctrlr,
	/* Skip all the other checks if we've already registered this path. */
	TAILQ_FOREACH(tmp_trid, &nvme_ctrlr->trids, link) {
		if (!spdk_nvme_transport_id_compare(&tmp_trid->trid, trid)) {
			SPDK_WARNLOG("This path (traddr: %s subnqn: %s) is already registered\n", trid->traddr,
				     trid->subnqn);
			NVME_CTRLR_WARNLOG(nvme_ctrlr, "This path (traddr: %s subnqn: %s) is already registered\n",
					   trid->traddr, trid->subnqn);
			return -EALREADY;
		}
	}