Commit c4bb0ea6 authored by Daniel Verkamp, committed by Changpeng Liu
Browse files

nvme: add helper to wait for internal commands



Factor out the common pattern of waiting for an internally-submitted
command to complete.  This will give us a convenient central place to
add error checking.

Change-Id: I65334d654d294cfb208fc86d16fa387ac5432254
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-on: https://review.gerrithub.io/412545


Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
parent 7f51e8e5
Loading
Loading
Loading
Loading
+43 −0
Original line number Diff line number Diff line
@@ -99,6 +99,49 @@ nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
	status->done = true;
}

/**
 * Poll a queue pair until the tracked command completes.
 *
 * \param qpair queue to poll
 * \param status completion status to monitor
 * \param robust_mutex optional robust mutex held around each poll of the qpair
 *
 * \return 0 if the command completed without error, negative errno on failure
 *
 * The command being waited on must have been submitted with
 * nvme_completion_poll_cb as its callback and \p status as the callback
 * argument; the callback sets status->done and copies the completion into
 * status->cpl.
 */
int
spdk_nvme_wait_for_completion_robust_lock(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex)
{
	/* Reset the status so a stale completion is never observed. */
	status->done = false;
	memset(&status->cpl, 0, sizeof(status->cpl));

	/* Busy-poll; nvme_completion_poll_cb flips status->done when the
	 * completion for the tracked command arrives.
	 */
	for (;;) {
		if (robust_mutex != NULL) {
			nvme_robust_mutex_lock(robust_mutex);
		}

		spdk_nvme_qpair_process_completions(qpair, 0);

		if (robust_mutex != NULL) {
			nvme_robust_mutex_unlock(robust_mutex);
		}

		if (status->done) {
			break;
		}
	}

	if (spdk_nvme_cpl_is_error(&status->cpl)) {
		return -EIO;
	}

	return 0;
}

/**
 * Poll qpair for completions until a command completes, without any locking.
 *
 * \param qpair queue to poll
 * \param status completion status
 *
 * \return 0 if command completed without error, negative errno on failure
 *
 * Convenience wrapper around spdk_nvme_wait_for_completion_robust_lock()
 * with no mutex; the command must have been submitted with
 * nvme_completion_poll_cb and status as the callback argument.
 */
int
spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
			      struct nvme_completion_poll_status *status)
{
	return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
}

struct nvme_request *
nvme_allocate_request(struct spdk_nvme_qpair *qpair,
		      const struct nvme_payload *payload, uint32_t payload_size,
+20 −97
Original line number Diff line number Diff line
@@ -380,15 +380,11 @@ static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
		return -ENXIO;
	}

	status.done = false;
	spdk_nvme_ctrlr_cmd_get_log_page(ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY, SPDK_NVME_GLOBAL_NS_TAG,
					 log_page_directory, sizeof(struct spdk_nvme_intel_log_page_directory), 0,
					 nvme_completion_poll_cb,
					 &status);
	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		spdk_dma_free(log_page_directory);
		SPDK_ERRLOG("nvme_ctrlr_cmd_get_log_page failed!\n");
		return -ENXIO;
@@ -673,17 +669,13 @@ nvme_ctrlr_set_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr)
		goto error;
	}

	status.done = false;
	rc = nvme_ctrlr_cmd_doorbell_buffer_config(ctrlr, prp1, prp2,
			nvme_completion_poll_cb, &status);
	if (rc != 0) {
		goto error;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		goto error;
	}

@@ -771,7 +763,6 @@ nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
	struct nvme_completion_poll_status	status;
	int					rc;

	status.done = false;
	rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_CTRLR, 0, 0,
				     &ctrlr->cdata, sizeof(ctrlr->cdata),
				     nvme_completion_poll_cb, &status);
@@ -779,10 +770,7 @@ nvme_ctrlr_identify(struct spdk_nvme_ctrlr *ctrlr)
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		SPDK_ERRLOG("nvme_identify_controller failed!\n");
		return -ENXIO;
	}
@@ -824,24 +812,20 @@ nvme_ctrlr_identify_active_ns(struct spdk_nvme_ctrlr *ctrlr)
		SPDK_ERRLOG("Failed to allocate active_ns_list!\n");
		return -ENOMEM;
	}
	status.done = false;

	if (ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 1, 0) && !(ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		/*
		 * Iterate through the pages and fetch each chunk of 1024 namespaces until
		 * there are no more active namespaces
		 */
		for (i = 0; i < num_pages; i++) {
			status.done = false;
			rc = nvme_ctrlr_cmd_identify(ctrlr, SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST, 0, next_nsid,
						     &new_ns_list[1024 * i], sizeof(struct spdk_nvme_ns_list),
						     nvme_completion_poll_cb, &status);
			if (rc != 0) {
				goto fail;
			}
			while (status.done == false) {
				spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
			}
			if (spdk_nvme_cpl_is_error(&status.cpl)) {
			if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
				SPDK_ERRLOG("nvme_ctrlr_cmd_identify_active_ns_list failed!\n");
				rc = -ENXIO;
				goto fail;
@@ -885,8 +869,6 @@ nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
	uint32_t cq_allocated, sq_allocated, min_allocated, i;
	int rc;

	status.done = false;

	if (ctrlr->opts.num_io_queues > SPDK_NVME_MAX_IO_QUEUES) {
		SPDK_NOTICELOG("Limiting requested num_io_queues %u to max %d\n",
			       ctrlr->opts.num_io_queues, SPDK_NVME_MAX_IO_QUEUES);
@@ -902,24 +884,17 @@ nvme_ctrlr_set_num_qpairs(struct spdk_nvme_ctrlr *ctrlr)
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		SPDK_ERRLOG("Set Features - Number of Queues failed!\n");
	}

	/* Obtain the number of queues allocated using Get Features. */
	status.done = false;
	rc = nvme_ctrlr_cmd_get_num_queues(ctrlr, nvme_completion_poll_cb, &status);
	if (rc != 0) {
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		SPDK_ERRLOG("Get Features - Number of Queues failed!\n");
		ctrlr->opts.num_io_queues = 0;
	} else {
@@ -973,7 +948,6 @@ nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
	}

	/* Retrieve actual keep alive timeout, since the controller may have adjusted it. */
	status.done = false;
	rc = spdk_nvme_ctrlr_cmd_get_feature(ctrlr, SPDK_NVME_FEAT_KEEP_ALIVE_TIMER, 0, NULL, 0,
					     nvme_completion_poll_cb, &status);
	if (rc != 0) {
@@ -982,10 +956,7 @@ nvme_ctrlr_set_keep_alive_timeout(struct spdk_nvme_ctrlr *ctrlr)
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		SPDK_ERRLOG("Keep alive timeout Get Feature failed: SC %x SCT %x\n",
			    status.cpl.status.sc, status.cpl.status.sct);
		ctrlr->opts.keep_alive_timeout_ms = 0;
@@ -1049,17 +1020,13 @@ nvme_ctrlr_set_host_id(struct spdk_nvme_ctrlr *ctrlr)

	SPDK_TRACEDUMP(SPDK_LOG_NVME, "host_id", host_id, host_id_size);

	status.done = false;
	rc = nvme_ctrlr_cmd_set_host_id(ctrlr, host_id, host_id_size, nvme_completion_poll_cb, &status);
	if (rc != 0) {
		SPDK_ERRLOG("Set Features - Host ID failed: %d\n", rc);
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		SPDK_WARNLOG("Set Features - Host ID failed: SC 0x%x SCT 0x%x\n",
			     status.cpl.status.sc, status.cpl.status.sct);
		/*
@@ -1227,16 +1194,12 @@ _nvme_ctrlr_configure_aer(struct spdk_nvme_ctrlr *ctrlr)
		config.bits.telemetry_log_notice = 1;
	}

	status.done = false;
	rc = nvme_ctrlr_cmd_set_async_event_config(ctrlr, config, nvme_completion_poll_cb, &status);
	if (rc != 0) {
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		return -ENXIO;
	}

@@ -2050,18 +2013,12 @@ spdk_nvme_ctrlr_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	struct nvme_completion_poll_status	status;
	int					res;

	status.done = false;
	res = nvme_ctrlr_cmd_attach_ns(ctrlr, nsid, payload,
				       nvme_completion_poll_cb, &status);
	if (res) {
		return res;
	}
	while (status.done == false) {
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_attach_ns failed!\n");
		return -ENXIO;
	}
@@ -2076,18 +2033,12 @@ spdk_nvme_ctrlr_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	struct nvme_completion_poll_status	status;
	int					res;

	status.done = false;
	res = nvme_ctrlr_cmd_detach_ns(ctrlr, nsid, payload,
				       nvme_completion_poll_cb, &status);
	if (res) {
		return res;
	}
	while (status.done == false) {
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_detach_ns failed!\n");
		return -ENXIO;
	}
@@ -2101,17 +2052,11 @@ spdk_nvme_ctrlr_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_dat
	struct nvme_completion_poll_status	status;
	int					res;

	status.done = false;
	res = nvme_ctrlr_cmd_create_ns(ctrlr, payload, nvme_completion_poll_cb, &status);
	if (res) {
		return 0;
	}
	while (status.done == false) {
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_create_ns failed!\n");
		return 0;
	}
@@ -2131,17 +2076,11 @@ spdk_nvme_ctrlr_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
	struct nvme_completion_poll_status	status;
	int					res;

	status.done = false;
	res = nvme_ctrlr_cmd_delete_ns(ctrlr, nsid, nvme_completion_poll_cb, &status);
	if (res) {
		return res;
	}
	while (status.done == false) {
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_delete_ns failed!\n");
		return -ENXIO;
	}
@@ -2156,18 +2095,12 @@ spdk_nvme_ctrlr_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	struct nvme_completion_poll_status	status;
	int					res;

	status.done = false;
	res = nvme_ctrlr_cmd_format(ctrlr, nsid, format, nvme_completion_poll_cb,
				    &status);
	if (res) {
		return res;
	}
	while (status.done == false) {
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
		SPDK_ERRLOG("spdk_nvme_ctrlr_format failed!\n");
		return -ENXIO;
	}
@@ -2212,7 +2145,6 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui

	while (size_remaining > 0) {
		transfer = spdk_min(size_remaining, ctrlr->min_page_size);
		status.done = false;

		res = nvme_ctrlr_cmd_fw_image_download(ctrlr, transfer, offset, p,
						       nvme_completion_poll_cb,
@@ -2221,12 +2153,7 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
			return res;
		}

		while (status.done == false) {
			nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
			spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
			nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		}
		if (spdk_nvme_cpl_is_error(&status.cpl)) {
		if (spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock)) {
			SPDK_ERRLOG("spdk_nvme_ctrlr_fw_image_download failed!\n");
			return -ENXIO;
		}
@@ -2240,21 +2167,17 @@ spdk_nvme_ctrlr_update_firmware(struct spdk_nvme_ctrlr *ctrlr, void *payload, ui
	fw_commit.fs = slot;
	fw_commit.ca = commit_action;

	status.done = false;

	res = nvme_ctrlr_cmd_fw_commit(ctrlr, &fw_commit, nvme_completion_poll_cb,
				       &status);
	if (res) {
		return res;
	}

	while (status.done == false) {
		nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
	}
	res = spdk_nvme_wait_for_completion_robust_lock(ctrlr->adminq, &status, &ctrlr->ctrlr_lock);

	memcpy(completion_status, &status.cpl.status, sizeof(struct spdk_nvme_status));
	if (spdk_nvme_cpl_is_error(&status.cpl)) {

	if (res) {
		if (status.cpl.status.sct != SPDK_NVME_SCT_COMMAND_SPECIFIC ||
		    status.cpl.status.sc != SPDK_NVME_SC_FIRMWARE_REQ_NVM_RESET) {
			if (status.cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC  &&
+5 −0
Original line number Diff line number Diff line
@@ -576,6 +576,11 @@ int nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
		uint32_t size, uint32_t offset, void *payload,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg);
void	nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl);
int	spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
				      struct nvme_completion_poll_status *status);
int	spdk_nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
		struct nvme_completion_poll_status *status,
		pthread_mutex_t *robust_mutex);

struct spdk_nvme_ctrlr_process *spdk_nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
		pid_t pid);
+3 −13
Original line number Diff line number Diff line
@@ -47,7 +47,6 @@ int nvme_ns_identify_update(struct spdk_nvme_ns *ns)
	int					rc;

	nsdata = _nvme_ns_get_data(ns);
	status.done = false;
	rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS, 0, ns->id,
				     nsdata, sizeof(*nsdata),
				     nvme_completion_poll_cb, &status);
@@ -55,12 +54,8 @@ int nvme_ns_identify_update(struct spdk_nvme_ns *ns)
		return rc;
	}

	while (status.done == false) {
		nvme_robust_mutex_lock(&ns->ctrlr->ctrlr_lock);
		spdk_nvme_qpair_process_completions(ns->ctrlr->adminq, 0);
		nvme_robust_mutex_unlock(&ns->ctrlr->ctrlr_lock);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, &status,
			&ns->ctrlr->ctrlr_lock)) {
		/* This can occur if the namespace is not active. Simply zero the
		 * namespace data and continue. */
		memset(nsdata, 0, sizeof(*nsdata));
@@ -127,16 +122,11 @@ int nvme_ns_identify_update(struct spdk_nvme_ns *ns)
	if (ns->ctrlr->vs.raw >= SPDK_NVME_VERSION(1, 3, 0) &&
	    !(ns->ctrlr->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Attempting to retrieve NS ID Descriptor List\n");
		status.done = false;
		rc = nvme_ctrlr_cmd_identify(ns->ctrlr, SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST, 0, ns->id,
					     ns->id_desc_list, sizeof(ns->id_desc_list),
					     nvme_completion_poll_cb, &status);
		if (rc == 0) {
			while (status.done == false) {
				nvme_robust_mutex_lock(&ns->ctrlr->ctrlr_lock);
				spdk_nvme_qpair_process_completions(ns->ctrlr->adminq, 0);
				nvme_robust_mutex_unlock(&ns->ctrlr->ctrlr_lock);
			}
			rc = spdk_nvme_wait_for_completion_robust_lock(ns->ctrlr->adminq, &status, &ns->ctrlr->ctrlr_lock);
		}

		if (rc != 0 || spdk_nvme_cpl_is_error(&status.cpl)) {
+5 −24
Original line number Diff line number Diff line
@@ -1516,40 +1516,29 @@ _nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme
	struct nvme_completion_poll_status	status;
	int					rc;

	status.done = false;
	rc = nvme_pcie_ctrlr_cmd_create_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status);
	if (rc != 0) {
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		SPDK_ERRLOG("nvme_create_io_cq failed!\n");
		return -1;
	}

	status.done = false;
	rc = nvme_pcie_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair, nvme_completion_poll_cb, &status);
	if (rc != 0) {
		return rc;
	}

	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		SPDK_ERRLOG("nvme_create_io_sq failed!\n");
		/* Attempt to delete the completion queue */
		status.done = false;
		rc = nvme_pcie_ctrlr_cmd_delete_io_cq(qpair->ctrlr, qpair, nvme_completion_poll_cb, &status);
		if (rc != 0) {
			return -1;
		}
		while (status.done == false) {
			spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
		}
		spdk_nvme_wait_for_completion(ctrlr->adminq, &status);
		return -1;
	}

@@ -1625,15 +1614,11 @@ nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
	}

	/* Delete the I/O submission queue */
	status.done = false;
	rc = nvme_pcie_ctrlr_cmd_delete_io_sq(ctrlr, qpair, nvme_completion_poll_cb, &status);
	if (rc != 0) {
		return rc;
	}
	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		return -1;
	}

@@ -1646,15 +1631,11 @@ nvme_pcie_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
	}

	/* Delete the completion queue */
	status.done = false;
	rc = nvme_pcie_ctrlr_cmd_delete_io_cq(ctrlr, qpair, nvme_completion_poll_cb, &status);
	if (rc != 0) {
		return rc;
	}
	while (status.done == false) {
		spdk_nvme_qpair_process_completions(ctrlr->adminq, 0);
	}
	if (spdk_nvme_cpl_is_error(&status.cpl)) {
	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
		return -1;
	}

Loading