Commit 7d4d22a8 authored by Changpeng Liu's avatar Changpeng Liu Committed by Jim Harris
Browse files

nvme: add a wait for completion timeout API



Although SPDK already provides an API that lets users handle NVMe commands
that time out at runtime, it is useful to have another API here that the
SPDK NVMe driver itself can use to break out of an endless wait.  Also use
the new API first in the initialization process, because we don't want to
add another initialization state for Intel-only supported log pages.

Change-Id: Ibe7cadbc59033a299a1fcf02a66e98fc4eca8100
Signed-off-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-on: https://review.gerrithub.io/c/444353


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent 2c026cf4
Loading
Loading
Loading
Loading
+27 −0
Original line number Diff line number Diff line
@@ -142,6 +142,33 @@ spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
	return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
}

/**
 * Poll the given qpair until the tracked command completes or the timeout
 * expires.
 *
 * \param qpair qpair to poll for completions.
 * \param status completion-poll status object; its \c done flag is set by
 *        the completion callback and its \c cpl holds the final completion.
 * \param timeout_in_secs maximum time to wait; 0 means wait forever.
 *
 * \return 0 on successful completion, -EIO if the command failed, timed
 *         out, or the qpair failed while polling.
 */
int
spdk_nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
				      struct nvme_completion_poll_status *status,
				      uint64_t timeout_in_secs)
{
	uint64_t timeout_tsc = 0;
	int rc;

	memset(&status->cpl, 0, sizeof(status->cpl));
	status->done = false;
	if (timeout_in_secs) {
		/* Convert the relative timeout into an absolute tick deadline. */
		timeout_tsc = spdk_get_ticks() + timeout_in_secs * spdk_get_ticks_hz();
	}

	while (status->done == false) {
		rc = spdk_nvme_qpair_process_completions(qpair, 0);
		if (rc < 0) {
			/* The qpair failed (e.g. controller reset/removal); the
			 * completion will never arrive, so stop polling instead of
			 * spinning forever when no timeout was requested.
			 */
			break;
		}
		if (timeout_tsc && spdk_get_ticks() > timeout_tsc) {
			break;
		}
	}

	if (status->done == false) {
		return -EIO;
	}

	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
}

static void
nvme_user_copy_cmd_complete(void *arg, const struct spdk_nvme_cpl *cpl)
{
+2 −1
Original line number Diff line number Diff line
@@ -410,7 +410,8 @@ static int nvme_ctrlr_set_intel_support_log_pages(struct spdk_nvme_ctrlr *ctrlr)
		return rc;
	}

	if (spdk_nvme_wait_for_completion(ctrlr->adminq, &status)) {
	if (spdk_nvme_wait_for_completion_timeout(ctrlr->adminq, &status,
			ctrlr->opts.admin_timeout_ms / 1000)) {
		spdk_free(log_page_directory);
		SPDK_WARNLOG("Intel log pages not supported on Intel drive!\n");
		return 0;
+3 −0
Original line number Diff line number Diff line
@@ -793,6 +793,9 @@ int spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
int	spdk_nvme_wait_for_completion_robust_lock(struct spdk_nvme_qpair *qpair,
		struct nvme_completion_poll_status *status,
		pthread_mutex_t *robust_mutex);
int	spdk_nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
		struct nvme_completion_poll_status *status,
		uint64_t timeout_in_secs);

struct spdk_nvme_ctrlr_process *spdk_nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
		pid_t pid);
+7 −0
Original line number Diff line number Diff line
@@ -293,6 +293,13 @@ spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
	return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
}

/* Presumably a unit-test stub (TODO confirm against the surrounding file):
 * the timeout is deliberately not honored here.  It simply delegates to the
 * non-locking completion-poll wrapper, same as spdk_nvme_wait_for_completion().
 */
int
spdk_nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
				      struct nvme_completion_poll_status *status,
				      uint64_t timeout_in_secs)
{
	/* Explicitly discard the unused timeout to document the intent. */
	(void)timeout_in_secs;

	return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
}

int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,