Commit ecd46370 authored by Jim Harris's avatar Jim Harris Committed by Tomasz Zawadzki
Browse files

nvme: add nvme_wait_for_completion_robust_lock_timeout_poll



This is an extension to the existing nvme_wait_for_completion* interface
that allows the user to poll for request's completion without blocking.

Signed-off-by: default avatarJim Harris <james.r.harris@intel.com>
Signed-off-by: default avatarKonrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I6c5b7203883f8e2fa28ceb039ce63aa50631f571
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8601


Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarBen Walker <benjamin.walker@intel.com>
Reviewed-by: default avatarMonica Kenguva <monica.kenguva@intel.com>
Reviewed-by: default avatarAleksey Marchuk <alexeymar@mellanox.com>
parent 20e6c821
Loading
Loading
Loading
Loading
+66 −44
Original line number Diff line number Diff line
@@ -262,39 +262,13 @@ dummy_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
{
}

/**
 * Poll qpair for completions until a command completes.
 *
 * \param qpair queue to poll
 * \param status completion status. The user must fill this structure with zeroes before calling
 * this function
 * \param robust_mutex optional robust mutex to lock while polling qpair
 * \param timeout_in_usecs optional timeout
 *
 * \return 0 if command completed without error,
 * -EIO if command completed with error,
 * -ECANCELED if command is not completed due to transport/device error or time expired
 *
 *  The command to wait upon must be submitted with nvme_completion_poll_cb as the callback
 *  and status as the callback argument.
 */
int
nvme_wait_for_completion_robust_lock_timeout(
	struct spdk_nvme_qpair *qpair,
nvme_wait_for_completion_robust_lock_timeout_poll(struct spdk_nvme_qpair *qpair,
		struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex,
	uint64_t timeout_in_usecs)
		pthread_mutex_t *robust_mutex)
{
	int rc = 0;

	if (timeout_in_usecs) {
		status->timeout_tsc = spdk_get_ticks() + timeout_in_usecs *
				      spdk_get_ticks_hz() / SPDK_SEC_TO_USEC;
	} else {
		status->timeout_tsc = 0;
	}
	int rc;

	while (status->done == false) {
	if (robust_mutex) {
		nvme_robust_mutex_lock(robust_mutex);
	}
@@ -313,31 +287,79 @@ nvme_wait_for_completion_robust_lock_timeout(
	if (rc < 0) {
		status->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
		status->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
			break;
		goto error;
	}
		if (status->timeout_tsc && spdk_get_ticks() > status->timeout_tsc) {
			rc = -1;
			break;

	if (!status->done && status->timeout_tsc && spdk_get_ticks() > status->timeout_tsc) {
		goto error;
	}

	if (qpair->ctrlr->trid.trtype == SPDK_NVME_TRANSPORT_PCIE) {
		union spdk_nvme_csts_register csts = spdk_nvme_ctrlr_get_regs_csts(qpair->ctrlr);
		if (csts.raw == SPDK_NVME_INVALID_REGISTER_VALUE) {
			status->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			status->cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
				break;
			}
			goto error;
		}
	}

	if (status->done == false) {
	if (!status->done) {
		return -EAGAIN;
	} else if (spdk_nvme_cpl_is_error(&status->cpl)) {
		return -EIO;
	} else {
		return 0;
	}
error:
	/* Either transport error occurred or we've timed out.  Either way, if the response hasn't
	 * been received yet, mark the command as timed out, so the status gets freed when the
	 * command is completed or aborted.
	 */
	if (!status->done) {
		status->timed_out = true;
	}

	if (rc < 0) {
	return -ECANCELED;
}

	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
/**
 * Poll qpair for completions until a command completes.
 *
 * \param qpair queue to poll
 * \param status completion status. The user must fill this structure with zeroes before calling
 * this function
 * \param robust_mutex optional robust mutex to lock while polling qpair
 * \param timeout_in_usecs optional timeout
 *
 * \return 0 if command completed without error,
 * -EIO if command completed with error,
 * -ECANCELED if command is not completed due to transport/device error or time expired
 *
 *  The command to wait upon must be submitted with nvme_completion_poll_cb as the callback
 *  and status as the callback argument.
 */
int
nvme_wait_for_completion_robust_lock_timeout(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex,
	uint64_t timeout_in_usecs)
{
	int rc = -EAGAIN;

	/* Translate the relative microsecond timeout into an absolute tick
	 * deadline; a zero timeout_tsc disables the timeout entirely.
	 */
	status->timeout_tsc = timeout_in_usecs ?
			      spdk_get_ticks() + timeout_in_usecs *
			      spdk_get_ticks_hz() / SPDK_SEC_TO_USEC : 0;

	status->cpl.status_raw = 0;

	/* Keep polling while the helper reports the command as still
	 * outstanding (-EAGAIN); any other value is the final status.
	 */
	while (rc == -EAGAIN) {
		rc = nvme_wait_for_completion_robust_lock_timeout_poll(qpair, status, robust_mutex);
	}

	return rc;
}

/**
+3 −0
Original line number Diff line number Diff line
@@ -1038,6 +1038,9 @@ int nvme_wait_for_completion_robust_lock_timeout(struct spdk_nvme_qpair *qpair,
		struct nvme_completion_poll_status *status,
		pthread_mutex_t *robust_mutex,
		uint64_t timeout_in_usecs);
int	nvme_wait_for_completion_robust_lock_timeout_poll(struct spdk_nvme_qpair *qpair,
		struct nvme_completion_poll_status *status,
		pthread_mutex_t *robust_mutex);

struct spdk_nvme_ctrlr_process *nvme_ctrlr_get_process(struct spdk_nvme_ctrlr *ctrlr,
		pid_t pid);