Commit 3e1ab5ea authored by Shuhei Matsumoto, committed by Tomasz Zawadzki

nvmf/rdma: Wait until request is abortable if it is transferring

If the state of the request is TRANSFERRING_HOST_TO_CONTROLLER,
we cannot abort it immediately, but we may be able to abort it
once it transitions to EXECUTING. Hence wait until the request
is EXECUTING and then retry the abort.
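
The retry is driven by a poller: the abort handler records a
deadline in ticks and re-registers itself with zero period until
the target request becomes abortable or the deadline passes.
A minimal sketch of that pattern, assuming SPDK's poller API;
struct abort_ctx, retry_abort_poll and start_abort are illustrative
stand-ins, not part of this patch (the real code keeps the poller
and deadline on struct spdk_nvmf_request):

#include "spdk/stdinc.h"
#include "spdk/env.h"		/* spdk_get_ticks(), spdk_get_ticks_hz() */
#include "spdk/thread.h"	/* spdk_poller, SPDK_POLLER_REGISTER, SPDK_POLLER_BUSY */

struct abort_ctx {
	struct spdk_poller	*poller;
	uint64_t		timeout_tsc;	/* deadline, in TSC ticks */
	void			*target;	/* the request we are trying to abort */
	bool			(*is_abortable)(void *target);
	void			(*do_abort)(void *target);
	void			(*give_up)(void *target);
};

static int
retry_abort_poll(void *arg)
{
	struct abort_ctx *ctx = arg;

	spdk_poller_unregister(&ctx->poller);

	if (ctx->is_abortable(ctx->target)) {
		/* Target reached an abortable state (e.g. EXECUTING). */
		ctx->do_abort(ctx->target);
	} else if (spdk_get_ticks() < ctx->timeout_tsc) {
		/* Still transferring; try again on the next reactor iteration. */
		ctx->poller = SPDK_POLLER_REGISTER(retry_abort_poll, ctx, 0);
	} else {
		/* Deadline passed; complete the Abort command without aborting. */
		ctx->give_up(ctx->target);
	}

	return SPDK_POLLER_BUSY;
}

static void
start_abort(struct abort_ctx *ctx, uint64_t timeout_sec)
{
	ctx->timeout_tsc = spdk_get_ticks() + timeout_sec * spdk_get_ticks_hz();
	ctx->poller = NULL;
	retry_abort_poll(ctx);
}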

The following patch will make the timeout value configurable as
a new transport option.
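
Hypothetically, the deadline computation would then read that
option instead of the fixed constant; the abort_timeout_sec field
name below is an assumption about the follow-up, not something
this patch defines:

	/* Sketch: deadline taken from a transport option (assumed name). */
	req->timeout_tsc = spdk_get_ticks() +
			   transport->opts.abort_timeout_sec * spdk_get_ticks_hz();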

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ia4b43e79c3b0d9c53ed04b01a9eaa9b117b32d81
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3013
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent c1305e71
+58 −30
@@ -4040,67 +4040,95 @@ nvmf_rdma_request_set_abort_status(struct spdk_nvmf_request *req,
 	req->rsp->nvme_cpl.cdw0 &= ~1U;	/* Command was successfully aborted. */
 }
 
-static void
-nvmf_rdma_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
-			      struct spdk_nvmf_request *req)
+#define NVMF_RDMA_ABORT_TIMEOUT_SEC	1
+
+static int
+_nvmf_rdma_qpair_abort_request(void *ctx)
 {
-	struct spdk_nvmf_rdma_qpair *rqpair;
-	uint16_t cid;
-	uint32_t i;
-	struct spdk_nvmf_rdma_request *rdma_req_to_abort = NULL;
+	struct spdk_nvmf_request *req = ctx;
+	struct spdk_nvmf_rdma_request *rdma_req_to_abort = SPDK_CONTAINEROF(
+				req->req_to_abort, struct spdk_nvmf_rdma_request, req);
+	struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(req->req_to_abort->qpair,
+					      struct spdk_nvmf_rdma_qpair, qpair);
 	int rc;
 
-	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
-	cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid;
-
-	for (i = 0; i < rqpair->max_queue_depth; i++) {
-		rdma_req_to_abort = &rqpair->resources->reqs[i];
-
-		if (rdma_req_to_abort->state != RDMA_REQUEST_STATE_FREE &&
-		    rdma_req_to_abort->req.cmd->nvme_cmd.cid == cid) {
-			break;
-		}
-	}
-
-	if (rdma_req_to_abort == NULL) {
-		goto complete;
-	}
+	spdk_poller_unregister(&req->poller);
 
 	switch (rdma_req_to_abort->state) {
 	case RDMA_REQUEST_STATE_EXECUTING:
 		rc = nvmf_ctrlr_abort_request(req, &rdma_req_to_abort->req);
 		if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS) {
-			return;
+			return SPDK_POLLER_BUSY;
 		}
 		break;
 
 	case RDMA_REQUEST_STATE_NEED_BUFFER:
-		STAILQ_REMOVE(&rqpair->poller->group->group.pending_buf_queue, &rdma_req_to_abort->req,
-			      spdk_nvmf_request, buf_link);
+		STAILQ_REMOVE(&rqpair->poller->group->group.pending_buf_queue,
+			      &rdma_req_to_abort->req, spdk_nvmf_request, buf_link);
 
 		nvmf_rdma_request_set_abort_status(req, rdma_req_to_abort);
 		break;
 
 	case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING:
-		STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req_to_abort, spdk_nvmf_rdma_request,
-			      state_link);
+		STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req_to_abort,
+			      spdk_nvmf_rdma_request, state_link);
 
 		nvmf_rdma_request_set_abort_status(req, rdma_req_to_abort);
 		break;
 
 	case RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING:
-		STAILQ_REMOVE(&rqpair->pending_rdma_write_queue, rdma_req_to_abort, spdk_nvmf_rdma_request,
-			      state_link);
+		STAILQ_REMOVE(&rqpair->pending_rdma_write_queue, rdma_req_to_abort,
+			      spdk_nvmf_rdma_request, state_link);
 
 		nvmf_rdma_request_set_abort_status(req, rdma_req_to_abort);
 		break;
 
+	case RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER:
+		if (spdk_get_ticks() < req->timeout_tsc) {
+			req->poller = SPDK_POLLER_REGISTER(_nvmf_rdma_qpair_abort_request, req, 0);
+			return SPDK_POLLER_BUSY;
+		}
+		break;
+
 	default:
 		break;
 	}
 
-complete:
 	spdk_nvmf_request_complete(req);
+	return SPDK_POLLER_BUSY;
 }
 
+static void
+nvmf_rdma_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
+			      struct spdk_nvmf_request *req)
+{
+	struct spdk_nvmf_rdma_qpair *rqpair;
+	uint16_t cid;
+	uint32_t i;
+	struct spdk_nvmf_rdma_request *rdma_req_to_abort = NULL;
+
+	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
+	cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid;
+
+	for (i = 0; i < rqpair->max_queue_depth; i++) {
+		rdma_req_to_abort = &rqpair->resources->reqs[i];
+
+		if (rdma_req_to_abort->state != RDMA_REQUEST_STATE_FREE &&
+		    rdma_req_to_abort->req.cmd->nvme_cmd.cid == cid) {
+			break;
+		}
+	}
+
+	if (rdma_req_to_abort == NULL) {
+		spdk_nvmf_request_complete(req);
+		return;
+	}
+
+	req->req_to_abort = &rdma_req_to_abort->req;
+	req->timeout_tsc = spdk_get_ticks() + NVMF_RDMA_ABORT_TIMEOUT_SEC * spdk_get_ticks_hz();
+	req->poller = NULL;
+
+	_nvmf_rdma_qpair_abort_request(req);
+}
+
 static int