Commit 1fb629c4 authored by Seth Howell, committed by Ben Walker
Browse files

rdma: make the pending_data_buf_queue an STAILQ



Should speed up operations, and allows us to remove the 16 byte link
object from the request structure.

Change-Id: Ie62df1f44d22580a7a7ae41c498295841d1e3064
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/448080


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent 43e136df
Loading
Loading
Loading
Loading
+10 −11
Original line number Diff line number Diff line
@@ -269,7 +269,6 @@ struct spdk_nvmf_rdma_request {

	uint32_t				num_outstanding_data_wr;

	TAILQ_ENTRY(spdk_nvmf_rdma_request)	link;
	STAILQ_ENTRY(spdk_nvmf_rdma_request)	state_link;
};

@@ -417,7 +416,7 @@ struct spdk_nvmf_rdma_poll_group {
	struct spdk_nvmf_transport_poll_group	group;

	/* Requests that are waiting to obtain a data buffer */
	TAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_data_buf_queue;
	STAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_data_buf_queue;

	TAILQ_HEAD(, spdk_nvmf_rdma_poller)	pollers;
};
@@ -1816,7 +1815,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
	 * to release resources. */
	if (rqpair->ibv_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
		if (rdma_req->state == RDMA_REQUEST_STATE_NEED_BUFFER) {
			TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
			STAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
		} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING) {
			STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
		} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING) {
@@ -1860,7 +1859,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
			}

			rdma_req->state = RDMA_REQUEST_STATE_NEED_BUFFER;
			TAILQ_INSERT_TAIL(&rgroup->pending_data_buf_queue, rdma_req, link);
			STAILQ_INSERT_TAIL(&rgroup->pending_data_buf_queue, rdma_req, state_link);
			break;
		case RDMA_REQUEST_STATE_NEED_BUFFER:
			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEED_BUFFER, 0, 0,
@@ -1868,7 +1867,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,

			assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE);

			if (rdma_req != TAILQ_FIRST(&rgroup->pending_data_buf_queue)) {
			if (rdma_req != STAILQ_FIRST(&rgroup->pending_data_buf_queue)) {
				/* This request needs to wait in line to obtain a buffer */
				break;
			}
@@ -1876,7 +1875,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
			/* Try to get a data buffer */
			rc = spdk_nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
			if (rc < 0) {
				TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
				STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);
				rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
				rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
				break;
@@ -1887,7 +1886,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
				break;
			}

			TAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, link);
			STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);

			/* If data is transferring from host to controller and the data didn't
			 * arrive using in capsule data, we need to do a transfer from the host.
@@ -2526,7 +2525,7 @@ spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport
	}

	/* The second highest priority is I/O waiting on memory buffers. */
	TAILQ_FOREACH_SAFE(rdma_req, &rqpair->poller->group->pending_data_buf_queue, link,
	STAILQ_FOREACH_SAFE(rdma_req, &rqpair->poller->group->pending_data_buf_queue, state_link,
			    req_tmp) {
		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
			break;
@@ -2902,7 +2901,7 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
	}

	TAILQ_INIT(&rgroup->pollers);
	TAILQ_INIT(&rgroup->pending_data_buf_queue);
	STAILQ_INIT(&rgroup->pending_data_buf_queue);

	pthread_mutex_lock(&rtransport->lock);
	TAILQ_FOREACH(device, &rtransport->devices, link) {
@@ -3008,7 +3007,7 @@ spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
		free(poller);
	}

	if (!TAILQ_EMPTY(&rgroup->pending_data_buf_queue)) {
	if (!STAILQ_EMPTY(&rgroup->pending_data_buf_queue)) {
		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
	}