Commit 1f9ac117 authored by Seth Howell, committed by Jim Harris
Browse files

rdma: add num_outstanding_data_wr tracker to req



This will be necessary later on when we need to throttle send and recv
requests in software.

Change-Id: Ifb25eaabd15e101fbfc2959a08a321f80857b280
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/441604


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
parent faacc878
Loading
Loading
Loading
Loading
+18 −3
Original line number Diff line number Diff line
@@ -235,6 +235,8 @@ struct spdk_nvmf_rdma_request {

	struct spdk_nvmf_rdma_request_data	data;

	uint32_t				num_outstanding_data_wr;

	struct spdk_nvmf_rdma_wr		rdma_wr;

	TAILQ_ENTRY(spdk_nvmf_rdma_request)	link;
@@ -1357,6 +1359,9 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
		rdma_req->data.wr.wr.rdma.rkey = sgl->keyed.key;
		rdma_req->data.wr.wr.rdma.remote_addr = sgl->address;

		/* set the number of outstanding data WRs for this request. */
		rdma_req->num_outstanding_data_wr = 1;

		SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Request %p took %d buffer/s from central pool\n", rdma_req,
			      rdma_req->req.iovcnt);

@@ -1384,6 +1389,7 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
			return -1;
		}

		rdma_req->num_outstanding_data_wr = 0;
		rdma_req->req.data = rdma_req->recv->buf + offset;
		rdma_req->data_from_pool = false;
		rdma_req->req.length = sgl->unkeyed.length;
@@ -2726,8 +2732,12 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,

				SPDK_ERRLOG("data=%p length=%u\n", rdma_req->req.data, rdma_req->req.length);
				if (rdma_req->data.wr.opcode == IBV_WR_RDMA_READ) {
					assert(rdma_req->num_outstanding_data_wr > 0);
					rdma_req->num_outstanding_data_wr--;
					if (rdma_req->num_outstanding_data_wr == 0) {
						spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_COMPLETED);
					}
				}
				break;
			case RDMA_WR_TYPE_DRAIN_RECV:
				rqpair = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_qpair, drain_recv_wr);
@@ -2795,8 +2805,13 @@ spdk_nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
			rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);

			assert(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
			/* wait for all outstanding reads associated with the same rdma_req to complete before proceeding. */
			assert(rdma_req->num_outstanding_data_wr > 0);
			rdma_req->num_outstanding_data_wr--;
			if (rdma_req->num_outstanding_data_wr == 0) {
				spdk_nvmf_rdma_request_set_state(rdma_req, RDMA_REQUEST_STATE_READY_TO_EXECUTE);
				spdk_nvmf_rdma_request_process(rtransport, rdma_req);
			}

			/* Try to process other queued requests */
			spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, false);