Commit dbdc8390 authored by xupeng-mingtu, committed by Tomasz Zawadzki
Browse files

nvmf/rdma: Keep the rdma_req in pending_rdma_read_queue until it completes data transfer



Signed-off-by: xupeng-mingtu <jingmamour@gmail.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/23748

 (master)

(cherry picked from commit e53bd98a)
Change-Id: I5436a2892796a8014470d1d83bee6e58ebcb9fee
Signed-off-by: Marek Chomnicki <marek.chomnicki@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/23777


Reviewed-by: Jim Harris <jim.harris@samsung.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
parent 28a4e1bd
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -2207,7 +2207,9 @@ nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
			}

			/* We have already verified that this request is the head of the queue. */
			if (rdma_req->num_remaining_data_wr == 0) {
				STAILQ_REMOVE_HEAD(&rqpair->pending_rdma_read_queue, state_link);
			}

			rc = request_transfer_in(&rdma_req->req);
			if (spdk_likely(rc == 0)) {
@@ -4726,8 +4728,6 @@ nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
					if (rdma_req->num_remaining_data_wr) {
						/* Only part of RDMA_READ operations was submitted, process the rest */
						nvmf_rdma_request_reset_transfer_in(rdma_req, rtransport);
						/* Prioritize partially handled request over others to avoid latency increase */
						STAILQ_INSERT_HEAD(&rqpair->pending_rdma_read_queue, rdma_req, state_link);
						rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING;
						nvmf_rdma_request_process(rtransport, rdma_req);
						break;