Commit e22dcc07 authored by Shuhei Matsumoto, committed by Tomasz Zawadzki
Browse files

nvme_rdma: Factor out reset failed sends/recvs operation



Factor out reset failed recvs operation into a helper function
nvme_rdma_reset_failed_recvs(). This will make the following
patches simpler.

For send operation, this change is not required yet, but in future
we may support something like shared SQ. Hence, we do this change
for send operation too.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Signed-off-by: Denis Nagorny <denisn@nvidia.com>
Signed-off-by: Evgeniy Kochetov <evgeniik@nvidia.com>
Change-Id: Ib44acebe63e97e5a60ea6fa701b49278c7f44b45
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14171


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
parent 4cef00cb
Loading
Loading
Loading
Loading
+29 −16
Original line number Diff line number Diff line
@@ -770,6 +770,32 @@ nvme_rdma_qpair_init(struct nvme_rdma_qpair *rqpair)
	return 0;
}

/*
 * Roll back the qpair's outstanding-send accounting after a failed flush of
 * the send queue. Every WR in the bad_send_wr chain (the first rejected WR
 * and all that follow it) was never actually posted, so decrement
 * current_num_sends once per chained WR. Also logs the failure with its
 * errno-style code.
 */
static void
nvme_rdma_reset_failed_sends(struct nvme_rdma_qpair *rqpair,
			     struct ibv_send_wr *bad_send_wr, int rc)
{
	struct ibv_send_wr *wr;

	SPDK_ERRLOG("Failed to post WRs on send queue, errno %d (%s), bad_wr %p\n",
		    rc, spdk_strerror(rc), bad_send_wr);

	for (wr = bad_send_wr; wr != NULL; wr = wr->next) {
		/* The counter was incremented when each WR was queued; undo it. */
		assert(rqpair->current_num_sends > 0);
		rqpair->current_num_sends--;
	}
}

/*
 * Roll back the qpair's outstanding-receive accounting after a failed flush
 * of the receive queue. Every WR in the bad_recv_wr chain (the first rejected
 * WR and all that follow it) was never actually posted, so decrement
 * current_num_recvs once per chained WR. Also logs the failure with its
 * errno-style code.
 */
static void
nvme_rdma_reset_failed_recvs(struct nvme_rdma_qpair *rqpair,
			     struct ibv_recv_wr *bad_recv_wr, int rc)
{
	struct ibv_recv_wr *wr;

	SPDK_ERRLOG("Failed to post WRs on receive queue, errno %d (%s), bad_wr %p\n",
		    rc, spdk_strerror(rc), bad_recv_wr);

	for (wr = bad_recv_wr; wr != NULL; wr = wr->next) {
		/* The counter was incremented when each WR was queued; undo it. */
		assert(rqpair->current_num_recvs > 0);
		rqpair->current_num_recvs--;
	}
}

static inline int
nvme_rdma_qpair_submit_sends(struct nvme_rdma_qpair *rqpair)
{
@@ -779,17 +805,10 @@ nvme_rdma_qpair_submit_sends(struct nvme_rdma_qpair *rqpair)
	rc = spdk_rdma_qp_flush_send_wrs(rqpair->rdma_qp, &bad_send_wr);

	if (spdk_unlikely(rc)) {
		SPDK_ERRLOG("Failed to post WRs on send queue, errno %d (%s), bad_wr %p\n",
			    rc, spdk_strerror(rc), bad_send_wr);
		while (bad_send_wr != NULL) {
			assert(rqpair->current_num_sends > 0);
			rqpair->current_num_sends--;
			bad_send_wr = bad_send_wr->next;
		}
		return rc;
		nvme_rdma_reset_failed_sends(rqpair, bad_send_wr, rc);
	}

	return 0;
	return rc;
}

static inline int
@@ -800,13 +819,7 @@ nvme_rdma_qpair_submit_recvs(struct nvme_rdma_qpair *rqpair)

	rc = spdk_rdma_qp_flush_recv_wrs(rqpair->rdma_qp, &bad_recv_wr);
	if (spdk_unlikely(rc)) {
		SPDK_ERRLOG("Failed to post WRs on receive queue, errno %d (%s), bad_wr %p\n",
			    rc, spdk_strerror(rc), bad_recv_wr);
		while (bad_recv_wr != NULL) {
			assert(rqpair->current_num_recvs > 0);
			rqpair->current_num_recvs--;
			bad_recv_wr = bad_recv_wr->next;
		}
		nvme_rdma_reset_failed_recvs(rqpair, bad_recv_wr, rc);
	}

	return rc;