Commit 01887d3c authored by Evgeniy Kochetov's avatar Evgeniy Kochetov Committed by Jim Harris
Browse files

nvmf/rdma: Fix data WR release



One of the stop conditions in the data WR release function was wrong. This
can cause release of uncompleted data WRs. Releasing WRs that are not
yet completed leads to various side effects, up to and including data
corruption.

The issue was introduced with send WR batching feature in commit
9d63933b.

This patch fixes stop condition and contains some refactoring to
simplify WR release function.

Signed-off-by: Evgeniy Kochetov <evgeniik@mellanox.com>
Signed-off-by: Sasha Kotchubievsky <sashakot@mellanox.com>
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Change-Id: Ie79f64da345e38038f16a0210bef240f63af325b
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/466029


Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent a9879ed1
Loading
Loading
Loading
Loading
+14 −32
Original line number Diff line number Diff line
@@ -644,40 +644,22 @@ static void
nvmf_rdma_request_free_data(struct spdk_nvmf_rdma_request *rdma_req,
			    struct spdk_nvmf_rdma_transport *rtransport)
{
	struct spdk_nvmf_rdma_request_data	*data_wr;
	struct ibv_send_wr			*next_send_wr;
	uint64_t				req_wrid;

	rdma_req->num_outstanding_data_wr = 0;

	/* Walk the chain of data WRs belonging to this request and release
	 * them. All data WRs of a single request carry the same wr_id (the
	 * batched send WRs share the request's wr_id), so a WR whose wr_id
	 * differs belongs to another request and must NOT be released here —
	 * releasing WRs that are still outstanding can corrupt data.
	 */
	data_wr = &rdma_req->data;
	req_wrid = data_wr->wr.wr_id;
	while (data_wr && data_wr->wr.wr_id == req_wrid) {
		/* Clear only the SGEs actually in use, then mark the WR empty. */
		memset(data_wr->sgl, 0, sizeof(data_wr->wr.sg_list[0]) * data_wr->wr.num_sge);
		data_wr->wr.num_sge = 0;

		/* Grab the successor before the WR is returned to the pool. */
		next_send_wr = data_wr->wr.next;

		/* The first data WR is embedded in rdma_req itself and is not
		 * pool-allocated; only the additional WRs go back to the pool.
		 */
		if (data_wr != &rdma_req->data) {
			spdk_mempool_put(rtransport->data_wr_pool, data_wr);
		}

		/* Stop at the end of the chain or at the response WR, which
		 * terminates this request's data WR list.
		 */
		data_wr = (!next_send_wr || next_send_wr == &rdma_req->rsp.wr) ? NULL :
			  SPDK_CONTAINEROF(next_send_wr, struct spdk_nvmf_rdma_request_data, wr);
	}
}