Commit eb6006c2 authored by Seth Howell, committed by Ben Walker
Browse files

nvme_rdma: don't send split sgl requests inline.



In order to truly support multi-sgl inline requests in the RDMA
transport, we would need to increase the size of the
spdk_nvme_rdma_req object dramatically. This is because we would need
enough ibv_sge objects in it to support up to the maximum number of SGEs
supported by the target (for SPDK that is up to 16). Instead of doing
that or creating a new pool of shared ibv_sge objects to support that
case, just send split multi-sgl requests through the regular sgl path.

Change-Id: I78313bd88f3ed1cea3b772d9476a00087f49a4dd
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/452266


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent 47097a3f
Loading
Loading
Loading
Loading
+28 −35
Original line number Diff line number Diff line
@@ -1137,7 +1137,6 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
	struct ibv_mr *mr;
	uint32_t length;
	uint64_t requested_size;
	uint32_t remaining_payload;
	void *virt_addr;
	int rc, i;

@@ -1147,17 +1146,18 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
	assert(req->payload.next_sge_fn != NULL);
	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);

	remaining_payload = req->payload_size;
	rdma_req->send_wr.num_sge = 1;

	do {
	rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
	if (rc) {
		return -1;
	}

		if (length > remaining_payload) {
			length = remaining_payload;
	if (length < req->payload_size) {
		SPDK_DEBUGLOG(SPDK_LOG_NVME, "Inline SGL request split so sending separately.\n");
		return nvme_rdma_build_sgl_request(rqpair, rdma_req);
	}

	if (length > req->payload_size) {
		length = req->payload_size;
	}

	requested_size = length;
@@ -1176,18 +1176,11 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
		return -1;
	}

		rdma_req->send_sgl[rdma_req->send_wr.num_sge].addr = (uint64_t)virt_addr;
		rdma_req->send_sgl[rdma_req->send_wr.num_sge].length = length;
		rdma_req->send_sgl[rdma_req->send_wr.num_sge].lkey = mr->lkey;
		rdma_req->send_wr.num_sge++;

		remaining_payload -= length;
	} while (remaining_payload && rdma_req->send_wr.num_sge < (int64_t)rqpair->max_send_sge);
	rdma_req->send_sgl[1].addr = (uint64_t)virt_addr;
	rdma_req->send_sgl[1].length = length;
	rdma_req->send_sgl[1].lkey = mr->lkey;

	if (remaining_payload) {
		SPDK_ERRLOG("Unable to prepare request. Too many SGL elements\n");
		return -1;
	}
	rdma_req->send_wr.num_sge = 2;

	/* The first element of this SGL is pointing at an
	 * spdk_nvmf_cmd object. For this particular command,