Commit 5593b61f authored by Shuhei Matsumoto, committed by Jim Harris

nvmf/rdma: Separate filling wr->sg_list from filling req->iov

This patch separates filling wr->sg_list from filling req->iov
in nvmf_rdma_fill_buffers() and creates a new helper function,
nvmf_rdma_fill_wr_sgl(), to fill wr->sg_list. A new iovpos member
is added to struct spdk_nvmf_rdma_request to track the current
position in req->iov while building the SGL.

The next patch will make the same change to
nvmf_rdma_fill_buffers_with_md_interleave().

The subsequent patches will merge nvmf_rdma_fill_buffers() into
spdk_nvmf_request_get_buffers().
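
To make the new division of labor concrete, here is a minimal
standalone sketch of the pattern in C. This is not SPDK code:
fake_req, fill_iovs(), fill_sgl(), and the constants are simplified
stand-ins for the SPDK request structures, nvmf_rdma_fill_buffers(),
and nvmf_rdma_fill_wr_sgl(). One pass maps buffers into the iov
array and advances iovcnt; a second, independent pass walks the iovs
with iovpos to build the scatter-gather list:

#include <stdint.h>
#include <sys/uio.h>

#define IO_UNIT_SIZE 8192u
#define MAX_IOV      16

/* Stand-in for struct ibv_sge. */
struct sge { uint64_t addr; uint32_t len; uint32_t lkey; };

struct fake_req {
	void		*buffers[MAX_IOV];
	struct iovec	 iov[MAX_IOV];
	int		 iovcnt;	/* advanced only when filling iovs */
	int		 iovpos;	/* advanced only when building the SGL */
};

/* Pass 1: map raw buffers into req->iov (the role the patch leaves
 * to nvmf_rdma_fill_buffers()). */
static void
fill_iovs(struct fake_req *req, uint32_t length)
{
	while (length) {
		struct iovec *iov = &req->iov[req->iovcnt];

		iov->iov_base = req->buffers[req->iovcnt];
		iov->iov_len = length < IO_UNIT_SIZE ? length : IO_UNIT_SIZE;
		length -= (uint32_t)iov->iov_len;
		req->iovcnt++;
	}
}

/* Pass 2: translate iov entries into SGEs (the role of the new
 * nvmf_rdma_fill_wr_sgl()); iovpos decouples this pass from iovcnt. */
static int
fill_sgl(struct fake_req *req, struct sge *sg_list, int *num_sge,
	 uint32_t length)
{
	*num_sge = 0;
	while (length) {
		struct iovec *iov = &req->iov[req->iovpos];
		struct sge *sg_ele = &sg_list[(*num_sge)++];

		sg_ele->addr = (uintptr_t)iov->iov_base;
		sg_ele->len = (uint32_t)iov->iov_len;
		sg_ele->lkey = 0;	/* a real transport would look up the MR lkey here */
		length -= (uint32_t)iov->iov_len;
		req->iovpos++;
	}
	return 0;
}

int main(void)
{
	static char bufs[2][IO_UNIT_SIZE];
	struct fake_req req = { .buffers = { bufs[0], bufs[1] } };
	struct sge sg_list[MAX_IOV];
	int num_sge;

	fill_iovs(&req, 12000);		/* 12000 bytes -> two iov entries */
	fill_sgl(&req, sg_list, &num_sge, 12000);
	return num_sge == 2 ? 0 : 1;
}

Keeping the two cursors separate is what lets the multi-SGL path in
the diff below grow req->iov across all descriptors while building
each work request's sg_list from its own starting iovpos.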

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ia4cdf134df39997deb06522cbcb6af6666712ccc
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/469203
Community-CI: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 5ea1c959
+38 −20
@@ -264,6 +264,8 @@ struct spdk_nvmf_rdma_request {
 
 	struct spdk_nvmf_rdma_request_data	data;
 
+	int					iovpos;
+
 	uint32_t				num_outstanding_data_wr;
 	uint64_t				receive_tsc;
@@ -1659,9 +1661,10 @@ nvmf_rdma_fill_buffers_with_md_interleave(struct spdk_nvmf_rdma_transport *rtransport,
 
 static bool
 nvmf_rdma_fill_wr_sge(struct spdk_nvmf_rdma_device *device,
-		      struct spdk_nvmf_request *req, struct ibv_send_wr *wr)
+		      struct spdk_nvmf_request *req, struct ibv_send_wr *wr,
+		      int iovpos)
 {
-	struct iovec	*iov = &req->iov[req->iovcnt];
+	struct iovec	*iov = &req->iov[iovpos];
 	struct ibv_sge	*sg_ele = &wr->sg_list[wr->num_sge];
 
 	if (spdk_unlikely(!nvmf_rdma_get_lkey(device, iov, &sg_ele->lkey))) {
@@ -1678,36 +1681,48 @@ nvmf_rdma_fill_wr_sge(struct spdk_nvmf_rdma_device *device,
 }
 
 static int
-nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
-		       struct spdk_nvmf_rdma_poll_group *rgroup,
-		       struct spdk_nvmf_rdma_device *device,
-		       struct spdk_nvmf_request *req,
-		       struct ibv_send_wr *wr,
-		       uint32_t length)
+nvmf_rdma_fill_wr_sgl(struct spdk_nvmf_rdma_poll_group *rgroup,
+		      struct spdk_nvmf_rdma_device *device,
+		      struct spdk_nvmf_rdma_request *rdma_req,
+		      struct ibv_send_wr *wr,
+		      uint32_t length)
 {
+	struct spdk_nvmf_request *req = &rdma_req->req;
+
 	wr->num_sge = 0;
 	while (length) {
-		req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
-						 NVMF_DATA_BUFFER_MASK) &
-						 ~NVMF_DATA_BUFFER_MASK);
-		req->iov[req->iovcnt].iov_len  = spdk_min(length,
-						 rtransport->transport.opts.io_unit_size);
-		while (spdk_unlikely(!nvmf_rdma_fill_wr_sge(device, req, wr))) {
-			if (nvmf_rdma_replace_buffer(rgroup, &req->buffers[req->iovcnt]) == -ENOMEM) {
+		while (spdk_unlikely(!nvmf_rdma_fill_wr_sge(device, req, wr, rdma_req->iovpos))) {
+			if (nvmf_rdma_replace_buffer(rgroup, &req->buffers[rdma_req->iovpos]) == -ENOMEM) {
 				return -ENOMEM;
 			}
-			req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
+			req->iov[rdma_req->iovpos].iov_base = (void *)((uintptr_t)(req->buffers[rdma_req->iovpos] +
							      NVMF_DATA_BUFFER_MASK) &
							      ~NVMF_DATA_BUFFER_MASK);
 		}
 
-		length -= req->iov[req->iovcnt].iov_len;
-		req->iovcnt++;
+		length -= req->iov[rdma_req->iovpos].iov_len;
+		rdma_req->iovpos++;
 	}
 
 	return 0;
 }
 
+static void
+nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
+		       struct spdk_nvmf_request *req,
+		       uint32_t length)
+{
+	while (length) {
+		req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
+						 NVMF_DATA_BUFFER_MASK) &
+						 ~NVMF_DATA_BUFFER_MASK);
+		req->iov[req->iovcnt].iov_len  = spdk_min(length,
+						 rtransport->transport.opts.io_unit_size);
+		length -= req->iov[req->iovcnt].iov_len;
+		req->iovcnt++;
+	}
+}
+
 static int
 spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 				 struct spdk_nvmf_rdma_device *device,
@@ -1729,6 +1744,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 	}
 
 	req->iovcnt = 0;
+	rdma_req->iovpos = 0;
 
 	if (spdk_unlikely(rdma_req->dif_insert_or_strip)) {
 		rc = nvmf_rdma_fill_buffers_with_md_interleave(rtransport,
@@ -1739,7 +1755,8 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
 				length,
 				&rdma_req->dif_ctx);
 	} else {
-		rc = nvmf_rdma_fill_buffers(rtransport, rgroup, device, req, wr, length);
+		nvmf_rdma_fill_buffers(rtransport, req, length);
+		rc = nvmf_rdma_fill_wr_sgl(rgroup, device, rdma_req, wr, length);
 	}
 	if (rc != 0) {
 		goto err_exit;
@@ -1793,6 +1810,7 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtransport,
 
 	req->iovcnt = 0;
 	req->length = 0;
+	rdma_req->iovpos = 0;
 	desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
 	for (i = 0; i < num_sgl_descriptors; i++) {
 		/* The descriptors must be keyed data block descriptors with an address, not an offset. */
@@ -1810,8 +1828,8 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtransport,
 
 		current_wr->num_sge = 0;
 
-		rc = nvmf_rdma_fill_buffers(rtransport, rgroup, device, req, current_wr,
-					    desc->keyed.length);
+		nvmf_rdma_fill_buffers(rtransport, req, desc->keyed.length);
+		rc = nvmf_rdma_fill_wr_sgl(rgroup, device, rdma_req, current_wr, desc->keyed.length);
 		if (rc != 0) {
 			rc = -ENOMEM;
 			goto err_exit;
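
One detail worth noting in the diff above: the recurring expression
(buf + NVMF_DATA_BUFFER_MASK) & ~NVMF_DATA_BUFFER_MASK rounds each
buffer pointer up to the next aligned boundary, assuming the mask
follows the usual alignment-minus-one convention. A minimal
standalone check of the idiom, with a stand-in 4 KiB alignment
(SPDK derives the real mask from its own buffer alignment):

#include <assert.h>
#include <stdint.h>

/* Stand-in values, not SPDK's; the pattern is what matters. */
#define DATA_BUFFER_ALIGNMENT 4096u
#define DATA_BUFFER_MASK      (DATA_BUFFER_ALIGNMENT - 1)

int main(void)
{
	uintptr_t p = 0x10010203u;

	/* Round up: add (alignment - 1), then clear the low-order bits. */
	uintptr_t aligned = (p + DATA_BUFFER_MASK) & ~(uintptr_t)DATA_BUFFER_MASK;

	assert(aligned % DATA_BUFFER_ALIGNMENT == 0);
	assert(aligned >= p && aligned - p < DATA_BUFFER_ALIGNMENT);
	return 0;
}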