Commit 89a28bfd authored by Shuhei Matsumoto, committed by Jim Harris
Browse files

nvmf/rdma: Factor out WR SGE setup in fill_buffers() into fill_wr_sge()



Factor out setup WR operation from nvmf_rdma_fill_buffers() into a
function nvmf_rdma_fill_wr_sge().

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I813f156b83b6e1773ea76d0d1ed8684b1e267691
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468945


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
parent 7c7a0c0a
Loading
Loading
Loading
Loading
+30 −18
Original line number Diff line number Diff line
@@ -1618,23 +1618,12 @@ nvmf_rdma_fill_buffers_with_md_interleave(struct spdk_nvmf_rdma_transport *rtran
	return 0;
}

static int
nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
		       struct spdk_nvmf_rdma_poll_group *rgroup,
		       struct spdk_nvmf_rdma_device *device,
		       struct spdk_nvmf_request *req,
		       struct ibv_send_wr *wr,
		       uint32_t length)
static bool
nvmf_rdma_fill_wr_sge(struct spdk_nvmf_rdma_device *device,
		      struct spdk_nvmf_request *req, struct ibv_send_wr *wr)
{
	uint64_t	translation_len;

	wr->num_sge = 0;
	while (length) {
		req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
						 NVMF_DATA_BUFFER_MASK) &
						 ~NVMF_DATA_BUFFER_MASK);
		req->iov[req->iovcnt].iov_len  = spdk_min(length,
						 rtransport->transport.opts.io_unit_size);
	translation_len = req->iov[req->iovcnt].iov_len;

	if (!g_nvmf_hooks.get_rkey) {
@@ -1645,9 +1634,35 @@ nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
						(uint64_t)req->iov[req->iovcnt].iov_base, &translation_len);
	}

		/* This is a very rare case that can occur when using DPDK version < 19.05 */
	if (spdk_unlikely(translation_len < req->iov[req->iovcnt].iov_len)) {
		/* This is a very rare case that can occur when using DPDK version < 19.05 */
		SPDK_ERRLOG("Data buffer split over multiple RDMA Memory Regions. Removing it from circulation.\n");
		return false;
	}

	wr->sg_list[wr->num_sge].addr = (uintptr_t)(req->iov[req->iovcnt].iov_base);
	wr->sg_list[wr->num_sge].length = req->iov[req->iovcnt].iov_len;
	wr->num_sge++;

	return true;
}

static int
nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
		       struct spdk_nvmf_rdma_poll_group *rgroup,
		       struct spdk_nvmf_rdma_device *device,
		       struct spdk_nvmf_request *req,
		       struct ibv_send_wr *wr,
		       uint32_t length)
{
	wr->num_sge = 0;
	while (length) {
		req->iov[req->iovcnt].iov_base = (void *)((uintptr_t)(req->buffers[req->iovcnt] +
						 NVMF_DATA_BUFFER_MASK) &
						 ~NVMF_DATA_BUFFER_MASK);
		req->iov[req->iovcnt].iov_len  = spdk_min(length,
						 rtransport->transport.opts.io_unit_size);
		if (spdk_unlikely(!nvmf_rdma_fill_wr_sge(device, req, wr))) {
			if (nvmf_rdma_replace_buffer(rgroup, &req->buffers[req->iovcnt]) == -ENOMEM) {
				return -ENOMEM;
			}
@@ -1655,10 +1670,7 @@ nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,
		}

		length -= req->iov[req->iovcnt].iov_len;
		wr->sg_list[wr->num_sge].addr = (uintptr_t)(req->iov[req->iovcnt].iov_base);
		wr->sg_list[wr->num_sge].length = req->iov[req->iovcnt].iov_len;
		req->iovcnt++;
		wr->num_sge++;
	}

	return 0;