Commit 170e3745 authored by Aleksey Marchuk, committed by Tomasz Zawadzki
Browse files

nvmf/rdma: Split buffer alloc and WR setup code, part 1



As part of preparation to use iobuf based requests queueing,
extract code which can be used to configure a request with
allocated buffer.
This commit handles KEYED_DATA_BLOCK requests.

Change-Id: Id9ab9e5c658c792faca3163efe2982979318bfc1
Signed-off-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-on: https://review.spdk.io/c/spdk/spdk/+/26653


Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <jim.harris@nvidia.com>
Tested-by: SPDK Automated Test System <spdkbot@gmail.com>
Reviewed-by: Krzysztof Goreczny <krzysztof.goreczny@dell.com>
Reviewed-by: Konrad Sztyber <ksztyber@nvidia.com>
parent 10fbe570
Loading
Loading
Loading
Loading
+22 −26
Original line number Diff line number Diff line
@@ -1657,35 +1657,19 @@ nvmf_rdma_calc_num_wrs(uint32_t length, uint32_t io_unit_size, uint32_t block_si
static int
nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
			    struct spdk_nvmf_rdma_device *device,
			    struct spdk_nvmf_rdma_request *rdma_req)
			    struct spdk_nvmf_rdma_request *rdma_req,
			    uint32_t length)
{
	struct spdk_nvmf_rdma_qpair		*rqpair;
	struct spdk_nvmf_rdma_poll_group	*rgroup;
	struct spdk_nvmf_request		*req = &rdma_req->req;
	struct ibv_send_wr			*wr = &rdma_req->data.wr;
	int					rc;
	int					rc = 0;
	uint32_t				num_wrs = 1;
	uint32_t				length;

	rqpair = SPDK_CONTAINEROF(req->qpair, struct spdk_nvmf_rdma_qpair, qpair);
	rgroup = rqpair->poller->group;

	/* rdma wr specifics */
	nvmf_rdma_setup_request(rdma_req);

	length = req->length;
	if (spdk_unlikely(req->dif_enabled)) {
		req->dif.orig_length = length;
		length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
		req->dif.elba_length = length;
	}

	rc = spdk_nvmf_request_get_buffers(req, &rgroup->group, &rtransport->transport,
					   length);
	if (spdk_unlikely(rc != 0)) {
		return rc;
	}

	assert(req->iovcnt <= rqpair->max_send_sge);

	/* When dif_insert_or_strip is true and the I/O data length is greater than one block,
@@ -1909,11 +1893,13 @@ nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,
			    struct spdk_nvmf_rdma_request *rdma_req)
{
	struct spdk_nvmf_request		*req = &rdma_req->req;
	struct spdk_nvmf_rdma_qpair		*rqpair;
	struct spdk_nvme_cpl			*rsp;
	struct spdk_nvme_sgl_descriptor		*sgl;
	int					rc;
	uint32_t				length;

	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
	rsp = &req->rsp->nvme_cpl;
	sgl = &req->cmd->nvme_cmd.dptr.sgl1;

@@ -1939,18 +1925,28 @@ nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,

		/* fill request length and populate iovs */
		req->length = length;
		/* rdma wr specifics */
		nvmf_rdma_setup_request(rdma_req);
		if (spdk_unlikely(req->dif_enabled)) {
			req->dif.orig_length = length;
			length = spdk_dif_get_length_with_md(length, &req->dif.dif_ctx);
			req->dif.elba_length = length;
		}

		rc = nvmf_rdma_request_fill_iovs(rtransport, device, rdma_req);
		rc = spdk_nvmf_request_get_buffers(req, &rqpair->poller->group->group, &rtransport->transport,
						   length);
		if (spdk_unlikely(rc != 0)) {
			/* No available buffers. Queue this request up. */
			SPDK_DEBUGLOG(rdma, "No available large data buffers. Queueing request %p\n", rdma_req);
			return 0;
		}

		rc = nvmf_rdma_request_fill_iovs(rtransport, device, rdma_req, length);
		if (spdk_unlikely(rc < 0)) {
			if (rc == -EINVAL) {
			SPDK_ERRLOG("SGL length exceeds the max I/O size\n");
			rsp->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
			return -1;
		}
			/* No available buffers. Queue this request up. */
			SPDK_DEBUGLOG(rdma, "No available large data buffers. Queueing request %p\n", rdma_req);
			return 0;
		}

		SPDK_DEBUGLOG(rdma, "Request %p took %d buffer/s from central pool\n", rdma_req,
			      req->iovcnt);