Commit 51bde662 authored by Alexey Marchuk's avatar Alexey Marchuk Committed by Konrad Sztyber
Browse files

nvme/rdma: Factor out contig request preparation



Move the NVMF command configuration to dedicated functions; they
will be used in the next patches.
Move rdma_req and cid initialization out of nvme_rdma_req_init,
which is needed in subsequent patches to support accel sequences.

Signed-off-by: default avatarAlexey Marchuk <alexeymar@nvidia.com>
Change-Id: I9aca26d96c92d44b1b3f6542c3cf00fe9af9cc4b
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/24694


Reviewed-by: default avatarShuhei Matsumoto <smatsumoto@nvidia.com>
Community-CI: Mellanox Build Bot
Community-CI: Community CI Samsung <spdk.community.ci.samsung@gmail.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarBen Walker <ben@nvidia.com>
parent 07416b7e
Loading
Loading
Loading
Loading
+52 −38
Original line number Diff line number Diff line
@@ -1433,6 +1433,35 @@ nvme_rdma_build_null_request(struct spdk_nvme_rdma_req *rdma_req)
	return 0;
}

static inline void
nvme_rdma_configure_contig_inline_request(struct spdk_nvme_rdma_req *rdma_req,
		struct nvme_request *req, struct nvme_rdma_memory_translation_ctx *ctx)
{
	rdma_req->send_sgl[1].lkey = ctx->lkey;

	/* The first element of this SGL is pointing at an
	 * spdk_nvmf_cmd object. For this particular command,
	 * we only need the first 64 bytes corresponding to
	 * the NVMe command. */
	rdma_req->send_sgl[0].length = sizeof(struct spdk_nvme_cmd);

	rdma_req->send_sgl[1].addr = (uint64_t)ctx->addr;
	rdma_req->send_sgl[1].length = (uint32_t)ctx->length;

	/* The RDMA SGL contains two elements. The first describes
	 * the NVMe command and the second describes the data
	 * payload. */
	rdma_req->send_wr.num_sge = 2;

	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)ctx->length;
	/* Inline only supported for icdoff == 0 currently.  This function will
	 * not get called for controllers with other values. */
	req->cmd.dptr.sgl1.address = (uint64_t)0;
}

/*
 * Build inline SGL describing contiguous payload buffer.
 */
@@ -1455,7 +1484,16 @@ nvme_rdma_build_contig_inline_request(struct nvme_rdma_qpair *rqpair,
		return -1;
	}

	rdma_req->send_sgl[1].lkey = ctx.lkey;
	nvme_rdma_configure_contig_inline_request(rdma_req, req, &ctx);

	return 0;
}

static inline void
nvme_rdma_configure_contig_request(struct spdk_nvme_rdma_req *rdma_req, struct nvme_request *req,
				   struct nvme_rdma_memory_translation_ctx *ctx)
{
	req->cmd.dptr.sgl1.keyed.key = ctx->rkey;

	/* The first element of this SGL is pointing at an
	 * spdk_nvmf_cmd object. For this particular command,
@@ -1463,23 +1501,14 @@ nvme_rdma_build_contig_inline_request(struct nvme_rdma_qpair *rqpair,
	 * the NVMe command. */
	rdma_req->send_sgl[0].length = sizeof(struct spdk_nvme_cmd);

	rdma_req->send_sgl[1].addr = (uint64_t)ctx.addr;
	rdma_req->send_sgl[1].length = (uint32_t)ctx.length;

	/* The RDMA SGL contains two elements. The first describes
	 * the NVMe command and the second describes the data
	 * payload. */
	rdma_req->send_wr.num_sge = 2;
	/* The RDMA SGL needs one element describing the NVMe command. */
	rdma_req->send_wr.num_sge = 1;

	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)ctx.length;
	/* Inline only supported for icdoff == 0 currently.  This function will
	 * not get called for controllers with other values. */
	req->cmd.dptr.sgl1.address = (uint64_t)0;

	return 0;
	req->cmd.dptr.sgl1.keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	req->cmd.dptr.sgl1.keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	req->cmd.dptr.sgl1.keyed.length = (uint32_t)ctx->length;
	req->cmd.dptr.sgl1.address = (uint64_t)ctx->addr;
}

/*
@@ -1510,22 +1539,7 @@ nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair,
		return -1;
	}

	req->cmd.dptr.sgl1.keyed.key = ctx.rkey;

	/* The first element of this SGL is pointing at an
	 * spdk_nvmf_cmd object. For this particular command,
	 * we only need the first 64 bytes corresponding to
	 * the NVMe command. */
	rdma_req->send_sgl[0].length = sizeof(struct spdk_nvme_cmd);

	/* The RDMA SGL needs one element describing the NVMe command. */
	rdma_req->send_wr.num_sge = 1;

	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	req->cmd.dptr.sgl1.keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	req->cmd.dptr.sgl1.keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	req->cmd.dptr.sgl1.keyed.length = (uint32_t)ctx.length;
	req->cmd.dptr.sgl1.address = (uint64_t)ctx.addr;
	nvme_rdma_configure_contig_request(rdma_req, req, &ctx);

	return 0;
}
@@ -1696,17 +1710,14 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
}

static int
nvme_rdma_req_init(struct nvme_rdma_qpair *rqpair, struct nvme_request *req,
		   struct spdk_nvme_rdma_req *rdma_req)
nvme_rdma_req_init(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_req *rdma_req)
{
	struct nvme_request *req = rdma_req->req;
	struct spdk_nvme_ctrlr *ctrlr = rqpair->qpair.ctrlr;
	enum nvme_payload_type payload_type;
	bool icd_supported;
	int rc;

	assert(rdma_req->req == NULL);
	rdma_req->req = req;
	req->cmd.cid = rdma_req->id;
	payload_type = nvme_payload_type(&req->payload);
	/*
	 * Check if icdoff is non zero, to avoid interop conflicts with
@@ -2258,7 +2269,10 @@ nvme_rdma_qpair_submit_request(struct spdk_nvme_qpair *qpair,
		return -EAGAIN;
	}

	if (nvme_rdma_req_init(rqpair, req, rdma_req)) {
	assert(rdma_req->req == NULL);
	rdma_req->req = req;
	req->cmd.cid = rdma_req->id;
	if (nvme_rdma_req_init(rqpair, rdma_req)) {
		SPDK_ERRLOG("nvme_rdma_req_init() failed\n");
		nvme_rdma_req_put(rqpair, rdma_req);
		return -1;
+7 −9
Original line number Diff line number Diff line
@@ -831,7 +831,9 @@ test_nvme_rdma_req_init(void)
	req.payload_size = 0;
	rqpair.qpair.ctrlr->ioccsz_bytes = 1024;
	rqpair.qpair.ctrlr->icdoff = 0;
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	rdma_req.req = &req;
	req.cmd.cid = rdma_req.id;
	rc = nvme_rdma_req_init(&rqpair, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
@@ -844,12 +846,11 @@ test_nvme_rdma_req_init(void)

	/* case 2: payload_type == NVME_PAYLOAD_TYPE_CONTIG, expect: pass. */
	/* icd_supported is true */
	rdma_req.req = NULL;
	rqpair.qpair.ctrlr->icdoff = 0;
	req.payload_offset = 0;
	req.payload_size = 1024;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	rc = nvme_rdma_req_init(&rqpair, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
@@ -861,12 +862,11 @@ test_nvme_rdma_req_init(void)
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);

	/* icd_supported is false */
	rdma_req.req = NULL;
	rqpair.qpair.ctrlr->icdoff = 1;
	req.payload_offset = 0;
	req.payload_size = 1024;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	rc = nvme_rdma_req_init(&rqpair, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
@@ -877,7 +877,6 @@ test_nvme_rdma_req_init(void)

	/* case 3: payload_type == NVME_PAYLOAD_TYPE_SGL, expect: pass. */
	/* icd_supported is true */
	rdma_req.req = NULL;
	rqpair.qpair.ctrlr->icdoff = 0;
	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
	req.qpair = &rqpair.qpair;
@@ -886,7 +885,7 @@ test_nvme_rdma_req_init(void)
	req.payload_size = 1024;
	bio.iovs[0].iov_base = (void *)0xdeadbeef;
	bio.iovs[0].iov_len = 1024;
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	rc = nvme_rdma_req_init(&rqpair, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
@@ -899,7 +898,6 @@ test_nvme_rdma_req_init(void)
	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);

	/* icd_supported is false */
	rdma_req.req = NULL;
	rqpair.qpair.ctrlr->icdoff = 1;
	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
	req.qpair = &rqpair.qpair;
@@ -908,7 +906,7 @@ test_nvme_rdma_req_init(void)
	req.payload_size = 1024;
	bio.iovs[0].iov_base = (void *)0xdeadbeef;
	bio.iovs[0].iov_len = 1024;
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	rc = nvme_rdma_req_init(&rqpair, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);