Commit 4cef00cb authored by Shuhei Matsumoto, committed by Tomasz Zawadzki
Browse files

nvme_rdma: Merge alloc_ and register_reqs/rsps into create_reqs/rsps functions



In the following patches, the poll group will own rsps objects. To share
the code between the poll group and the qpair, an option structure for creation will be used.

As a preparation, merge nvme_rdma_alloc_rsps() and
nvme_rdma_register_rsps() into nvme_rdma_create_rsps(). For consistency,
merge nvme_rdma_alloc_reqs() and nvme_rdma_register_reqs() into
nvme_rdma_create_reqs().

Update unit tests accordingly.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Signed-off-by: Denis Nagorny <denisn@nvidia.com>
Signed-off-by: Evgeniy Kochetov <evgeniik@nvidia.com>
Change-Id: I92ec9e642043da601b38b890089eaa96c3ad870a
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14170


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
parent 8e48517f
Loading
Loading
Loading
Loading
+24 −57
Original line number Diff line number Diff line
@@ -830,8 +830,12 @@ nvme_rdma_free_rsps(struct nvme_rdma_qpair *rqpair)
}

static int
nvme_rdma_alloc_rsps(struct nvme_rdma_qpair *rqpair)
nvme_rdma_create_rsps(struct nvme_rdma_qpair *rqpair)
{
	struct spdk_rdma_memory_translation translation;
	uint16_t i;
	int rc;

	rqpair->rsps = NULL;
	rqpair->rsp_recv_wrs = NULL;

@@ -856,19 +860,6 @@ nvme_rdma_alloc_rsps(struct nvme_rdma_qpair *rqpair)
		goto fail;
	}

	return 0;
fail:
	nvme_rdma_free_rsps(rqpair);
	return -ENOMEM;
}

static int
nvme_rdma_register_rsps(struct nvme_rdma_qpair *rqpair)
{
	struct spdk_rdma_memory_translation translation;
	uint16_t i;
	int rc;

	for (i = 0; i < rqpair->num_entries; i++) {
		struct ibv_sge *rsp_sgl = &rqpair->rsp_sgls[i];
		struct spdk_nvme_rdma_rsp *rsp = &rqpair->rsps[i];
@@ -881,7 +872,7 @@ nvme_rdma_register_rsps(struct nvme_rdma_qpair *rqpair)
		rsp_sgl->length = sizeof(struct spdk_nvme_cpl);
		rc = spdk_rdma_get_translation(rqpair->mr_map, rsp, sizeof(*rsp), &translation);
		if (rc) {
			return rc;
			goto fail;
		}
		rsp_sgl->lkey = spdk_rdma_memory_translation_get_lkey(&translation);

@@ -898,6 +889,9 @@ nvme_rdma_register_rsps(struct nvme_rdma_qpair *rqpair)
	rqpair->current_num_recvs = rqpair->num_entries;

	return 0;
fail:
	nvme_rdma_free_rsps(rqpair);
	return -ENOMEM;
}

static void
@@ -915,9 +909,11 @@ nvme_rdma_free_reqs(struct nvme_rdma_qpair *rqpair)
}

static int
nvme_rdma_alloc_reqs(struct nvme_rdma_qpair *rqpair)
nvme_rdma_create_reqs(struct nvme_rdma_qpair *rqpair)
{
	struct spdk_rdma_memory_translation translation;
	uint16_t i;
	int rc;

	rqpair->rdma_reqs = spdk_zmalloc(rqpair->num_entries * sizeof(struct spdk_nvme_rdma_req), 0, NULL,
					 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
@@ -946,6 +942,12 @@ nvme_rdma_alloc_reqs(struct nvme_rdma_qpair *rqpair)

		rdma_req->id = i;

		rc = spdk_rdma_get_translation(rqpair->mr_map, cmd, sizeof(*cmd), &translation);
		if (rc) {
			goto fail;
		}
		rdma_req->send_sgl[0].lkey = spdk_rdma_memory_translation_get_lkey(&translation);

		/* The first RDMA sgl element will always point
		 * at this data structure. Depending on whether
		 * an NVMe-oF SGL is required, the length of
@@ -967,25 +969,6 @@ fail:
	return -ENOMEM;
}

static int
nvme_rdma_register_reqs(struct nvme_rdma_qpair *rqpair)
{
	struct spdk_rdma_memory_translation translation;
	uint16_t i;
	int rc;

	for (i = 0; i < rqpair->num_entries; i++) {
		rc = spdk_rdma_get_translation(rqpair->mr_map, &rqpair->cmds[i], sizeof(*rqpair->cmds),
					       &translation);
		if (rc) {
			return rc;
		}
		rqpair->rdma_reqs[i].send_sgl[0].lkey = spdk_rdma_memory_translation_get_lkey(&translation);
	}

	return 0;
}

static int nvme_rdma_connect(struct nvme_rdma_qpair *rqpair);

static int
@@ -1088,37 +1071,21 @@ nvme_rdma_connect_established(struct nvme_rdma_qpair *rqpair, int ret)
		return -1;
	}

	ret = nvme_rdma_alloc_reqs(rqpair);
	ret = nvme_rdma_create_reqs(rqpair);
	SPDK_DEBUGLOG(nvme, "rc =%d\n", ret);
	if (ret) {
		SPDK_ERRLOG("Unable to allocate rqpair RDMA requests\n");
		return -1;
	}
	SPDK_DEBUGLOG(nvme, "RDMA requests allocated\n");

	ret = nvme_rdma_register_reqs(rqpair);
	SPDK_DEBUGLOG(nvme, "rc =%d\n", ret);
	if (ret) {
		SPDK_ERRLOG("Unable to register rqpair RDMA requests\n");
		return -1;
	}
	SPDK_DEBUGLOG(nvme, "RDMA requests registered\n");

	ret = nvme_rdma_alloc_rsps(rqpair);
	SPDK_DEBUGLOG(nvme, "rc =%d\n", ret);
	if (ret < 0) {
		SPDK_ERRLOG("Unable to allocate rqpair RDMA responses\n");
		SPDK_ERRLOG("Unable to create rqpair RDMA requests\n");
		return -1;
	}
	SPDK_DEBUGLOG(nvme, "RDMA responses allocated\n");
	SPDK_DEBUGLOG(nvme, "RDMA requests created\n");

	ret = nvme_rdma_register_rsps(rqpair);
	ret = nvme_rdma_create_rsps(rqpair);
	SPDK_DEBUGLOG(nvme, "rc =%d\n", ret);
	if (ret < 0) {
		SPDK_ERRLOG("Unable to register rqpair RDMA responses\n");
		SPDK_ERRLOG("Unable to create rqpair RDMA responses\n");
		return -1;
	}
	SPDK_DEBUGLOG(nvme, "RDMA responses registered\n");
	SPDK_DEBUGLOG(nvme, "RDMA responses created\n");

	ret = nvme_rdma_qpair_submit_recvs(rqpair);
	SPDK_DEBUGLOG(nvme, "rc =%d\n", ret);
+16 −44
Original line number Diff line number Diff line
@@ -449,7 +449,7 @@ test_nvme_rdma_build_contig_inline_request(void)
}

static void
test_nvme_rdma_alloc_reqs(void)
test_nvme_rdma_create_reqs(void)
{
	struct nvme_rdma_qpair rqpair = {};
	int rc;
@@ -459,7 +459,7 @@ test_nvme_rdma_alloc_reqs(void)
	/* Test case 1: zero entry. Expect: FAIL */
	rqpair.num_entries = 0;

	rc = nvme_rdma_alloc_reqs(&rqpair);
	rc = nvme_rdma_create_reqs(&rqpair);
	CU_ASSERT(rqpair.rdma_reqs == NULL);
	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);

@@ -467,8 +467,9 @@ test_nvme_rdma_alloc_reqs(void)
	memset(&rqpair, 0, sizeof(rqpair));
	rqpair.num_entries = 1;

	rc = nvme_rdma_alloc_reqs(&rqpair);
	rc = nvme_rdma_create_reqs(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].lkey == g_rdma_mr.lkey);
	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].addr
		  == (uint64_t)&rqpair.cmds[0]);
	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.wr_id
@@ -486,9 +487,10 @@ test_nvme_rdma_alloc_reqs(void)
	memset(&rqpair, 0, sizeof(rqpair));
	rqpair.num_entries = 5;

	rc = nvme_rdma_alloc_reqs(&rqpair);
	rc = nvme_rdma_create_reqs(&rqpair);
	CU_ASSERT(rc == 0);
	for (int i = 0; i < 5; i++) {
		CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].lkey == g_rdma_mr.lkey);
		CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].addr
			  == (uint64_t)&rqpair.cmds[i]);
		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.wr_id
@@ -506,7 +508,7 @@ test_nvme_rdma_alloc_reqs(void)
}

static void
test_nvme_rdma_alloc_rsps(void)
test_nvme_rdma_create_rsps(void)
{
	struct nvme_rdma_qpair rqpair = {};
	int rc;
@@ -515,7 +517,7 @@ test_nvme_rdma_alloc_rsps(void)

	/* Test case 1 calloc false */
	rqpair.num_entries = 0;
	rc = nvme_rdma_alloc_rsps(&rqpair);
	rc = nvme_rdma_create_rsps(&rqpair);
	CU_ASSERT(rqpair.rsp_sgls == NULL);
	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);

@@ -523,11 +525,15 @@ test_nvme_rdma_alloc_rsps(void)
	memset(&rqpair, 0, sizeof(rqpair));
	rqpair.num_entries = 1;

	rc = nvme_rdma_alloc_rsps(&rqpair);
	rc = nvme_rdma_create_rsps(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.rsp_sgls != NULL);
	CU_ASSERT(rqpair.rsp_recv_wrs != NULL);
	CU_ASSERT(rqpair.rsps != NULL);
	CU_ASSERT(rqpair.rsp_sgls[0].lkey == g_rdma_mr.lkey);
	CU_ASSERT(rqpair.rsp_sgls[0].addr == (uint64_t)&rqpair.rsps[0]);
	CU_ASSERT(rqpair.rsp_recv_wrs[0].wr_id == (uint64_t)&rqpair.rsps[0].rdma_wr);

	nvme_rdma_free_rsps(&rqpair);
}

@@ -956,39 +962,6 @@ test_nvme_rdma_validate_cm_event(void)
	CU_ASSERT(rc == 0);
}

static void
test_nvme_rdma_register_and_unregister_reqs(void)
{
	struct nvme_rdma_qpair rqpair = {};
	struct spdk_nvmf_cmd cmds = {};
	struct ibv_qp qp = {};
	struct spdk_rdma_qp rdma_qp = {};
	struct rdma_cm_id cm_id = {};
	struct spdk_nvme_rdma_req rdma_reqs[50] = {};
	int rc;

	rqpair.cm_id = &cm_id;
	rqpair.cmds = &cmds;
	rqpair.rdma_qp = &rdma_qp;
	rdma_qp.qp = &qp;
	g_nvme_hooks.get_rkey = NULL;
	rqpair.rdma_reqs = rdma_reqs;

	/* case 1: nvme_rdma_register_req: single entry, expect: PASS */
	rqpair.num_entries = 1;
	rc = nvme_rdma_register_reqs(&rqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].lkey == g_rdma_mr.lkey);

	/* case 2: nvme_rdma_register_req: multiple entry, expect: PASS */
	rqpair.num_entries = 50;
	rc = nvme_rdma_register_reqs(&rqpair);
	CU_ASSERT(rc == 0);
	for (int i = 0; i < rqpair.num_entries; i++) {
		CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].lkey == g_rdma_mr.lkey);
	}
}

static void
test_nvme_rdma_parse_addr(void)
{
@@ -1058,7 +1031,7 @@ test_nvme_rdma_qpair_submit_request(void)
	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
	rqpair.poller = &poller;

	rc = nvme_rdma_alloc_reqs(&rqpair);
	rc = nvme_rdma_create_reqs(&rqpair);
	CU_ASSERT(rc == 0);
	/* Give send_wr.next a non null value */
	rdma_req = TAILQ_FIRST(&rqpair.free_reqs);
@@ -1496,8 +1469,8 @@ main(int argc, char **argv)
	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_inline_request);
	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_request);
	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_inline_request);
	CU_ADD_TEST(suite, test_nvme_rdma_alloc_reqs);
	CU_ADD_TEST(suite, test_nvme_rdma_alloc_rsps);
	CU_ADD_TEST(suite, test_nvme_rdma_create_reqs);
	CU_ADD_TEST(suite, test_nvme_rdma_create_rsps);
	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_create_qpair);
	CU_ADD_TEST(suite, test_nvme_rdma_poller_create);
	CU_ADD_TEST(suite, test_nvme_rdma_qpair_process_cm_event);
@@ -1505,7 +1478,6 @@ main(int argc, char **argv)
	CU_ADD_TEST(suite, test_nvme_rdma_req_put_and_get);
	CU_ADD_TEST(suite, test_nvme_rdma_req_init);
	CU_ADD_TEST(suite, test_nvme_rdma_validate_cm_event);
	CU_ADD_TEST(suite, test_nvme_rdma_register_and_unregister_reqs);
	CU_ADD_TEST(suite, test_nvme_rdma_parse_addr);
	CU_ADD_TEST(suite, test_nvme_rdma_qpair_init);
	CU_ADD_TEST(suite, test_nvme_rdma_qpair_submit_request);