Commit 20370a5d authored by Ben Walker, committed by Konrad Sztyber
Browse files

nvme: In unit tests, initialize payload using the macros



This is the correct way to do it.

Change-Id: I73d3b313b2d4709f32c8d3974626df597af69fb2
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17897


Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
parent e0b9348b
Loading
Loading
Loading
Loading
+8 −15
Original line number Diff line number Diff line
@@ -502,7 +502,7 @@ test_nvme_pcie_qpair_build_metadata(void)
	tr.req = &req;
	qpair->ctrlr = &ctrlr;

	req.payload.md = (void *)0xDEADBEE0;
	req.payload = NVME_PAYLOAD_CONTIG(NULL, (void *)0xDEADBEE0);
	req.md_offset = 0;
	req.md_size = 4096;
	/* The nvme_pcie_qpair_build_metadata() function expects the cmd.psdt
@@ -611,10 +611,7 @@ test_nvme_pcie_qpair_build_prps_sgl_request(void)

	tr.req = &req;
	qpair.ctrlr = &ctrlr;
	req.payload.contig_or_cb_arg = &bio;

	req.payload.reset_sgl_fn = nvme_pcie_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_pcie_ut_next_sge;
	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
	req.payload_size = 4096;
	ctrlr.page_size = 4096;
	bio.iovs[0].iov_base = (void *)0x100000;
@@ -637,9 +634,7 @@ test_nvme_pcie_qpair_build_hw_sgl_request(void)

	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
	qpair.ctrlr = &ctrlr;
	req.payload.contig_or_cb_arg = &bio;
	req.payload.reset_sgl_fn = nvme_pcie_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_pcie_ut_next_sge;
	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
	req.cmd.opc = SPDK_NVME_OPC_WRITE;
	tr.prp_sgl_bus_addr =  0xDAADBEE0;
	g_vtophys_size = 4096;
@@ -677,9 +672,7 @@ test_nvme_pcie_qpair_build_hw_sgl_request(void)
	memset(&tr, 0, sizeof(tr));
	memset(&bio, 0, sizeof(bio));
	memset(&req, 0, sizeof(req));
	req.payload.contig_or_cb_arg = &bio;
	req.payload.reset_sgl_fn = nvme_pcie_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_pcie_ut_next_sge;
	req.payload = NVME_PAYLOAD_SGL(nvme_pcie_ut_reset_sgl, nvme_pcie_ut_next_sge, &bio, NULL);
	req.cmd.opc = SPDK_NVME_OPC_WRITE;
	req.payload_size = 4096;
	bio.iovpos = 1;
@@ -713,8 +706,8 @@ test_nvme_pcie_qpair_build_contig_request(void)

	/* 1 prp, 4k-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x1000;
	req.payload.contig_or_cb_arg = (void *)0x100000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
@@ -722,9 +715,9 @@ test_nvme_pcie_qpair_build_contig_request(void)

	/* 2 prps, non-4K-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x1000;
	req.payload_offset = 0x800;
	req.payload.contig_or_cb_arg = (void *)0x100000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
@@ -733,8 +726,8 @@ test_nvme_pcie_qpair_build_contig_request(void)

	/* 3 prps, 4k-aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100000, NULL);
	req.payload_size = 0x3000;
	req.payload.contig_or_cb_arg = (void *)0x100000;

	rc = nvme_pcie_qpair_build_contig_request(&pqpair.qpair, &req, &tr, true);
	CU_ASSERT(rc == 0);
@@ -745,8 +738,8 @@ test_nvme_pcie_qpair_build_contig_request(void)

	/* address not dword aligned */
	prp_list_prep(&tr, &req, NULL);
	req.payload = NVME_PAYLOAD_CONTIG((void *)0x100001, NULL);
	req.payload_size = 0x3000;
	req.payload.contig_or_cb_arg = (void *)0x100001;
	req.qpair = &pqpair.qpair;
	TAILQ_INIT(&pqpair.outstanding_tr);
	TAILQ_INSERT_TAIL(&pqpair.outstanding_tr, &tr, tq_list);
+10 −18
Original line number Diff line number Diff line
@@ -202,9 +202,7 @@ test_nvme_rdma_build_sgl_request(void)
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
	req.qpair = &rqpair.qpair;

	for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
@@ -311,9 +309,7 @@ test_nvme_rdma_build_sgl_inline_request(void)
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
	req.qpair = &rqpair.qpair;

	/* Test case 1: single inline SGL. Expected: PASS */
@@ -373,7 +369,7 @@ test_nvme_rdma_build_contig_request(void)
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	req.qpair = &rqpair.qpair;

	/* Test case 1: contig request. Expected: PASS */
@@ -416,7 +412,7 @@ test_nvme_rdma_build_contig_inline_request(void)
	rdma_req.id = 0;
	rdma_req.req = &req;

	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	req.qpair = &rqpair.qpair;

	/* Test case 1: single inline SGL. Expected: PASS */
@@ -831,7 +827,7 @@ test_nvme_rdma_req_init(void)
	rdma_req.id = 0;
	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;

	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	/* case 1: req->payload_size == 0, expect: pass. */
	req.payload_size = 0;
	rqpair.qpair.ctrlr->ioccsz_bytes = 1024;
@@ -853,7 +849,7 @@ test_nvme_rdma_req_init(void)
	rqpair.qpair.ctrlr->icdoff = 0;
	req.payload_offset = 0;
	req.payload_size = 1024;
	req.payload.reset_sgl_fn = NULL;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
@@ -870,7 +866,7 @@ test_nvme_rdma_req_init(void)
	rqpair.qpair.ctrlr->icdoff = 1;
	req.payload_offset = 0;
	req.payload_size = 1024;
	req.payload.reset_sgl_fn = NULL;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
@@ -884,9 +880,7 @@ test_nvme_rdma_req_init(void)
	/* icd_supported is true */
	rdma_req.req = NULL;
	rqpair.qpair.ctrlr->icdoff = 0;
	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
	req.qpair = &rqpair.qpair;
	bio.iovpos = 0;
	req.payload_offset = 0;
@@ -908,9 +902,7 @@ test_nvme_rdma_req_init(void)
	/* icd_supported is false */
	rdma_req.req = NULL;
	rqpair.qpair.ctrlr->icdoff = 1;
	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
	req.qpair = &rqpair.qpair;
	bio.iovpos = 0;
	req.payload_offset = 0;
@@ -1023,7 +1015,7 @@ test_nvme_rdma_qpair_submit_request(void)
	struct spdk_nvme_rdma_req	*rdma_req = NULL;

	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
	req.payload_size = 0;
	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
+5 −9
Original line number Diff line number Diff line
@@ -229,9 +229,7 @@ test_nvme_tcp_build_sgl_request(void)
	tqpair.qpair.ctrlr = &ctrlr;
	tcp_req.req = &req;

	req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_tcp_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
	req.qpair = &tqpair.qpair;

	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
@@ -516,8 +514,7 @@ test_nvme_tcp_req_init(void)
	req.qpair = &tqpair.qpair;

	tcp_req.cid = 1;
	req.payload.next_sge_fn = nvme_tcp_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
	req.payload_offset = 0;
	req.payload_size = 4096;
	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
@@ -548,7 +545,7 @@ test_nvme_tcp_req_init(void)
	memset(&req.cmd, 0, sizeof(req.cmd));
	memset(&tcp_req, 0, sizeof(tcp_req));
	tcp_req.cid = 1;
	req.payload.reset_sgl_fn = NULL;
	req.payload = NVME_PAYLOAD_CONTIG(&bio, NULL);
	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;

	rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
@@ -1342,6 +1339,7 @@ test_nvme_tcp_capsule_resp_hdr_handle(void)
	tqpair.stats = &stats;
	req.qpair = &tqpair.qpair;
	req.qpair->ctrlr = &ctrlr;
	req.payload = NVME_PAYLOAD_CONTIG(NULL, NULL);

	rc = nvme_tcp_alloc_reqs(&tqpair);
	SPDK_CU_ASSERT_FATAL(rc == 0);
@@ -1699,14 +1697,12 @@ test_nvme_tcp_qpair_submit_request(void)
	tqpair->stats = &stat;
	req.qpair = &tqpair->qpair;
	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_tcp_ut_next_sge;
	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);

	/* Failed to construct request, because not enough max_sges */
	req.qpair->ctrlr->max_sges = 1;
	req.payload_size = 2048;
	req.payload_offset = 0;
	req.payload.contig_or_cb_arg = &bio;
	bio.iovpos = 0;
	bio.iovs[0].iov_len = 1024;
	bio.iovs[1].iov_len = 1024;