Commit 39a65cc4 authored by Tomasz Zawadzki's avatar Tomasz Zawadzki Committed by Jim Harris
Browse files

ut/nvme_tcp: assign NULL to poll_group in qpair



The nvme_tcp UT marked the poll_group pointer with a sentinel
value, making sure it is never touched. This worked fine since
the UT does not verify poll_group functionality, but it would
have to be changed anyway if the tests were expanded.

Next patch in series adds check for poll_group during
submission that is used in single qpair UTs.

To address that, start off the poll_group with NULL.
No further changes were needed since poll_group add
and remove already appropriately set and unset it.

Modified the UT to accommodate this.

Change-Id: If8cd652333e3e3342d92cea5634109abf8e7129b
Signed-off-by: default avatarTomasz Zawadzki <tomasz.zawadzki@nutanix.com>
Reviewed-on: https://review.spdk.io/c/spdk/spdk/+/25798


Reviewed-by: default avatarJim Harris <jim.harris@nvidia.com>
Reviewed-by: default avatarJacek Kalwas <jacek.kalwas@nutanix.com>
Reviewed-by: default avatarKonrad Sztyber <ksztyber@nvidia.com>
Community-CI: Mellanox Build Bot
Tested-by: default avatarSPDK Automated Test System <spdkbot@gmail.com>
Reviewed-by: default avatarAleksey Marchuk <alexeymar@nvidia.com>
parent f0c38101
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -882,6 +882,8 @@ nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
	qpair->poll_status = NULL;
	qpair->num_outstanding_reqs = 0;

	qpair->poll_group = NULL;

	STAILQ_INIT(&qpair->free_req);
	STAILQ_INIT(&qpair->queued_req);
	STAILQ_INIT(&qpair->aborting_queued_req);
+1 −1
Original line number Diff line number Diff line
@@ -111,7 +111,7 @@ nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
	qpair->qprio = qprio;
	qpair->async = async;
	qpair->trtype = SPDK_NVME_TRANSPORT_TCP;
	qpair->poll_group = (void *)0xDEADBEEF;
	qpair->poll_group = NULL;

	return 0;
}
+3 −2
Original line number Diff line number Diff line
@@ -1053,7 +1053,7 @@ test_nvme_tcp_qpair_connect_sock(void)

	tqpair.qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
	tqpair.qpair.id = 1;
	tqpair.qpair.poll_group = (void *)0xDEADBEEF;
	tqpair.qpair.poll_group = NULL;
	ctrlr->trid.priority = 1;
	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
	memcpy(ctrlr->trid.traddr, "192.168.1.78", sizeof("192.168.1.78"));
@@ -1504,6 +1504,7 @@ test_nvme_tcp_ctrlr_disconnect_qpair(void)

	/* Check that outstanding requests are aborted */
	treq.state = NVME_TCP_REQ_ACTIVE;
	qpair->poll_group = NULL;
	qpair->num_outstanding_reqs = 1;
	qpair->state = NVME_QPAIR_DISCONNECTING;
	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
@@ -1641,7 +1642,7 @@ test_nvme_tcp_ctrlr_create_io_qpair(void)
	CU_ASSERT(qpair->ctrlr == ctrlr);
	CU_ASSERT(qpair->qprio == SPDK_NVME_QPRIO_URGENT);
	CU_ASSERT(qpair->trtype == SPDK_NVME_TRANSPORT_TCP);
	CU_ASSERT(qpair->poll_group == (void *)0xDEADBEEF);
	CU_ASSERT(qpair->poll_group == NULL);
	CU_ASSERT(tqpair->num_entries == 1);

	free(tqpair->tcp_reqs);