Commit 94cd652b authored by Ziye Yang's avatar Ziye Yang Committed by Changpeng Liu
Browse files

nvmf/tcp: Add a poller to check the timeout of each qpair



This makes the timeout check for each qpair in the group
more efficient, so that it scales well even when there are
many qpairs in the group.

Change-Id: I75c29a92107dc32377a2ef7edb5ac92868f1c5df
Signed-off-by: default avatarZiye Yang <ziye.yang@intel.com>
Reviewed-on: https://review.gerrithub.io/435277


Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarShuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: default avatarBen Walker <benjamin.walker@intel.com>
Reviewed-by: default avatarChangpeng Liu <changpeng.liu@intel.com>
parent 9d11abfd
Loading
Loading
Loading
Loading
+46 −35
Original line number Diff line number Diff line
@@ -282,6 +282,7 @@ struct nvme_tcp_qpair {
struct spdk_nvmf_tcp_poll_group {
	struct spdk_nvmf_transport_poll_group	group;
	struct spdk_sock_group			*sock_group;
	struct spdk_poller			*timeout_poller;
	TAILQ_HEAD(, nvme_tcp_qpair)		qpairs;
};

@@ -1216,6 +1217,47 @@ spdk_nvmf_tcp_discover(struct spdk_nvmf_transport *transport,
	entry->tsas.tcp.sectype = SPDK_NVME_TCP_SECURITY_NONE;
}

/*
 * Mark a qpair as EXITING if no PDU has arrived within its timeout window.
 * tsc is the current tick count, sampled once by the caller so all qpairs
 * in the group are checked against the same timestamp.
 */
static void
spdk_nvmf_tcp_qpair_handle_timeout(struct nvme_tcp_qpair *tqpair, uint64_t tsc)
{
	/* Qpair is already being torn down; nothing to do. */
	if ((tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) ||
	    (tqpair->state == NVME_TCP_QPAIR_STATE_EXITED)) {
		return;
	}

	/* Keep-alive is not supported yet, so skip qpairs that are in the
	 * error state or still waiting for their first PDU.
	 * TODO: revisit this check once generic keep-alive support is added.
	 */
	if ((tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR) ||
	    (tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY)) {
		return;
	}

	/* Check for interval expiration: tqpair->timeout is in seconds
	 * (see the log message below), so scale it by the tick frequency
	 * before comparing against the ticks elapsed since the last PDU.
	 */
	if ((tsc - tqpair->last_pdu_time) > (tqpair->timeout  * spdk_get_ticks_hz())) {
		SPDK_ERRLOG("No pdu coming for tqpair=%p within %d seconds\n", tqpair, tqpair->timeout);
		tqpair->state = NVME_TCP_QPAIR_STATE_EXITING;
	}
}

/*
 * Periodic poller callback for a TCP poll group: check every qpair in the
 * group for PDU-arrival timeout and disconnect any that has expired.
 *
 * ctx is the struct spdk_nvmf_tcp_poll_group * that registered this poller.
 */
static int
spdk_nvmf_tcp_poll_group_handle_timeout(void *ctx)
{
	struct spdk_nvmf_tcp_poll_group *tgroup = ctx;
	struct nvme_tcp_qpair *tqpair, *tmp;
	/* Sample the clock once so all qpairs see the same timestamp. */
	uint64_t tsc = spdk_get_ticks();

	/* SAFE iteration: spdk_nvmf_qpair_disconnect() may remove the
	 * current qpair from the group's list. */
	TAILQ_FOREACH_SAFE(tqpair, &tgroup->qpairs, link, tmp) {
		spdk_nvmf_tcp_qpair_handle_timeout(tqpair, tsc);
		if (tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) {
			/* Advance to EXITED immediately so a later poller run
			 * cannot issue a second disconnect for this qpair. */
			tqpair->state = NVME_TCP_QPAIR_STATE_EXITED;
			SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "will disconnect the tqpair=%p\n", tqpair);
			spdk_nvmf_qpair_disconnect(&tqpair->qpair, NULL, NULL);
		}
	}

	return -1;
}

static struct spdk_nvmf_transport_poll_group *
spdk_nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
{
@@ -1232,6 +1274,9 @@ spdk_nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
	}

	TAILQ_INIT(&tgroup->qpairs);

	tgroup->timeout_poller = spdk_poller_register(spdk_nvmf_tcp_poll_group_handle_timeout, tgroup,
				 1000000);
	return &tgroup->group;

cleanup:
@@ -1246,6 +1291,7 @@ spdk_nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)

	tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
	spdk_sock_group_close(&tgroup->sock_group);
	spdk_poller_unregister(&tgroup->timeout_poller);
	free(tgroup);
}

@@ -2693,34 +2739,10 @@ spdk_nvmf_tcp_close_qpair(struct spdk_nvmf_qpair *qpair)
	spdk_nvmf_tcp_qpair_destroy(SPDK_CONTAINEROF(qpair, struct nvme_tcp_qpair, qpair));
}

/*
 * Per-qpair timeout check (called from the poll loop): mark the qpair
 * EXITING when no PDU has arrived within tqpair->timeout seconds.
 * NOTE(review): the name misspells "timeout" as "timout"; kept as-is
 * because it is this block's external identifier.
 */
static void
spdk_nvmf_tcp_qpair_handle_timout(struct nvme_tcp_qpair *tqpair)
{
	uint64_t	tsc;

	/* Qpair is already being torn down; nothing to do. */
	if ((tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) ||
	    (tqpair->state == NVME_TCP_QPAIR_STATE_EXITED)) {
		return;
	}

	/* Keep-alive is not supported yet; only qpairs already in the
	 * error recv state are considered for timeout here.
	 * TODO: revisit once generic keep-alive support is added. */
	if (tqpair->recv_state != NVME_TCP_PDU_RECV_STATE_ERROR) {
		return;
	}

	/* Check for interval expiration: tqpair->timeout is in seconds
	 * (see the log message below), so scale it by the tick frequency
	 * before comparing against the ticks elapsed since the last PDU. */
	tsc = spdk_get_ticks();
	if ((tsc - tqpair->last_pdu_time) > (tqpair->timeout  * spdk_get_ticks_hz())) {
		SPDK_ERRLOG("No pdu coming for tqpair=%p within %d seconds\n", tqpair, tqpair->timeout);
		tqpair->state = NVME_TCP_QPAIR_STATE_EXITING;
	}
}

static int
spdk_nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct nvme_tcp_qpair *tqpair, *tmp;
	int rc;

	tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
@@ -2735,17 +2757,6 @@ spdk_nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
		return rc;
	}


	TAILQ_FOREACH_SAFE(tqpair, &tgroup->qpairs, link, tmp) {
		spdk_nvmf_tcp_qpair_handle_timout(tqpair);
		if (tqpair->state == NVME_TCP_QPAIR_STATE_EXITING) {
			/* to prevent the state is set again */
			tqpair->state = NVME_TCP_QPAIR_STATE_EXITED;
			SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "will disconect the tqpair=%p\n", tqpair);
			spdk_nvmf_qpair_disconnect(&tqpair->qpair, NULL, NULL);
		}
	}

	return 0;
}

+7 −0
Original line number Diff line number Diff line
@@ -302,11 +302,18 @@ test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_tcp_transport ttransport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_thread *thread;

	thread = spdk_allocate_thread(NULL, NULL, NULL, NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&ttransport, 0, sizeof(ttransport));
	group = spdk_nvmf_tcp_poll_group_create(&ttransport.transport);
	CU_ASSERT_PTR_NOT_NULL(group);
	spdk_nvmf_tcp_poll_group_destroy(group);

	spdk_free_thread();
}

static void