Commit b95aae63 authored by Konrad Sztyber, committed by Jim Harris
Browse files

tcp: make nvme_tcp_qpair_state private



There's no reason to have a common enum for qpair state, as the qpair is
processed differently on the target vs. initiator.  The target didn't
even use all of the defined values.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: Icec6ff56929ca51e7c7c5efd784512d78544631b
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/24231


Reviewed-by: Krzysztof Karas <krzysztof.karas@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <jim.harris@samsung.com>
Reviewed-by: Ben Walker <ben@nvidia.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent 73a20ca5
Loading
Loading
Loading
Loading
+0 −10
Original line number Diff line number Diff line
@@ -166,16 +166,6 @@ enum nvme_tcp_error_codes {
	NVME_TCP_PDU_FATAL              = -2,
};

enum nvme_tcp_qpair_state {
	NVME_TCP_QPAIR_STATE_INVALID = 0,
	NVME_TCP_QPAIR_STATE_INITIALIZING = 1,
	NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND = 2,
	NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL = 3,
	NVME_TCP_QPAIR_STATE_RUNNING = 4,
	NVME_TCP_QPAIR_STATE_EXITING = 5,
	NVME_TCP_QPAIR_STATE_EXITED = 6,
};

static const bool g_nvme_tcp_hdgst[] = {
	[SPDK_NVME_TCP_PDU_TYPE_IC_REQ]         = false,
	[SPDK_NVME_TCP_PDU_TYPE_IC_RESP]        = false,
+9 −0
Original line number Diff line number Diff line
@@ -41,6 +41,15 @@
 */
#define NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT	31

enum nvme_tcp_qpair_state {
	NVME_TCP_QPAIR_STATE_INVALID = 0,
	NVME_TCP_QPAIR_STATE_INITIALIZING = 1,
	NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND = 2,
	NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_POLL = 3,
	NVME_TCP_QPAIR_STATE_RUNNING = 4,
	NVME_TCP_QPAIR_STATE_EXITING = 5,
	NVME_TCP_QPAIR_STATE_EXITED = 6,
};

/* NVMe TCP transport extensions for spdk_nvme_ctrlr */
struct nvme_tcp_ctrlr {
+19 −11
Original line number Diff line number Diff line
@@ -106,6 +106,14 @@ enum spdk_nvmf_tcp_req_state {
	TCP_REQUEST_NUM_STATES,
};

enum nvmf_tcp_qpair_state {
	NVMF_TCP_QPAIR_STATE_INVALID = 0,
	NVMF_TCP_QPAIR_STATE_INITIALIZING = 1,
	NVMF_TCP_QPAIR_STATE_RUNNING = 2,
	NVMF_TCP_QPAIR_STATE_EXITING = 3,
	NVMF_TCP_QPAIR_STATE_EXITED = 4,
};

static const char *spdk_nvmf_tcp_term_req_fes_str[] = {
	"Invalid PDU Header Field",
	"PDU Sequence Error",
@@ -258,7 +266,7 @@ struct spdk_nvmf_tcp_qpair {
	struct spdk_sock			*sock;

	enum nvme_tcp_pdu_recv_state		recv_state;
	enum nvme_tcp_qpair_state		state;
	enum nvmf_tcp_qpair_state		state;

	/* PDU being actively received */
	struct nvme_tcp_pdu			*pdu_in_progress;
@@ -1099,7 +1107,7 @@ static void nvmf_tcp_qpair_set_recv_state(struct spdk_nvmf_tcp_qpair *tqpair,
		enum nvme_tcp_pdu_recv_state state);

static void
nvmf_tcp_qpair_set_state(struct spdk_nvmf_tcp_qpair *tqpair, enum nvme_tcp_qpair_state state)
nvmf_tcp_qpair_set_state(struct spdk_nvmf_tcp_qpair *tqpair, enum nvmf_tcp_qpair_state state)
{
	tqpair->state = state;
	spdk_trace_record(TRACE_TCP_QP_STATE_CHANGE, tqpair->qpair.trace_id, 0, 0,
@@ -1113,8 +1121,8 @@ nvmf_tcp_qpair_disconnect(struct spdk_nvmf_tcp_qpair *tqpair)

	spdk_trace_record(TRACE_TCP_QP_DISCONNECT, tqpair->qpair.trace_id, 0, 0);

	if (tqpair->state <= NVME_TCP_QPAIR_STATE_RUNNING) {
		nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_EXITING);
	if (tqpair->state <= NVMF_TCP_QPAIR_STATE_RUNNING) {
		nvmf_tcp_qpair_set_state(tqpair, NVMF_TCP_QPAIR_STATE_EXITING);
		assert(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
		spdk_poller_unregister(&tqpair->timeout_poller);

@@ -2235,7 +2243,7 @@ nvmf_tcp_send_icresp_complete(void *cb_arg)
{
	struct spdk_nvmf_tcp_qpair *tqpair = cb_arg;

	nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_RUNNING);
	nvmf_tcp_qpair_set_state(tqpair, NVMF_TCP_QPAIR_STATE_RUNNING);
}

static void
@@ -2304,7 +2312,7 @@ nvmf_tcp_icreq_handle(struct spdk_nvmf_tcp_transport *ttransport,
	SPDK_DEBUGLOG(nvmf_tcp, "host_hdgst_enable: %u\n", tqpair->host_hdgst_enable);
	SPDK_DEBUGLOG(nvmf_tcp, "host_ddgst_enable: %u\n", tqpair->host_ddgst_enable);

	nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_INITIALIZING);
	nvmf_tcp_qpair_set_state(tqpair, NVMF_TCP_QPAIR_STATE_INITIALIZING);
	nvmf_tcp_qpair_write_mgmt_pdu(tqpair, nvmf_tcp_send_icresp_complete, tqpair);
	nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	return;
@@ -2377,7 +2385,7 @@ nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair)
	pdu = tqpair->pdu_in_progress;
	assert(pdu);
	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ) {
		if (tqpair->state != NVME_TCP_QPAIR_STATE_INVALID) {
		if (tqpair->state != NVMF_TCP_QPAIR_STATE_INVALID) {
			SPDK_ERRLOG("Already received ICreq PDU, and reject this pdu=%p\n", pdu);
			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
			goto err;
@@ -2387,7 +2395,7 @@ nvmf_tcp_pdu_ch_handle(struct spdk_nvmf_tcp_qpair *tqpair)
			plen_error = true;
		}
	} else {
		if (tqpair->state != NVME_TCP_QPAIR_STATE_RUNNING) {
		if (tqpair->state != NVMF_TCP_QPAIR_STATE_RUNNING) {
			SPDK_ERRLOG("The TCP/IP connection is not negotiated\n");
			fes = SPDK_NVME_TCP_TERM_REQ_FES_PDU_SEQUENCE_ERROR;
			goto err;
@@ -2494,7 +2502,7 @@ nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
			nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
		/* FALLTHROUGH */
		case NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH:
			if (spdk_unlikely(tqpair->state == NVME_TCP_QPAIR_STATE_INITIALIZING)) {
			if (spdk_unlikely(tqpair->state == NVMF_TCP_QPAIR_STATE_INITIALIZING)) {
				return rc;
			}

@@ -3416,7 +3424,7 @@ nvmf_tcp_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
	}

	tqpair->group = tgroup;
	nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_INVALID);
	nvmf_tcp_qpair_set_state(tqpair, NVMF_TCP_QPAIR_STATE_INVALID);
	TAILQ_INSERT_TAIL(&tgroup->qpairs, tqpair, link);

	return 0;
@@ -3512,7 +3520,7 @@ nvmf_tcp_close_qpair(struct spdk_nvmf_qpair *qpair,
	tqpair->fini_cb_fn = cb_fn;
	tqpair->fini_cb_arg = cb_arg;

	nvmf_tcp_qpair_set_state(tqpair, NVME_TCP_QPAIR_STATE_EXITED);
	nvmf_tcp_qpair_set_state(tqpair, NVMF_TCP_QPAIR_STATE_EXITED);
	nvmf_tcp_qpair_destroy(tqpair);
}

+16 −16
Original line number Diff line number Diff line
@@ -615,7 +615,7 @@ test_nvmf_tcp_send_c2h_data(void)
	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
@@ -687,7 +687,7 @@ test_nvmf_tcp_h2c_data_hdr_handle(void)
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;
@@ -759,7 +759,7 @@ test_nvmf_tcp_in_capsule_data_handle(void)
	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

@@ -1052,7 +1052,7 @@ test_nvmf_tcp_check_xfer_type(void)
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

@@ -1125,7 +1125,7 @@ test_nvmf_tcp_invalid_sgl(void)
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

@@ -1179,7 +1179,7 @@ test_nvmf_tcp_pdu_ch_handle(void)

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
@@ -1189,7 +1189,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
	/* Test case: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.state = NVMF_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
@@ -1202,7 +1202,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.state = NVMF_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
@@ -1214,7 +1214,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
@@ -1227,7 +1227,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.state = NVMF_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
@@ -1241,7 +1241,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
@@ -1256,7 +1256,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
@@ -1271,7 +1271,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
@@ -1285,7 +1285,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD, let pdo error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
@@ -1302,7 +1302,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA, let pdo error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
@@ -1318,7 +1318,7 @@ test_nvmf_tcp_pdu_ch_handle(void)
	/* Test case: All parameters is conformed to the function. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.state = NVMF_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);