Commit 60af3c00 authored by Rui Chang, committed by Tomasz Zawadzki

nvmf/tcp: optimize nvmf_tcp_req_set_state() to reduce cpu usage



In NVMe/TCP stress tests (ARM platform, 6 NVMe disks), we saw
nvmf_tcp_req_set_state() consume a noticeable share of CPU cycles
(about 2%~3% of the nvmf_tgt process, the sixth-largest consumer),
mostly spent moving TCP request structures between the per-state
queues. Analysis showed this queue manipulation is unnecessary:
replace the array of per-state queues with a free queue and a working
queue, so that a state transition only updates the per-state counters
and a request is re-linked only when it moves between the free and
working queues. With this change we see an overall performance gain
of 1%~1.5%.
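
A simplified before/after sketch of the hot path (asserts and local
variable setup elided; the complete change is in the diff below):

    /* Before: each call to nvmf_tcp_req_set_state() re-linked the
     * request between per-state queues on top of updating the counters.
     */
    TAILQ_REMOVE(&tqpair->state_queue[tcp_req->state], tcp_req, state_link);
    tqpair->state_cntr[tcp_req->state]--;
    TAILQ_INSERT_TAIL(&tqpair->state_queue[state], tcp_req, state_link);
    tqpair->state_cntr[state]++;
    tcp_req->state = state;

    /* After: a state transition only updates the counters. A request
     * is re-linked just twice in its lifetime: nvmf_tcp_req_get()
     * moves it from tcp_req_free_queue to tcp_req_working_queue, and
     * the new nvmf_tcp_req_put() moves it back.
     */
    tqpair->state_cntr[tcp_req->state]--;
    tqpair->state_cntr[state]++;
    tcp_req->state = state;

Code paths that previously walked a per-state queue now walk
tcp_req_working_queue and filter on tcp_req->state.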

Change-Id: Ifd2f5609e4d99cab9fea06e773b461ded6320e93
Signed-off-by: Rui Chang <rui.chang@arm.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/5667
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI
Community-CI: Mellanox Build Bot
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
parent 6fd14594
+38 −21
@@ -212,7 +212,9 @@ struct spdk_nvmf_tcp_qpair {
	struct nvme_tcp_pdu			pdu_in_progress;

	/* Queues to track the requests in all states */
-	TAILQ_HEAD(, spdk_nvmf_tcp_req)		state_queue[TCP_REQUEST_NUM_STATES];
+	TAILQ_HEAD(, spdk_nvmf_tcp_req)		tcp_req_working_queue;
+	TAILQ_HEAD(, spdk_nvmf_tcp_req)		tcp_req_free_queue;

	/* Number of requests in each state */
	uint32_t				state_cntr[TCP_REQUEST_NUM_STATES];

@@ -320,11 +322,8 @@ nvmf_tcp_req_set_state(struct spdk_nvmf_tcp_req *tcp_req,
	qpair = tcp_req->req.qpair;
	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);

-	TAILQ_REMOVE(&tqpair->state_queue[tcp_req->state], tcp_req, state_link);
	assert(tqpair->state_cntr[tcp_req->state] > 0);
	tqpair->state_cntr[tcp_req->state]--;

-	TAILQ_INSERT_TAIL(&tqpair->state_queue[state], tcp_req, state_link);
	tqpair->state_cntr[state]++;

	tcp_req->state = state;
@@ -353,7 +352,7 @@ nvmf_tcp_req_get(struct spdk_nvmf_tcp_qpair *tqpair)
{
	struct spdk_nvmf_tcp_req *tcp_req;

-	tcp_req = TAILQ_FIRST(&tqpair->state_queue[TCP_REQUEST_STATE_FREE]);
+	tcp_req = TAILQ_FIRST(&tqpair->tcp_req_free_queue);
	if (!tcp_req) {
		return NULL;
	}
@@ -363,10 +362,20 @@ nvmf_tcp_req_get(struct spdk_nvmf_tcp_qpair *tqpair)
	tcp_req->has_incapsule_data = false;
	tcp_req->req.dif.dif_insert_or_strip = false;

+	TAILQ_REMOVE(&tqpair->tcp_req_free_queue, tcp_req, state_link);
+	TAILQ_INSERT_TAIL(&tqpair->tcp_req_working_queue, tcp_req, state_link);
	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEW);
	return tcp_req;
}

+static inline void
+nvmf_tcp_req_put(struct spdk_nvmf_tcp_qpair *tqpair, struct spdk_nvmf_tcp_req *tcp_req)
+{
+	TAILQ_REMOVE(&tqpair->tcp_req_working_queue, tcp_req, state_link);
+	TAILQ_INSERT_TAIL(&tqpair->tcp_req_free_queue, tcp_req, state_link);
+	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_FREE);
+}

static void
nvmf_tcp_request_free(void *cb_arg)
{
@@ -398,10 +407,13 @@ nvmf_tcp_drain_state_queue(struct spdk_nvmf_tcp_qpair *tqpair,
{
	struct spdk_nvmf_tcp_req *tcp_req, *req_tmp;

-	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->state_queue[state], state_link, req_tmp) {
+	assert(state != TCP_REQUEST_STATE_FREE);
+	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->tcp_req_working_queue, state_link, req_tmp) {
+		if (state == tcp_req->state) {
			nvmf_tcp_request_free(tcp_req);
+		}
	}
}

static void
nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair)
@@ -412,11 +424,12 @@ nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair)
	nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEW);

	/* Wipe the requests waiting for buffer from the global list */
-	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->state_queue[TCP_REQUEST_STATE_NEED_BUFFER], state_link,
-			   req_tmp) {
+	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->tcp_req_working_queue, state_link, req_tmp) {
+		if (tcp_req->state == TCP_REQUEST_STATE_NEED_BUFFER) {
			STAILQ_REMOVE(&tqpair->group->group.pending_buf_queue, &tcp_req->req,
				      spdk_nvmf_request, buf_link);
+		}
	}

	nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEED_BUFFER);
	nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_EXECUTING);
@@ -433,12 +446,14 @@ nvmf_tcp_dump_qpair_req_contents(struct spdk_nvmf_tcp_qpair *tqpair)
	SPDK_ERRLOG("Dumping contents of queue pair (QID %d)\n", tqpair->qpair.qid);
	for (i = 1; i < TCP_REQUEST_NUM_STATES; i++) {
		SPDK_ERRLOG("\tNum of requests in state[%d] = %u\n", i, tqpair->state_cntr[i]);
-		TAILQ_FOREACH(tcp_req, &tqpair->state_queue[i], state_link) {
+		TAILQ_FOREACH(tcp_req, &tqpair->tcp_req_working_queue, state_link) {
+			if ((int)tcp_req->state == i) {
				SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", tcp_req->req.data_from_pool);
				SPDK_ERRLOG("\t\tRequest opcode: %d\n", tcp_req->req.cmd->nvmf_cmd.opcode);
+			}
		}
	}
}

static void
nvmf_tcp_qpair_destroy(struct spdk_nvmf_tcp_qpair *tqpair)
@@ -892,7 +907,7 @@ nvmf_tcp_qpair_init_mem_resource(struct spdk_nvmf_tcp_qpair *tqpair)

		/* Initialize request state to FREE */
		tcp_req->state = TCP_REQUEST_STATE_FREE;
-		TAILQ_INSERT_TAIL(&tqpair->state_queue[tcp_req->state], tcp_req, state_link);
+		TAILQ_INSERT_TAIL(&tqpair->tcp_req_free_queue, tcp_req, state_link);
		tqpair->state_cntr[TCP_REQUEST_STATE_FREE]++;
	}

@@ -909,16 +924,14 @@ static int
nvmf_tcp_qpair_init(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_tcp_qpair *tqpair;
-	int i;

	tqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_tcp_qpair, qpair);

	SPDK_DEBUGLOG(nvmf_tcp, "New TCP Connection: %p\n", qpair);

	/* Initialise request state queues of the qpair */
-	for (i = TCP_REQUEST_STATE_FREE; i < TCP_REQUEST_NUM_STATES; i++) {
-		TAILQ_INIT(&tqpair->state_queue[i]);
-	}
+	TAILQ_INIT(&tqpair->tcp_req_free_queue);
+	TAILQ_INIT(&tqpair->tcp_req_working_queue);

	tqpair->host_hdgst_enable = true;
	tqpair->host_ddgst_enable = true;
@@ -1290,7 +1303,11 @@ nvmf_tcp_find_req_in_state(struct spdk_nvmf_tcp_qpair *tqpair,
{
	struct spdk_nvmf_tcp_req *tcp_req = NULL;

-	TAILQ_FOREACH(tcp_req, &tqpair->state_queue[state], state_link) {
+	TAILQ_FOREACH(tcp_req, &tqpair->tcp_req_working_queue, state_link) {
+		if (tcp_req->state != state) {
+			continue;
+		}

		if (tcp_req->req.cmd->nvme_cmd.cid != cid) {
			continue;
		}
@@ -2427,7 +2444,7 @@ nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,

			nvmf_tcp_req_pdu_fini(tcp_req);

-			nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_FREE);
+			nvmf_tcp_req_put(tqpair, tcp_req);
			break;
		case TCP_REQUEST_NUM_STATES:
		default:
+9 −11
@@ -577,7 +577,7 @@ test_nvmf_tcp_h2c_data_hdr_handle(void)
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

-	TAILQ_INIT(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]);
+	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
@@ -589,12 +589,13 @@ test_nvmf_tcp_h2c_data_hdr_handle(void)
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
+	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 2;

-	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
+	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue,
			  &tcp_req, state_link);

	h2c_data = &pdu.hdr.h2c_data;
@@ -611,9 +612,9 @@ test_nvmf_tcp_h2c_data_hdr_handle(void)
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);

-	CU_ASSERT(TAILQ_FIRST(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]) ==
+	CU_ASSERT(TAILQ_FIRST(&tqpair.tcp_req_working_queue) ==
		  &tcp_req);
-	TAILQ_REMOVE(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
+	TAILQ_REMOVE(&tqpair.tcp_req_working_queue,
		     &tcp_req, state_link);
}

@@ -638,7 +639,6 @@ test_nvmf_tcp_incapsule_data_handle(void)
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};
-	int i = 0;

	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
@@ -650,12 +650,10 @@ test_nvmf_tcp_incapsule_data_handle(void)
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	/* init tqpair, add pdu to pdu_in_progress and wait for the buff */
-	for (i = TCP_REQUEST_STATE_FREE; i < TCP_REQUEST_NUM_STATES; i++) {
-		TAILQ_INIT(&tqpair.state_queue[i]);
-	}
+	TAILQ_INIT(&tqpair.tcp_req_free_queue);
+	TAILQ_INIT(&tqpair.tcp_req_working_queue);

-	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_FREE], &tcp_req2, state_link);
+	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
@@ -673,7 +671,7 @@ test_nvmf_tcp_incapsule_data_handle(void)
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

-	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_NEW], &tcp_req1, state_link);
+	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */