Commit 245490e4 authored by Jacek Kalwas's avatar Jacek Kalwas Committed by Jim Harris
Browse files

nvme/tcp: adjust (un)likely around timeout



Timeouts can be enabled by the user, so we cannot assume in advance
whether the timeout check is likely or unlikely to run. In the worst
case — e.g. bdev nvme in use with timeouts enabled — every qpair would
carry an incorrect branch hint. This could impact performance, since
the timeout is meant to be checked on each round of
nvme_tcp_qpair_process_completions. While here, adjust the hints in
nvme_tcp_qpair_check_timeout and nvme_request_check_timeout for the
same reason.

Change-Id: Id8fb459e822423be3cdfb8c660a1ec2f0d03a463
Signed-off-by: default avatarJacek Kalwas <jacek.kalwas@nutanix.com>
Reviewed-on: https://review.spdk.io/c/spdk/spdk/+/25763


Community-CI: Mellanox Build Bot
Tested-by: default avatarSPDK Automated Test System <spdkbot@gmail.com>
Reviewed-by: default avatarAleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: default avatarTomasz Zawadzki <tomasz@tzawadzki.com>
Reviewed-by: default avatarJim Harris <jim.harris@nvidia.com>
parent f153f632
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -484,20 +484,20 @@ nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,

	assert(active_proc->timeout_cb_fn != NULL);

	if (req->timed_out || req->submit_tick == 0) {
	if (spdk_unlikely(req->timed_out || req->submit_tick == 0)) {
		return 0;
	}

	if (req->pid != g_spdk_nvme_pid) {
	if (spdk_unlikely(req->pid != g_spdk_nvme_pid)) {
		return 0;
	}

	if (nvme_qpair_is_admin_queue(qpair) &&
	    req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST) {
	if (spdk_unlikely(nvme_qpair_is_admin_queue(qpair) &&
			  req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST)) {
		return 0;
	}

	if (req->submit_tick + timeout_ticks > now_tick) {
	if (spdk_likely(req->submit_tick + timeout_ticks > now_tick)) {
		return 1;
	}

+7 −7
Original line number Diff line number Diff line
@@ -2036,30 +2036,30 @@ nvme_tcp_qpair_check_timeout(struct spdk_nvme_qpair *qpair)
	struct spdk_nvme_ctrlr_process *active_proc;

	/* Don't check timeouts during controller initialization. */
	if (ctrlr->state != NVME_CTRLR_STATE_READY) {
	if (spdk_unlikely(ctrlr->state != NVME_CTRLR_STATE_READY)) {
		return;
	}

	if (nvme_qpair_is_admin_queue(qpair)) {
	if (spdk_unlikely(nvme_qpair_is_admin_queue(qpair))) {
		active_proc = nvme_ctrlr_get_current_process(ctrlr);
	} else {
		active_proc = qpair->active_proc;
	}

	/* Only check timeouts if the current process has a timeout callback. */
	if (active_proc == NULL || active_proc->timeout_cb_fn == NULL) {
	if (spdk_unlikely(active_proc == NULL || active_proc->timeout_cb_fn == NULL)) {
		return;
	}

	t02 = spdk_get_ticks();
	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->outstanding_reqs, link, tmp) {
		if (ctrlr->is_failed) {
		if (spdk_unlikely(ctrlr->is_failed)) {
			/* The controller state may be changed to failed in one of the nvme_request_check_timeout callbacks. */
			return;
		}
		assert(tcp_req->req != NULL);

		if (nvme_request_check_timeout(tcp_req->req, tcp_req->cid, active_proc, t02)) {
		if (spdk_likely(nvme_request_check_timeout(tcp_req->req, tcp_req->cid, active_proc, t02))) {
			/*
			 * The requests are in order, so as soon as one has not timed out,
			 * stop iterating.
@@ -2084,7 +2084,7 @@ nvme_tcp_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_c
		if (rc < 0 && errno != EAGAIN) {
			SPDK_ERRLOG("Failed to flush tqpair=%p (%d): %s\n", tqpair,
				    errno, spdk_strerror(errno));
			if (spdk_unlikely(tqpair->qpair.ctrlr->timeout_enabled)) {
			if (tqpair->qpair.ctrlr->timeout_enabled) {
				nvme_tcp_qpair_check_timeout(qpair);
			}

@@ -2115,7 +2115,7 @@ nvme_tcp_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_c
		goto fail;
	}

	if (spdk_unlikely(tqpair->qpair.ctrlr->timeout_enabled)) {
	if (tqpair->qpair.ctrlr->timeout_enabled) {
		nvme_tcp_qpair_check_timeout(qpair);
	}