Commit 39119220 authored by Seth Howell, committed by Tomasz Zawadzki
Browse files

nvme: remove redundant transport_qp_is_failed checks



The qpair flag transport_qp_is_failed is tracked redundantly with the qpair
state machine: it is set exactly when the qpair is in the NVME_QPAIR_DISABLED
state rather than NVME_QPAIR_CONNECTED.

There are a couple of places where we check transport_qp_is_failed and then
immediately check whether we are in the connected state. Whether we are
failed or simply not in the connected state, we return the same value to the
calling function.

Since the checks for transport_qp_is_failed are not necessary, they can be
removed. As a result, there is no need to keep track of the flag, and it can
be removed from the qpair structure.

Change-Id: I4aef5d20eb267bfd6118e5d1d088df05574d9ffd
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/475802


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
parent 3369105f
Loading
Loading
Loading
Loading
+1 −5
Original line number Diff line number Diff line
@@ -408,7 +408,7 @@ spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
		goto out;
	}

	if (!qpair->transport_qp_is_failed) {
	if (!nvme_qpair_state_equals(qpair, NVME_QPAIR_DISABLED)) {
		rc = 0;
		goto out;
	}
@@ -419,12 +419,10 @@ spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
	rc = nvme_transport_ctrlr_connect_qpair(ctrlr, qpair);
	if (rc) {
		nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
		qpair->transport_qp_is_failed = true;
		rc = -EAGAIN;
		goto out;
	}
	nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
	qpair->transport_qp_is_failed = false;

out:
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
@@ -1079,7 +1077,6 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
	/* Disable all queues before disabling the controller hardware. */
	TAILQ_FOREACH(qpair, &ctrlr->active_io_qpairs, tailq) {
		nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
		qpair->transport_qp_is_failed = true;
	}
	nvme_qpair_set_state(ctrlr->adminq, NVME_QPAIR_DISABLED);
	nvme_qpair_complete_error_reqs(ctrlr->adminq);
@@ -1124,7 +1121,6 @@ spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
				continue;
			}
			nvme_qpair_set_state(qpair, NVME_QPAIR_CONNECTED);
			qpair->transport_qp_is_failed = false;
		}
	}

+0 −2
Original line number Diff line number Diff line
@@ -356,8 +356,6 @@ struct spdk_nvme_qpair {
	uint8_t				in_completion_context : 1;
	uint8_t				delete_after_completion_context: 1;

	uint8_t				transport_qp_is_failed: 1;

	/*
	 * Set when no deletion notification is needed. For example, the process
	 * which allocated this qpair exited unexpectedly.
+0 −8
Original line number Diff line number Diff line
@@ -448,10 +448,6 @@ spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_
		return -ENXIO;
	}

	if (spdk_unlikely(qpair->transport_qp_is_failed == true)) {
		return -ENXIO;
	}

	if (spdk_unlikely(!nvme_qpair_check_enabled(qpair) &&
			  !nvme_qpair_state_equals(qpair, NVME_QPAIR_CONNECTING))) {
		/*
@@ -708,10 +704,6 @@ nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *re
		return 0;
	}

	if (spdk_unlikely(qpair->transport_qp_is_failed == true)) {
		return -ENXIO;
	}

	rc = _nvme_qpair_submit_request(qpair, req);
	if (rc == -EAGAIN) {
		STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
+5 −6
Original line number Diff line number Diff line
@@ -288,13 +288,13 @@ nvme_rdma_qpair_process_cm_event(struct nvme_rdma_qpair *rqpair)
			break;
		case RDMA_CM_EVENT_DISCONNECTED:
		case RDMA_CM_EVENT_DEVICE_REMOVAL:
			rqpair->qpair.transport_qp_is_failed = true;
			nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
			break;
		case RDMA_CM_EVENT_MULTICAST_JOIN:
		case RDMA_CM_EVENT_MULTICAST_ERROR:
			break;
		case RDMA_CM_EVENT_ADDR_CHANGE:
			rqpair->qpair.transport_qp_is_failed = true;
			nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
			break;
		case RDMA_CM_EVENT_TIMEWAIT_EXIT:
			break;
@@ -1058,10 +1058,9 @@ nvme_rdma_qpair_connect(struct nvme_rdma_qpair *rqpair)
		return -1;
	}

	rqpair->qpair.transport_qp_is_failed = false;
	rc = nvme_fabric_qpair_connect(&rqpair->qpair, rqpair->num_entries);
	if (rc < 0) {
		rqpair->qpair.transport_qp_is_failed = true;
		nvme_qpair_set_state(&rqpair->qpair, NVME_QPAIR_DISABLED);
		SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
		return -1;
	}
@@ -1514,7 +1513,7 @@ nvme_rdma_qpair_disconnect(struct spdk_nvme_qpair *qpair)
{
	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);

	qpair->transport_qp_is_failed = true;
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
	nvme_rdma_unregister_mem(rqpair);
	nvme_rdma_unregister_reqs(rqpair);
	nvme_rdma_unregister_rsps(rqpair);
@@ -1895,7 +1894,7 @@ nvme_rdma_qpair_process_completions(struct spdk_nvme_qpair *qpair,
	}
	nvme_rdma_qpair_process_cm_event(rqpair);

	if (spdk_unlikely(qpair->transport_qp_is_failed)) {
	if (spdk_unlikely(nvme_qpair_state_equals(qpair, NVME_QPAIR_DISABLED))) {
		goto fail;
	}

+2 −3
Original line number Diff line number Diff line
@@ -235,7 +235,7 @@ nvme_tcp_qpair_disconnect(struct spdk_nvme_qpair *qpair)
	struct nvme_tcp_qpair *tqpair = nvme_tcp_qpair(qpair);
	struct nvme_tcp_pdu *pdu;

	qpair->transport_qp_is_failed = true;
	nvme_qpair_set_state(qpair, NVME_QPAIR_DISABLED);
	spdk_sock_close(&tqpair->sock);

	/* clear the send_queue */
@@ -1624,10 +1624,9 @@ nvme_tcp_qpair_connect(struct nvme_tcp_qpair *tqpair)
		return -1;
	}

	tqpair->qpair.transport_qp_is_failed = false;
	rc = nvme_fabric_qpair_connect(&tqpair->qpair, tqpair->num_entries);
	if (rc < 0) {
		tqpair->qpair.transport_qp_is_failed = true;
		nvme_qpair_set_state(&tqpair->qpair, NVME_QPAIR_DISABLED);
		SPDK_ERRLOG("Failed to send an NVMe-oF Fabric CONNECT command\n");
		return -1;
	}
Loading