Commit 55b94957 authored by Jacek Kalwas, committed by Tomasz Zawadzki
Browse files

nvme: move in_connect_flag to generic qpair type



It is common between RDMA and TCP transports and, if needed, could be
used in the generic layer.

Change-Id: I0b0b7550b8d17ff3db2881c004355deffc9a6dc9
Signed-off-by: Jacek Kalwas <jacek.kalwas@nutanix.com>
Reviewed-on: https://review.spdk.io/c/spdk/spdk/+/26803


Reviewed-by: Ben Walker <ben@nvidia.com>
Reviewed-by: Tomasz Zawadzki <tomasz@tzawadzki.com>
Tested-by: SPDK Automated Test System <spdkbot@gmail.com>
parent 2a4e86ee
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -497,6 +497,8 @@ struct spdk_nvme_qpair {
	/* The user is destroying qpair */
	uint8_t					destroy_in_progress: 1;

	uint8_t					in_connect_poll : 1;

	/* Number of IO outstanding at transport level */
	uint16_t				queue_depth;

+4 −7
Original line number Diff line number Diff line
@@ -242,8 +242,6 @@ struct nvme_rdma_qpair {

	enum nvme_rdma_qpair_state		state;

	bool					in_connect_poll;

	uint8_t					stale_conn_retry_count;
	bool					need_destroy;
	bool					connected;
@@ -1330,11 +1328,11 @@ nvme_rdma_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr,
	struct nvme_rdma_qpair *rqpair = nvme_rdma_qpair(qpair);
	int rc;

	if (rqpair->in_connect_poll) {
	if (qpair->in_connect_poll) {
		return -EAGAIN;
	}

	rqpair->in_connect_poll = true;
	qpair->in_connect_poll = true;

	switch (rqpair->state) {
	case NVME_RDMA_QPAIR_STATE_INVALID:
@@ -1356,7 +1354,7 @@ nvme_rdma_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr,
		if (rc == 0) {
			rc = -EAGAIN;
		}
		rqpair->in_connect_poll = false;
		qpair->in_connect_poll = false;

		return rc;

@@ -1408,8 +1406,7 @@ nvme_rdma_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr,
		break;
	}

	rqpair->in_connect_poll = false;

	qpair->in_connect_poll = false;
	return rc;
}

+5 −7
Original line number Diff line number Diff line
@@ -108,8 +108,7 @@ struct nvme_tcp_qpair {
		uint16_t host_ddgst_enable: 1;
		uint16_t icreq_send_ack: 1;
		uint16_t icresp_received: 1;
		uint16_t in_connect_poll: 1;
		uint16_t reserved: 11;
		uint16_t reserved: 12;
	} flags;

	/** Specifies the maximum number of PDU-Data bytes per H2C Data Transfer PDU */
@@ -447,8 +446,7 @@ nvme_tcp_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_
	}

	/* A Fabric command may be outstanding before a disconnect is invoked. */
	if (qpair->fabric_poll_status &&
	    !(qpair->auth.flags.in_auth_poll || tqpair->flags.in_connect_poll)) {
	if (qpair->fabric_poll_status && !(qpair->auth.flags.in_auth_poll || qpair->in_connect_poll)) {
		nvme_fabric_qpair_poll_cleanup(qpair);
		if (qpair->auth.cb_fn != NULL) {
			qpair->auth.cb_fn(qpair->auth.cb_ctx, -ECANCELED);
@@ -2394,11 +2392,11 @@ nvme_tcp_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvm
	 * nvme_fabric_qpair_connect_poll() if the connect response is received in the recursive
	 * call.
	 */
	if (tqpair->flags.in_connect_poll) {
	if (qpair->in_connect_poll) {
		return -EAGAIN;
	}

	tqpair->flags.in_connect_poll = 1;
	qpair->in_connect_poll = true;

	switch (tqpair->state) {
	case NVME_TCP_QPAIR_STATE_SOCK_CONNECTING:
@@ -2455,7 +2453,7 @@ nvme_tcp_ctrlr_connect_qpair_poll(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvm
		break;
	}

	tqpair->flags.in_connect_poll = 0;
	qpair->in_connect_poll = false;
	return rc;
}