Commit 3caf2080 authored by Konrad Sztyber, committed by Tomasz Zawadzki
Browse files

nvmf: add spdk_nvmf_qpair_is_active()



Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: Icef9ab8527185a451957833e0e51cb869cbd1e57
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/22586


Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <jim.harris@samsung.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <ben@nvidia.com>
parent 3e4c5347
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -471,6 +471,12 @@ int spdk_nvmf_ctrlr_connect(struct spdk_nvmf_request *req);
 */
void spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair);

/**
 * Check whether an NVMe-oF queue pair is active.
 *
 * A qpair is considered active only while its state is exactly
 * SPDK_NVMF_QPAIR_ACTIVE; any other state (e.g. uninitialized or
 * deactivating — see the DEACTIVATING transition in
 * spdk_nvmf_qpair_disconnect()) counts as inactive.
 *
 * \param qpair The queue pair to check. Must be non-NULL; the pointer is
 * dereferenced unconditionally.
 *
 * \return true if the qpair's state is SPDK_NVMF_QPAIR_ACTIVE, false otherwise.
 */
static inline bool
spdk_nvmf_qpair_is_active(struct spdk_nvmf_qpair *qpair)
{
	return qpair->state == SPDK_NVMF_QPAIR_ACTIVE;
}

/**
 * A subset of struct spdk_nvme_registers that are emulated by a fabrics device.
 */
+8 −4
Original line number Diff line number Diff line
@@ -617,6 +617,7 @@ nvmf_ctrlr_add_io_qpair(void *ctx)
	struct spdk_nvmf_qpair *admin_qpair = ctrlr->admin_qpair;
	struct spdk_nvmf_poll_group *admin_qpair_group = NULL;
	enum spdk_nvmf_qpair_state admin_qpair_state = SPDK_NVMF_QPAIR_UNINITIALIZED;
	bool admin_qpair_active = false;

	SPDK_DTRACE_PROBE4_TICKS(nvmf_ctrlr_add_io_qpair, ctrlr, req->qpair, req->qpair->qid,
				 spdk_thread_get_id(ctrlr->thread));
@@ -661,11 +662,12 @@ nvmf_ctrlr_add_io_qpair(void *ctx)

	/* There is a chance that admin qpair was destroyed. This is an issue that was observed only with ESX initiators */
	if (admin_qpair) {
		admin_qpair_active = spdk_nvmf_qpair_is_active(admin_qpair);
		admin_qpair_group = admin_qpair->group;
		admin_qpair_state = admin_qpair->state;
	}

	if (admin_qpair_state != SPDK_NVMF_QPAIR_ACTIVE || admin_qpair_group == NULL) {
	if (!admin_qpair_active || admin_qpair_group == NULL) {
		/* There is a chance that admin qpair was destroyed or is being destroyed at this moment due to e.g.
		 * expired keep alive timer. Part of the qpair destruction process is change of qpair's
		 * state to DEACTIVATING and removing it from poll group */
@@ -704,6 +706,7 @@ _nvmf_ctrlr_add_io_qpair(void *ctx)
	const struct spdk_nvmf_subsystem_listener *listener;
	struct spdk_nvmf_poll_group *admin_qpair_group = NULL;
	enum spdk_nvmf_qpair_state admin_qpair_state = SPDK_NVMF_QPAIR_UNINITIALIZED;
	bool admin_qpair_active = false;

	assert(req->iovcnt == 1);

@@ -753,11 +756,12 @@ _nvmf_ctrlr_add_io_qpair(void *ctx)

	/* There is a chance that admin qpair was destroyed. This is an issue that was observed only with ESX initiators */
	if (admin_qpair) {
		admin_qpair_active = spdk_nvmf_qpair_is_active(admin_qpair);
		admin_qpair_group = admin_qpair->group;
		admin_qpair_state = admin_qpair->state;
	}

	if (admin_qpair_state != SPDK_NVMF_QPAIR_ACTIVE || admin_qpair_group == NULL) {
	if (!admin_qpair_active || admin_qpair_group == NULL) {
		/* There is a chance that admin qpair was destroyed or is being destroyed at this moment due to e.g.
		 * expired keep alive timer. Part of the qpair destruction process is change of qpair's
		 * state to DEACTIVATING and removing it from poll group */
@@ -4624,7 +4628,7 @@ nvmf_check_subsystem_active(struct spdk_nvmf_request *req)
			ns_info->io_outstanding++;
		}

		if (spdk_unlikely(qpair->state != SPDK_NVMF_QPAIR_ACTIVE)) {
		if (spdk_unlikely(!spdk_nvmf_qpair_is_active(qpair))) {
			req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
			req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
			TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);
@@ -4706,7 +4710,7 @@ spdk_nvmf_request_get_dif_ctx(struct spdk_nvmf_request *req, struct spdk_dif_ctx
		return false;
	}

	if (spdk_unlikely(qpair->state != SPDK_NVMF_QPAIR_ACTIVE)) {
	if (spdk_unlikely(!spdk_nvmf_qpair_is_active(qpair))) {
		return false;
	}

+1 −1
Original line number Diff line number Diff line
@@ -1504,7 +1504,7 @@ nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_
		return -EACCES;
	}

	if (fc_conn->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
	if (!spdk_nvmf_qpair_is_active(&fc_conn->qpair)) {
		SPDK_ERRLOG("Connection %ld qpair state = %d not valid\n",
			    rqst_conn_id, fc_conn->qpair.state);
		return -EACCES;
+1 −1
Original line number Diff line number Diff line
@@ -1396,7 +1396,7 @@ spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair)
	}

	SPDK_DTRACE_PROBE2_TICKS(nvmf_qpair_disconnect, qpair, spdk_thread_get_id(group->thread));
	assert(qpair->state == SPDK_NVMF_QPAIR_ACTIVE);
	assert(spdk_nvmf_qpair_is_active(qpair));
	nvmf_qpair_set_state(qpair, SPDK_NVMF_QPAIR_DEACTIVATING);

	qpair_ctx = calloc(1, sizeof(struct nvmf_qpair_disconnect_ctx));
+5 −5
Original line number Diff line number Diff line
@@ -2052,7 +2052,7 @@ nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,

	/* If the queue pair is in an error state, force the request to the completed state
	 * to release resources. */
	if (spdk_unlikely(rqpair->ibv_in_error_state || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE)) {
	if (spdk_unlikely(rqpair->ibv_in_error_state || !spdk_nvmf_qpair_is_active(&rqpair->qpair))) {
		switch (rdma_req->state) {
		case RDMA_REQUEST_STATE_NEED_BUFFER:
			STAILQ_REMOVE(&rgroup->group.pending_buf_queue, &rdma_req->req, spdk_nvmf_request, buf_link);
@@ -2093,7 +2093,7 @@ nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
			memset(rdma_req->req.rsp, 0, sizeof(*rdma_req->req.rsp));
			rdma_req->transfer_wr = &rdma_req->data.wr;

			if (spdk_unlikely(rqpair->ibv_in_error_state || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE)) {
			if (spdk_unlikely(rqpair->ibv_in_error_state || !spdk_nvmf_qpair_is_active(&rqpair->qpair))) {
				rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
				break;
			}
@@ -4536,7 +4536,7 @@ _qp_reset_failed_sends(struct spdk_nvmf_rdma_transport *rtransport,
		prev_rdma_req = cur_rdma_req;
	}

	if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE) {
	if (spdk_nvmf_qpair_is_active(&rqpair->qpair)) {
		/* Disconnect the connection. */
		spdk_nvmf_qpair_disconnect(&rqpair->qpair);
	}
@@ -4753,7 +4753,7 @@ nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,

			error = true;

			if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ACTIVE) {
			if (spdk_nvmf_qpair_is_active(&rqpair->qpair)) {
				/* Disconnect the connection. */
				spdk_nvmf_qpair_disconnect(&rqpair->qpair);
			} else {
@@ -4764,7 +4764,7 @@ nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,

		nvmf_rdma_qpair_process_pending(rtransport, rqpair, false);

		if (spdk_unlikely(rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE)) {
		if (spdk_unlikely(!spdk_nvmf_qpair_is_active(&rqpair->qpair))) {
			nvmf_rdma_destroy_drained_qpair(rqpair);
		}
	}
Loading