Commit a9b9f095 authored by Ben Walker, committed by Jim Harris
Browse files

nvmf/rdma: Don't trigger error recovery on IBV_EVENT_SQ_DRAINED



IBV_EVENT_SQ_DRAINED can occur during both error recovery and
normal operation. We don't want to spend time sending a message
to the correct qpair thread and then attempting to abort
all I/O in the case where this wasn't triggered by an error.

The case where this occurs during an error is very rare and
only in response to a user forcing the state to err from the
sqd state. For now, don't handle that case at all. Handle that
corner case in a later patch.

Change-Id: I16462ca52739b68f6b52a963f7344e12f7f48a55
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/420936


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Seth Howell <seth.howell5141@gmail.com>
parent 13a887f1
Loading
Loading
Loading
Loading
+6 −39
Original line number Diff line number Diff line
@@ -2050,25 +2050,6 @@ spdk_nvmf_rdma_qp_drained(struct spdk_nvmf_rdma_qpair *rqpair)
	}
}

/* Thread-message trampoline: runs on the qpair's thread and hands the
 * qpair off to the common drained-state handler. */
static void
_spdk_nvmf_rdma_sq_drained(void *cb_arg)
{
	struct spdk_nvmf_rdma_qpair *rqpair = cb_arg;

	spdk_nvmf_rdma_qp_drained(rqpair);
}

/* Handles a LAST_WQE_REACHED event on the qpair's own thread.  The event is
 * only meaningful once the qpair has already been moved to the ERROR state;
 * otherwise it is logged and dropped. */
static void
_spdk_nvmf_rdma_qp_last_wqe(void *cb_arg)
{
	struct spdk_nvmf_rdma_qpair *rqpair = cb_arg;

	if (rqpair->qpair.state == SPDK_NVMF_QPAIR_ERROR) {
		spdk_nvmf_rdma_qp_drained(rqpair);
		return;
	}

	SPDK_ERRLOG("QP#%u is not in ERROR state, dropping LAST_WQE event...\n",
		    rqpair->qpair.qid);
}

static void
_spdk_nvmf_rdma_qp_error(void *arg)
{
@@ -2076,17 +2057,8 @@ _spdk_nvmf_rdma_qp_error(void *arg)

	rqpair->qpair.state = SPDK_NVMF_QPAIR_ERROR;

	if (spdk_nvmf_rdma_qpair_is_idle(&rqpair->qpair)) {
		/* There are no outstanding requests */
	spdk_nvmf_rdma_qp_drained(rqpair);
}
}

/* Recover the owning rqpair from an ibv_qp; qp_context was set to the
 * rqpair when the queue pair was created. */
static struct spdk_nvmf_rdma_qpair *
spdk_nvmf_rqpair_from_qp(struct ibv_qp *qp)
{
	struct spdk_nvmf_rdma_qpair *rqpair = qp->qp_context;

	return rqpair;
}

static void
spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
@@ -2106,19 +2078,14 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
	SPDK_NOTICELOG("Async event: %s\n",
		       ibv_event_type_str(event.event_type));

	rqpair = event.element.qp->qp_context;

	switch (event.event_type) {
	case IBV_EVENT_QP_FATAL:
		rqpair = spdk_nvmf_rqpair_from_qp(event.element.qp);
	case IBV_EVENT_QP_LAST_WQE_REACHED:
		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_error, rqpair);
		break;
	case IBV_EVENT_SQ_DRAINED:
		rqpair = spdk_nvmf_rqpair_from_qp(event.element.qp);
		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_sq_drained, rqpair);
		break;
	case IBV_EVENT_QP_LAST_WQE_REACHED:
		rqpair = spdk_nvmf_rqpair_from_qp(event.element.qp);
		spdk_thread_send_msg(rqpair->qpair.group->thread, _spdk_nvmf_rdma_qp_last_wqe, rqpair);
		break;
	case IBV_EVENT_CQ_ERR:
	case IBV_EVENT_QP_REQ_ERR:
	case IBV_EVENT_QP_ACCESS_ERR: