Commit fa01f996 authored by Seth Howell, committed by Tomasz Zawadzki
Browse files

nvmf/rdma: disconnect qpair from ibv_event ctx



This call can be made directly now that
spdk_nvmf_qpair_disconnect is thread safe. It's
actually better that we do it this way, because
the qp destruct call is guaranteed to block until
the ib events associated with it are acknowledged.

This means that by processing the disconnect before
we ack the event, we will have valid memory to do
the atomic checks.

Signed-off-by: Seth Howell <seth.howell@intel.com>
Change-Id: If6882b7dc568fe4c35f4a35375769634326e9d76
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3681


Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
parent 86a6ac99
Loading
Loading
Loading
Loading
+3 −23
Original line number Diff line number Diff line
@@ -3000,13 +3000,6 @@ nvmf_process_cm_event(struct spdk_nvmf_transport *transport)
	}
}

/* Handler for a fatal ibv QP event: refresh the cached ibv state for the
 * qpair, then initiate its disconnect. Removed by this commit — the callers
 * now invoke these two calls inline from the ibv event context, since
 * spdk_nvmf_qpair_disconnect is thread safe (see commit message).
 */
static void
nvmf_rdma_handle_qp_fatal(struct spdk_nvmf_rdma_qpair *rqpair)
{
	nvmf_rdma_update_ibv_state(rqpair);
	spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
}

static void
nvmf_rdma_handle_last_wqe_reached(struct spdk_nvmf_rdma_qpair *rqpair)
{
@@ -3014,12 +3007,6 @@ nvmf_rdma_handle_last_wqe_reached(struct spdk_nvmf_rdma_qpair *rqpair)
	nvmf_rdma_destroy_drained_qpair(rqpair);
}

/* Handler for the IBV_EVENT_SQ_DRAINED case: begin disconnecting the qpair.
 * Removed by this commit — the caller now calls spdk_nvmf_qpair_disconnect
 * directly from the ibv event context instead of deferring via an async event.
 */
static void
nvmf_rdma_handle_sq_drained(struct spdk_nvmf_rdma_qpair *rqpair)
{
	spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
}

static void
nvmf_rdma_qpair_process_ibv_event(void *ctx)
{
@@ -3092,11 +3079,8 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
		SPDK_ERRLOG("Fatal event received for rqpair %p\n", rqpair);
		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
				  (uintptr_t)rqpair->cm_id, event.event_type);
		rc = nvmf_rdma_send_qpair_async_event(rqpair, nvmf_rdma_handle_qp_fatal);
		if (rc) {
			SPDK_WARNLOG("Failed to send QP_FATAL event. rqpair %p, err %d\n", rqpair, rc);
			nvmf_rdma_handle_qp_fatal(rqpair);
		}
		nvmf_rdma_update_ibv_state(rqpair);
		spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
		break;
	case IBV_EVENT_QP_LAST_WQE_REACHED:
		/* This event only occurs for shared receive queues. */
@@ -3116,11 +3100,7 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
		spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
				  (uintptr_t)rqpair->cm_id, event.event_type);
		if (nvmf_rdma_update_ibv_state(rqpair) == IBV_QPS_ERR) {
			rc = nvmf_rdma_send_qpair_async_event(rqpair, nvmf_rdma_handle_sq_drained);
			if (rc) {
				SPDK_WARNLOG("Failed to send SQ_DRAINED event. rqpair %p, err %d\n", rqpair, rc);
				nvmf_rdma_handle_sq_drained(rqpair);
			}
			spdk_nvmf_qpair_disconnect(&rqpair->qpair, NULL, NULL);
		}
		break;
	case IBV_EVENT_QP_REQ_ERR: