Commit b3e1db32 authored by Shuhei Matsumoto, committed by Tomasz Zawadzki
Browse files

nvmf/rdma: Ignore async_event if its qp_context is NULL



If initiator and target run on the same application, and initiator
uses SRQ, target may get async events for initiator, e.g.,
IBV_EVENT_QP_LAST_WQE_REACHED unexpectedly.

The reason is that the initiator and target may use the same device
simultaneously, while only the target polls for async events.

Target sets attr.qp_context to rqpair when creating QP, but initiator
sets attr.qp_context to NULL when creating QP.

Hence one simple fix is to ignore async events whose qp_context is
NULL.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: Id9ead1934f0b2ad1e18b174d2df2f1bf9853f7e1
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14297


Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Dong Yi <dongx.yi@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
parent 0e4b13dc
Loading
Loading
Loading
Loading
+48 −31
Original line number Diff line number Diff line
@@ -3239,7 +3239,23 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)

	switch (event.event_type) {
	case IBV_EVENT_QP_FATAL:
	case IBV_EVENT_QP_LAST_WQE_REACHED:
	case IBV_EVENT_SQ_DRAINED:
	case IBV_EVENT_QP_REQ_ERR:
	case IBV_EVENT_QP_ACCESS_ERR:
	case IBV_EVENT_COMM_EST:
	case IBV_EVENT_PATH_MIG:
	case IBV_EVENT_PATH_MIG_ERR:
		rqpair = event.element.qp->qp_context;
		if (!rqpair) {
			/* Any QP event for NVMe-RDMA initiator may be returned. */
			SPDK_NOTICELOG("Async QP event for unknown QP: %s\n",
				       ibv_event_type_str(event.event_type));
			break;
		}

		switch (event.event_type) {
		case IBV_EVENT_QP_FATAL:
			SPDK_ERRLOG("Fatal event received for rqpair %p\n", rqpair);
			spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
					  (uintptr_t)rqpair, event.event_type);
@@ -3248,7 +3264,6 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
			break;
		case IBV_EVENT_QP_LAST_WQE_REACHED:
			/* This event only occurs for shared receive queues. */
		rqpair = event.element.qp->qp_context;
			SPDK_DEBUGLOG(rdma, "Last WQE reached event received for rqpair %p\n", rqpair);
			rc = nvmf_rdma_send_qpair_async_event(rqpair, nvmf_rdma_handle_last_wqe_reached);
			if (rc) {
@@ -3259,7 +3274,6 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
		case IBV_EVENT_SQ_DRAINED:
			/* This event occurs frequently in both error and non-error states.
			 * Check if the qpair is in an error state before sending a message. */
		rqpair = event.element.qp->qp_context;
			SPDK_DEBUGLOG(rdma, "Last sq drained event received for rqpair %p\n", rqpair);
			spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
					  (uintptr_t)rqpair, event.event_type);
@@ -3272,13 +3286,16 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
		case IBV_EVENT_COMM_EST:
		case IBV_EVENT_PATH_MIG:
		case IBV_EVENT_PATH_MIG_ERR:
		SPDK_NOTICELOG("Async event: %s\n",
			SPDK_NOTICELOG("Async QP event: %s\n",
				       ibv_event_type_str(event.event_type));
		rqpair = event.element.qp->qp_context;
			spdk_trace_record(TRACE_RDMA_IBV_ASYNC_EVENT, 0, 0,
					  (uintptr_t)rqpair, event.event_type);
			nvmf_rdma_update_ibv_state(rqpair);
			break;
		default:
			break;
		}
		break;
	case IBV_EVENT_CQ_ERR:
	case IBV_EVENT_DEVICE_FATAL:
	case IBV_EVENT_PORT_ACTIVE: