Commit a9fc7e1d authored by Seth Howell, committed by Ben Walker
Browse files

rdma: use LAST_WQE_REACHED event in the SRQ path



This event is generated by NICs utilizing the SRQ feature when the last
RECV for that qpair is processed. I have confirmed this behavior on SRQ-capable NICs.

Change-Id: Ib6d6b6d02987f789b4d5dd3daf734e3351ee1974
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/448063


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
parent add76a35
Loading
Loading
Loading
Loading
+28 −6
Original line number Diff line number Diff line
@@ -368,6 +368,8 @@ struct spdk_nvmf_rdma_qpair {
	 * that we only initialize one of these paths.
	 */
	bool					disconnect_started;
	/* Lets us know that we have received the last_wqe event. */
	bool					last_wqe_reached;
};

struct spdk_nvmf_rdma_poller {
@@ -2332,12 +2334,22 @@ static void nvmf_rdma_destroy_drained_qpair(void *ctx)
	struct spdk_nvmf_rdma_transport *rtransport = SPDK_CONTAINEROF(rqpair->qpair.transport,
			struct spdk_nvmf_rdma_transport, transport);

	if (rqpair->current_send_depth == 0 && rqpair->current_recv_depth == rqpair->max_queue_depth) {
		/* The qpair has been drained. Free the resources. */
	/* In non SRQ path, we will reach rqpair->max_queue_depth. In SRQ path, we will get the last_wqe event. */
	if (rqpair->current_send_depth != 0) {
		return;
	}

	if (rqpair->srq == NULL && rqpair->current_recv_depth != rqpair->max_queue_depth) {
		return;
	}

	if (rqpair->srq != NULL && rqpair->last_wqe_reached == false) {
		return;
	}

	spdk_nvmf_rdma_qpair_process_pending(rtransport, rqpair, true);
	spdk_nvmf_rdma_qpair_destroy(rqpair);
}
}


static int
@@ -2473,7 +2485,7 @@ static void
spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
{
	int				rc;
	struct spdk_nvmf_rdma_qpair	*rqpair;
	struct spdk_nvmf_rdma_qpair	*rqpair = NULL;
	struct ibv_async_event		event;
	enum ibv_qp_state		state;

@@ -2497,7 +2509,17 @@ spdk_nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
		spdk_nvmf_rdma_start_disconnect(rqpair);
		break;
	case IBV_EVENT_QP_LAST_WQE_REACHED:
		/* This event only occurs for shared receive queues, which are not currently supported. */
		/* This event only occurs for shared receive queues. */
		rqpair = event.element.qp->qp_context;
		rqpair->last_wqe_reached = true;

		/* This must be handled on the polling thread if it exists. Otherwise the timeout will catch it. */
		if (rqpair->qpair.group) {
			spdk_thread_send_msg(rqpair->qpair.group->thread, nvmf_rdma_destroy_drained_qpair, rqpair);
		} else {
			SPDK_ERRLOG("Unable to destroy the qpair %p since it does not have a poll group.\n", rqpair);
		}

		break;
	case IBV_EVENT_SQ_DRAINED:
		/* This event occurs frequently in both error and non-error states.