Commit 58f43df1 authored by Alexey Marchuk's avatar Alexey Marchuk Committed by Tomasz Zawadzki
Browse files

nvmf/rdma: Handle several ibv events in a row



Currently rdma acceptor handles only one ibv event per poll
Taking into account the default acceptor poll rate (10ms), it can
take a long time to handle e.g. LAST_WQE_REACHED events when we
close a huge number of qpairs at the same time.
This patch allows handling of up to 32 ibv events per acceptor poll.

Change-Id: Ic2884dfc5b54c6aec0655aaa547b491a9934a386
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3821


Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
parent 8e43a261
Loading
Loading
Loading
Loading
+22 −5
Original line number Diff line number Diff line
@@ -3050,7 +3050,7 @@ nvmf_rdma_send_qpair_async_event(struct spdk_nvmf_rdma_qpair *rqpair,
	return rc;
}

static void
static int
nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
{
	int				rc;
@@ -3060,9 +3060,8 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
	rc = ibv_get_async_event(device->context, &event);

	if (rc) {
		SPDK_ERRLOG("Failed to get async_event (%d): %s\n",
			    errno, spdk_strerror(errno));
		return;
		/* In non-blocking mode -1 means there are no events available */
		return rc;
	}

	switch (event.event_type) {
@@ -3125,6 +3124,24 @@ nvmf_process_ib_event(struct spdk_nvmf_rdma_device *device)
		break;
	}
	ibv_ack_async_event(&event);

	return 0;
}

/*
 * Drain up to max_events pending IBV async events from the device in a
 * single call. nvmf_process_ib_event() returns non-zero when no further
 * events are available (the fd is polled in non-blocking mode, per the
 * comment in that function) or on error, so the loop stops early then.
 */
static void
nvmf_process_ib_events(struct spdk_nvmf_rdma_device *device, uint32_t max_events)
{
	int rc = 0;
	uint32_t i = 0;

	for (i = 0; i < max_events; i++) {
		rc = nvmf_process_ib_event(device);
		if (rc) {
			/* No more events (or an error) — stop draining. */
			break;
		}
	}

	/* i holds the count of events actually handled this poll. */
	SPDK_DEBUGLOG(SPDK_LOG_RDMA, "Device %s: %u events processed\n", device->context->device->name, i);
}

static uint32_t
@@ -3155,7 +3172,7 @@ nvmf_rdma_accept(struct spdk_nvmf_transport *transport)
	/* Second and subsequent poll descriptors are IB async events */
	TAILQ_FOREACH_SAFE(device, &rtransport->devices, link, tmp) {
		if (rtransport->poll_fds[i++].revents & POLLIN) {
			nvmf_process_ib_event(device);
			nvmf_process_ib_events(device, 32);
			nfds--;
		}
	}