Commit cd1b7ab0 authored by liuqinfei's avatar liuqinfei Committed by Tomasz Zawadzki
Browse files

nvmf: balance the get optimal poll group



Fixes issue #2636.

The existing allocation method (nvmf_rdma_get_optimal_poll_group())
simply traverses the poll groups in round-robin order and is unaware
of link disconnections. A fairer method is implemented that allocates
a poll group based on the number of currently active connections.

Signed-off-by: liuqinfei <18138800392@163.com>
Signed-off-by: luo rixin <luorixin@huawei.com>
Change-Id: Ic1e6283e386dbb0dd6655bedebe26aeedb16c333
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/14002


Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent 55bc243f
Loading
Loading
Loading
Loading
+24 −0
Original line number Diff line number Diff line
@@ -3508,7 +3508,31 @@ nvmf_rdma_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair)
	if (qpair->qid == 0) {
		pg = &rtransport->conn_sched.next_admin_pg;
	} else {
		struct spdk_nvmf_rdma_poll_group *pg_min, *pg_start, *pg_current;
		uint32_t min_value;

		pg = &rtransport->conn_sched.next_io_pg;
		pg_min = *pg;
		pg_start = *pg;
		pg_current = *pg;
		min_value = (*pg)->group.group->stat.current_io_qpairs;

		while (pg_current->group.group->stat.current_io_qpairs) {
			pg_current = TAILQ_NEXT(pg_current, link);
			if (pg_current == NULL) {
				pg_current = TAILQ_FIRST(&rtransport->poll_groups);
			}

			if (pg_current->group.group->stat.current_io_qpairs < min_value) {
				min_value = pg_current->group.group->stat.current_io_qpairs;
				pg_min = pg_current;
			}

			if (pg_current == pg_start) {
				break;
			}
		}
		*pg = pg_min;
	}

	assert(*pg != NULL);
+2 −0
Original line number Diff line number Diff line
@@ -789,6 +789,7 @@ test_nvmf_rdma_get_optimal_poll_group(void)
	struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
	struct spdk_nvmf_transport_poll_group *result;
	struct spdk_nvmf_poll_group group = {};
	uint32_t i;

	rqpair.qpair.transport = transport;
@@ -797,6 +798,7 @@ test_nvmf_rdma_get_optimal_poll_group(void)
	for (i = 0; i < TEST_GROUPS_COUNT; i++) {
		groups[i] = nvmf_rdma_poll_group_create(transport, NULL);
		CU_ASSERT(groups[i] != NULL);
		groups[i]->group = &group;
		rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
		groups[i]->transport = transport;
	}