Commit 11c9b396 authored by Tomasz Zawadzki's avatar Tomasz Zawadzki Committed by Ben Walker
Browse files

scheduler_dynamic: move thread to least busy core



In cases when all cores are already doing too much work
to fit a thread, active threads should still be balanced
over all cores.

When current core is overloaded, place the thread
on another that is less busy.

The core limit is set to 95% to catch only ones that are
fully busy.
Decreasing that value would make spreading out the threads
more aggressive.

Changed thread load in one of the unit tests to reflect the
95% limit.

Signed-off-by: default avatarTomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: I3b3bc5f7fbd22725441fa811d61446950000cc46
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8113


Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarPaul Luse <paul.e.luse@intel.com>
Reviewed-by: default avatarKrzysztof Karas <krzysztof.karas@intel.com>
Reviewed-by: default avatarShuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: default avatarBen Walker <benjamin.walker@intel.com>
Reviewed-by: default avatarMaciej Szwed <maciej.szwed@intel.com>
Reviewed-by: default avatarKonrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
parent cf8405fc
Loading
Loading
Loading
Loading
+40 −0
Original line number Diff line number Diff line
@@ -54,6 +54,7 @@ static struct core_stats *g_cores;

#define SCHEDULER_THREAD_BUSY 100
#define SCHEDULER_LOAD_LIMIT 50
#define SCHEDULER_CORE_LIMIT 95

static uint32_t
_get_next_target_core(void)
@@ -126,6 +127,33 @@ _move_thread(struct spdk_lw_thread *lw_thread, uint32_t dst_core)
	lw_thread->lcore = dst_core;
}

static bool
_is_core_over_limit(uint32_t core_id)
{
	struct core_stats *core = &g_cores[core_id];
	uint64_t busy = core->busy;
	uint64_t idle = core->idle;

	/* A core running zero or one threads can never be over the limit,
	 * and a core that did no busy work is trivially under it (this also
	 * avoids dividing by zero when both counters are 0). */
	if (core->thread_count <= 1 || busy == 0) {
		return false;
	}

	/* Over the limit when busy time is at least SCHEDULER_CORE_LIMIT
	 * percent of the total (busy + idle) time observed on the core. */
	return busy * 100 / (busy + idle) >= SCHEDULER_CORE_LIMIT;
}

static bool
_can_core_fit_thread(struct spdk_lw_thread *lw_thread, uint32_t dst_core)
{
@@ -159,6 +187,7 @@ _find_optimal_core(struct spdk_lw_thread *lw_thread)
	uint32_t i;
	uint32_t target_lcore;
	uint32_t current_lcore = lw_thread->lcore;
	uint32_t least_busy_lcore = lw_thread->lcore;
	struct spdk_thread *thread = spdk_thread_get_from_ctx(lw_thread);
	struct spdk_cpuset *cpumask = spdk_thread_get_cpumask(thread);

@@ -171,6 +200,11 @@ _find_optimal_core(struct spdk_lw_thread *lw_thread)
			continue;
		}

		/* Search for least busy core. */
		if (g_cores[target_lcore].busy < g_cores[least_busy_lcore].busy) {
			least_busy_lcore = target_lcore;
		}

		/* Skip cores that cannot fit the thread and current one. */
		if (!_can_core_fit_thread(lw_thread, target_lcore) || target_lcore == current_lcore) {
			continue;
@@ -179,6 +213,12 @@ _find_optimal_core(struct spdk_lw_thread *lw_thread)
		return target_lcore;
	}

	/* For cores over the limit, place the thread on least busy core
	 * to balance threads. */
	if (_is_core_over_limit(current_lcore)) {
		return least_busy_lcore;
	}

	/* If no better core is found, remain on the same one. */
	return current_lcore;
}
+1 −1
Original line number Diff line number Diff line
@@ -894,7 +894,7 @@ test_governor(void)
	/* TEST 3 */
	/* Make second thread very busy so that it will be moved to second core */
	spdk_set_thread(thread[1]);
	busy = spdk_poller_register(poller_run_busy, (void *)1000, 0);
	busy = spdk_poller_register(poller_run_busy, (void *)2000, 0);
	_reactor_run(reactor);
	spdk_poller_unregister(&busy);