Commit e979d56f authored by Jim Harris
Browse files

env_dpdk: retry failed numa-id specific allocations



We want to start allocating memory local to NUMA nodes where
possible, without breaking setups that may not have hugepages allocated
for each NUMA node.

So when a node_id is actually specified (i.e. not SOCKET_ID_ANY),
and the allocation fails, try it again but with SOCKET_ID_ANY.

(Note: using SOCKET_ID_ANY in this file instead of
SPDK_ENV_NUMA_ID_ANY, since the former is the DPDK definition.)

Later we will add a patch that enforces the NUMA node when specified,
to help users who definitely want NUMA optimizations so that these
setups can be ensured to have memory allocated per node. Note that
setup.sh has already been updated to allocate memory evenly across
NUMA nodes by default (originally it would only allocate from node 0
by default).

Signed-off-by: Jim Harris <jim.harris@samsung.com>
Change-Id: I6b852532ac93230de6a91d3eb829448fbd903e8b
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/24578


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Ben Walker <ben@nvidia.com>
Community-CI: Mellanox Build Bot
parent dc2dd173
Loading
Loading
Loading
Loading
+28 −3
Original line number Diff line number Diff line
@@ -27,23 +27,35 @@ SPDK_STATIC_ASSERT(SOCKET_ID_ANY == SPDK_ENV_NUMA_ID_ANY, "SOCKET_ID_ANY mismatc
/*
 * Allocate an uninitialized buffer of at least 'size' bytes, aligned to
 * at least a cache line, preferring memory local to 'numa_id'.
 *
 * If a specific NUMA node was requested and the node-local allocation
 * fails (e.g. no hugepages reserved on that node), retry with
 * SOCKET_ID_ANY so existing setups keep working.
 *
 * 'flags' must be non-zero and the legacy 'unused' (phys_addr) out-param
 * must be NULL; otherwise NULL is returned.
 */
void *
spdk_malloc(size_t size, size_t align, uint64_t *unused, int numa_id, uint32_t flags)
{
	void *buf;

	if (flags == 0 || unused != NULL) {
		return NULL;
	}

	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	buf = rte_malloc_socket(NULL, size, align, numa_id);
	if (buf == NULL && numa_id != SOCKET_ID_ANY) {
		/* Fall back to any NUMA node rather than failing outright. */
		buf = rte_malloc_socket(NULL, size, align, SOCKET_ID_ANY);
	}
	return buf;
}

/*
 * Same contract as spdk_malloc(), but the returned buffer is zeroed.
 *
 * Tries a node-local allocation first; if 'numa_id' is a specific node
 * and that allocation fails, retries with SOCKET_ID_ANY so setups
 * without per-node hugepages still succeed.
 *
 * 'flags' must be non-zero and the legacy 'unused' (phys_addr) out-param
 * must be NULL; otherwise NULL is returned.
 */
void *
spdk_zmalloc(size_t size, size_t align, uint64_t *unused, int numa_id, uint32_t flags)
{
	void *buf;

	if (flags == 0 || unused != NULL) {
		return NULL;
	}

	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	buf = rte_zmalloc_socket(NULL, size, align, numa_id);
	if (buf == NULL && numa_id != SOCKET_ID_ANY) {
		/* Fall back to any NUMA node rather than failing outright. */
		buf = rte_zmalloc_socket(NULL, size, align, SOCKET_ID_ANY);
	}
	return buf;
}

void *
@@ -115,6 +127,9 @@ spdk_memzone_reserve_aligned(const char *name, size_t len, int numa_id,
	}

	mz = rte_memzone_reserve_aligned(name, len, numa_id, dpdk_flags, align);
	if (mz == NULL && numa_id != SOCKET_ID_ANY) {
		mz = rte_memzone_reserve_aligned(name, len, SOCKET_ID_ANY, dpdk_flags, align);
	}

	if (mz != NULL) {
		memset(mz->addr, 0, len);
@@ -186,6 +201,11 @@ spdk_mempool_create_ctor(const char *name, size_t count,
	mp = rte_mempool_create(name, count, ele_size, cache_size,
				0, NULL, NULL, (rte_mempool_obj_cb_t *)obj_init, obj_init_arg,
				numa_id, 0);
	if (mp == NULL && numa_id != SOCKET_ID_ANY) {
		mp = rte_mempool_create(name, count, ele_size, cache_size,
					0, NULL, NULL, (rte_mempool_obj_cb_t *)obj_init, obj_init_arg,
					SOCKET_ID_ANY, 0);
	}

	return (struct spdk_mempool *)mp;
}
@@ -373,6 +393,7 @@ spdk_ring_create(enum spdk_ring_type type, size_t count, int numa_id)
	char ring_name[64];
	static uint32_t ring_num = 0;
	unsigned flags = RING_F_EXACT_SZ;
	struct rte_ring *ring;

	switch (type) {
	case SPDK_RING_TYPE_SP_SC:
@@ -391,7 +412,11 @@ spdk_ring_create(enum spdk_ring_type type, size_t count, int numa_id)
	snprintf(ring_name, sizeof(ring_name), "ring_%u_%d",
		 __atomic_fetch_add(&ring_num, 1, __ATOMIC_RELAXED), getpid());

	return (struct spdk_ring *)rte_ring_create(ring_name, count, numa_id, flags);
	ring = rte_ring_create(ring_name, count, numa_id, flags);
	if (ring == NULL && numa_id != SOCKET_ID_ANY) {
		ring = rte_ring_create(ring_name, count, SOCKET_ID_ANY, flags);
	}
	return (struct spdk_ring *)ring;
}

void