Commit 846ca675 authored by Tomasz Zawadzki
Browse files

lib/blob: remove requirement for md_page alignment



The md_page alignment is not really required for md_page
buffers.
Allocating 4k-aligned buffers all the time causes memory
to become heavily fragmented, because DPDK keeps track of
the allocations in the same DMA region as the allocations
themselves. Removing this alignment requirement will help
DPDK find the right part of memory in the heap.

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reported-by: Mike Cui
Change-Id: If2f4ca2be38d432d5740f6145b5e0ff46237806b
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3853


Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
parent 0e91da2f
Loading
Loading
Loading
Loading
+9 −12
Original line number Diff line number Diff line
@@ -815,14 +815,12 @@ blob_serialize_add_page(const struct spdk_blob *blob,
	if (*page_count == 0) {
		assert(*pages == NULL);
		*page_count = 1;
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	} else {
		assert(*pages != NULL);
		(*page_count)++;
		*pages = spdk_realloc(*pages,
				      SPDK_BS_PAGE_SIZE * (*page_count),
				      SPDK_BS_PAGE_SIZE);
		*pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count), 0);
	}

	if (*pages == NULL) {
@@ -1327,8 +1325,8 @@ blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE, NULL, SPDK_ENV_SOCKET_ID_ANY,
					  SPDK_MALLOC_DMA);
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
@@ -1431,8 +1429,7 @@ blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)

		/* Read the next page */
		ctx->num_pages++;
		ctx->pages = spdk_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
					  sizeof(*page));
		ctx->pages = spdk_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages), 0);
		if (ctx->pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
@@ -1500,7 +1497,7 @@ blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE);
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
@@ -3992,7 +3989,7 @@ bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx)
	uint64_t lba;
	uint64_t i;

	ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, SPDK_BS_PAGE_SIZE,
	ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0,
					 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->extent_pages) {
		bs_load_ctx_fail(ctx, -ENOMEM);
@@ -4068,7 +4065,7 @@ bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
{
	ctx->page_index = 0;
	ctx->cur_page = 0;
	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->page) {
		bs_load_ctx_fail(ctx, -ENOMEM);
@@ -4479,7 +4476,7 @@ bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
	fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len);

	ctx->cur_page = 0;
	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	if (!ctx->page) {
		bs_dump_finish(seq, ctx, -ENOMEM);