Commit 9ec9e130 authored by Jim Harris
Browse files

blob: track last md_page index correctly during resize



During resize, we correctly determine if we have enough
md_pages for new extent pages, before proceeding with
actually allocating clusters and associated extent
pages.

But during actual allocation, we were unconditionally
incrementing the lfmd output parameter after every
cluster allocation, which was incorrect.
Technically we should increment it any time
bs_allocate_cluster() allocated an md_page.  But
it's also fine to just not increment it at the
call site at all - worst case, we just check that
bit index again which isn't going to cause a
performance problem.

Also add a unit test that demonstrated the original
problem, and works fine with this patch.

Fixes issue #2932.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17150

 (master)

(cherry picked from commit 7c3c0b66)
Change-Id: Iba177a66e880fb99363944ee44d3d060a44a03a4
Signed-off-by: Krzysztof Karas <krzysztof.karas@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17702


Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
parent 9353fe35
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -2070,7 +2070,12 @@ blob_resize(struct spdk_blob *blob, uint64_t sz)
		lfmd = 0;
		for (i = num_clusters; i < sz; i++) {
			bs_allocate_cluster(blob, i, &cluster, &lfmd, true);
			lfmd++;
			/* Do not increment lfmd here.  lfmd will get updated
			 * to the md_page allocated (if any) when a new extent
			 * page is needed.  Just pass that value again,
			 * bs_allocate_cluster will just start at that index
			 * to find the next free md_page when needed.
			 */
		}
	}

+43 −0
Original line number Diff line number Diff line
@@ -7485,6 +7485,48 @@ blob_nested_freezes(void)
	g_blobid = 0;
}

/* Regression test for issue #2932: blob_resize() mistracked the last
 * allocated md_page index (lfmd), so a resize that needed many extent
 * pages could fail even though enough md_pages were available.
 */
static void
blob_ext_md_pages(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	uint64_t free_clusters;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
	/* Issue #2932 was a bug in how we use bs_allocate_cluster() during resize.
	 * It requires num_md_pages that is much smaller than the number of clusters.
	 * Make sure we can create a blob that uses all of the free clusters.
	 */
	bs_opts.cluster_sz = 65536;
	bs_opts.num_md_pages = 16;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* Request every free cluster so the resize path walks the full
	 * cluster range and exercises extent-page/md_page allocation.
	 */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = free_clusters;

	blob = ut_blob_create_and_open(bs, &opts);

	spdk_blob_close(blob, blob_op_complete, NULL);
	/* spdk_blob_close() completes asynchronously via blob_op_complete;
	 * drive the completion before asserting its status, matching the
	 * poll_threads() pattern used after spdk_bs_init/spdk_bs_unload
	 * above and below.
	 */
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}

static void
suite_bs_setup(void)
{
@@ -7662,6 +7704,7 @@ main(int argc, char **argv)
	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
	CU_ADD_TEST(suite_bs, blob_seek_io_unit);
	CU_ADD_TEST(suite_bs, blob_nested_freezes);
	CU_ADD_TEST(suite, blob_ext_md_pages);

	allocate_threads(2);
	set_thread(0);