Commit 2dbbbbd8 authored by Damiano Cipriani, committed by Tomasz Zawadzki
Browse files

blob: keep count of allocated clusters number



A new variable has been added in spdk_blob_mut_data to store the
number of allocated clusters in clusters array. Also a new method
to get this value has been added.

Change-Id: Ibac9344bcf7e4c5e6e12cf78a6eae0a4d6755acb
Signed-off-by: Damiano Cipriani <damiano.cipriani@suse.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/19712


Reviewed-by: Jim Harris <jim.harris@samsung.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Community-CI: Mellanox Build Bot
parent 1666fcfc
Loading
Loading
Loading
Loading
+15 −1
Original line number Diff line number Diff line
@@ -495,7 +495,9 @@ uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob);
uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob);

/**
 * Get the number of clusters allocated to the blob.
 * Get the number of clusters in the blob.
 *
 * This value represents the size of the blob in number of clusters.
 *
 * \param blob Blob struct to query.
 *
@@ -503,6 +505,18 @@ uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob);
 */
uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob);

/**
 * Get the number of clusters allocated to the blob.
 *
 * In case of a thin-provisioned blob, this value is less than or equal
 * to the number of clusters in the blob, otherwise they are equal.
 *
 * \param blob Blob struct to query.
 *
 * \return the number of clusters actually allocated to the blob.
 */
uint64_t spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob);

/**
 * Get next allocated io_unit
 *
+36 −0
Original line number Diff line number Diff line
@@ -164,6 +164,8 @@ blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t clust
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	blob->active.num_allocated_clusters++;

	return 0;
}

@@ -547,6 +549,7 @@ blob_mark_clean(struct spdk_blob *blob)
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_allocated_clusters = blob->active.num_allocated_clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

@@ -701,6 +704,7 @@ blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
						blob->active.num_allocated_clusters++;
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
@@ -818,6 +822,7 @@ blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
					blob->active.num_allocated_clusters++;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
@@ -2254,6 +2259,13 @@ blob_resize(struct spdk_blob *blob, uint64_t sz)
		}
	}

	/* If we are shrinking the blob, we must adjust num_allocated_clusters */
	for (i = sz; i < num_clusters; i++) {
		if (blob->active.clusters[i] != 0) {
			blob->active.num_allocated_clusters--;
		}
	}

	blob->active.num_clusters = sz;
	blob->active.num_extent_pages = new_num_ep;

@@ -4092,6 +4104,8 @@ bs_delete_corrupted_blob(void *cb_arg, int bserrno)
		ctx->blob->active.extent_pages[i] = 0;
	}

	ctx->blob->active.num_allocated_clusters = 0;

	ctx->blob->md_ro = false;

	blob_set_thin_provision(ctx->blob);
@@ -6032,6 +6046,14 @@ spdk_blob_get_num_clusters(struct spdk_blob *blob)
	return blob->active.num_clusters;
}

/* Return the number of clusters in the blob's cluster map that are actually
 * allocated (maintained incrementally as clusters are inserted/freed).
 * For a thin-provisioned blob this may be less than
 * spdk_blob_get_num_clusters(); otherwise the two are equal.
 */
uint64_t
spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob)
{
	assert(blob != NULL);

	return blob->active.num_allocated_clusters;
}

static uint64_t
blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated)
{
@@ -6401,12 +6423,17 @@ static void
bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	uint64_t *cluster_temp;
	uint64_t num_allocated_clusters_temp;
	uint32_t *extent_page_temp;

	cluster_temp = blob1->active.clusters;
	blob1->active.clusters = blob2->active.clusters;
	blob2->active.clusters = cluster_temp;

	num_allocated_clusters_temp = blob1->active.num_allocated_clusters;
	blob1->active.num_allocated_clusters = blob2->active.num_allocated_clusters;
	blob2->active.num_allocated_clusters = num_allocated_clusters_temp;

	extent_page_temp = blob1->active.extent_pages;
	blob1->active.extent_pages = blob2->active.extent_pages;
	blob2->active.extent_pages = extent_page_temp;
@@ -7598,6 +7625,9 @@ delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
	/* Clear cluster map entries for snapshot */
	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
			if (ctx->snapshot->active.clusters[i] != 0) {
				ctx->snapshot->active.num_allocated_clusters--;
			}
			ctx->snapshot->active.clusters[i] = 0;
		}
	}
@@ -7719,6 +7749,9 @@ delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
		if (ctx->clone->active.clusters[i] == 0) {
			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
			if (ctx->clone->active.clusters[i] != 0) {
				ctx->clone->active.num_allocated_clusters++;
			}
		}
	}
	ctx->next_extent_page = 0;
@@ -8430,6 +8463,9 @@ blob_free_cluster_msg(void *arg)

	ctx->cluster = ctx->blob->active.clusters[ctx->cluster_num];
	ctx->blob->active.clusters[ctx->cluster_num] = 0;
	if (ctx->cluster != 0) {
		ctx->blob->active.num_allocated_clusters--;
	}

	if (ctx->blob->use_extent_table == false) {
		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
+3 −0
Original line number Diff line number Diff line
@@ -52,6 +52,9 @@ struct spdk_blob_mut_data {
	 */
	size_t		cluster_array_size;

	/* The number of allocated clusters in the clusters array */
	uint64_t	num_allocated_clusters;

	/* Number of extent pages */
	uint64_t	num_extent_pages;

+1 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
	spdk_blob_get_num_pages;
	spdk_blob_get_num_io_units;
	spdk_blob_get_num_clusters;
	spdk_blob_get_num_allocated_clusters;
	spdk_blob_get_next_allocated_io_unit;
	spdk_blob_get_next_unallocated_io_unit;
	spdk_blob_opts_init;
+132 −0
Original line number Diff line number Diff line
@@ -407,6 +407,7 @@ blob_create(void)
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
@@ -429,6 +430,7 @@ blob_create(void)
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
@@ -448,6 +450,7 @@ blob_create(void)
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
@@ -700,6 +703,7 @@ blob_thin_provision(void)
	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	/* In thin provisioning with num_clusters is set, if not using the
	 * extent table, there is no allocation. If extent table is used,
	 * there is related allocation happened. */
@@ -726,6 +730,7 @@ blob_thin_provision(void)
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	ut_blob_close_and_delete(bs, blob);

@@ -778,8 +783,10 @@ blob_snapshot(void)
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
@@ -803,6 +810,7 @@ blob_snapshot(void)
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);

	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
	CU_ASSERT(snapshot->back_bs_dev == NULL);
@@ -1002,6 +1010,7 @@ blob_clone(void)
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(clone) == 0);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
@@ -1081,6 +1090,7 @@ _blob_inflate(bool decouple_parent)
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* 1) Blob with no parent */
	if (decouple_parent) {
@@ -1094,6 +1104,7 @@ _blob_inflate(bool decouple_parent)
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	}

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
@@ -1128,6 +1139,7 @@ _blob_inflate(bool decouple_parent)
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
@@ -1135,6 +1147,7 @@ _blob_inflate(bool decouple_parent)
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	}

	/* Now, it should be possible to delete snapshot */
@@ -1194,6 +1207,7 @@ blob_resize_test(void)

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
@@ -1207,6 +1221,7 @@ blob_resize_test(void)
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 5);

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
@@ -1216,6 +1231,7 @@ blob_resize_test(void)
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 3);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
@@ -1228,6 +1244,7 @@ blob_resize_test(void)
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
@@ -1237,6 +1254,88 @@ blob_resize_test(void)
	ut_blob_close_and_delete(bs, blob);
}

/* Verify that num_allocated_clusters is tracked correctly for a
 * thin-provisioned blob across resize (grow and shrink), cluster-allocating
 * writes, and metadata sync.
 */
static void
blob_resize_thin_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_io_channel *blob_ch;
	uint64_t free_clusters;
	uint64_t io_units_per_cluster;
	uint64_t offset;
	uint8_t buf1[DEV_BUFFER_BLOCKLEN];

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);

	/* Use a deterministic payload. Writing the buffer uninitialized would
	 * read indeterminate stack bytes (flagged by MSan/valgrind). */
	memset(buf1, 0xAA, sizeof(buf1));

	/* Create blob with thin provisioning enabled */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 0;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	io_units_per_cluster = bs_io_units_per_cluster(blob);

	/* The blob started at 0 clusters. Resize it to be 6. */
	spdk_blob_resize(blob, 6, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Write on cluster 0,2,4 and 5 of blob */
	for (offset = 0; offset < io_units_per_cluster; offset++) {
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	for (offset = 4 * io_units_per_cluster; offset < 5 * io_units_per_cluster; offset++) {
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	for (offset = 5 * io_units_per_cluster; offset < 6 * io_units_per_cluster; offset++) {
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Check allocated clusters after write */
	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 4);

	/* Shrink the blob to 2 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);

	/* Sync blob: 4 clusters were truncated but only 3 of them were allocated */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 1) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);

	spdk_bs_free_io_channel(blob_ch);
	ut_blob_close_and_delete(bs, blob);
}

static void
blob_read_only(void)
{
@@ -1991,6 +2090,7 @@ blob_unmap(void)
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	memset(payload, 0, sizeof(payload));
	payload[0] = 0xFF;
@@ -2019,6 +2119,7 @@ blob_unmap(void)
	blob->active.clusters[3] = 0;
	blob->active.clusters[6] = 0;
	blob->active.clusters[8] = 0;
	blob->active.num_allocated_clusters -= 5;

	/* Unmap clusters by resizing to 0 */
	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
@@ -2028,6 +2129,7 @@ blob_unmap(void)
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Confirm that only 'allocated' clusters were unmapped */
	for (i = 1; i < 11; i++) {
@@ -2359,6 +2461,7 @@ bs_load(void)
	CU_ASSERT(rc == -ENOENT);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
@@ -4170,6 +4273,7 @@ blob_thin_prov_alloc(void)

	CU_ASSERT(blob->active.num_clusters == 0);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
@@ -4178,6 +4282,7 @@ blob_thin_prov_alloc(void)
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Grow it to 1TB - still unallocated */
	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
@@ -4186,6 +4291,7 @@ blob_thin_prov_alloc(void)
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 262144);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
@@ -4194,6 +4300,7 @@ blob_thin_prov_alloc(void)
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 262144);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	/* Since clusters are not allocated,
	 * number of metadata pages is expected to be minimal.
	 */
@@ -4206,6 +4313,7 @@ blob_thin_prov_alloc(void)
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
@@ -4214,6 +4322,7 @@ blob_thin_prov_alloc(void)
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
@@ -4325,6 +4434,7 @@ blob_thin_prov_rw(void)
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
@@ -4332,6 +4442,7 @@ blob_thin_prov_rw(void)
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
@@ -4339,6 +4450,7 @@ blob_thin_prov_rw(void)
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xFF, sizeof(payload_read));
@@ -4366,6 +4478,7 @@ blob_thin_prov_rw(void)
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
	/* For thin-provisioned blob we need to write 20 pages plus one page metadata and
	 * read 0 bytes */
	if (g_use_extent_table) {
@@ -4492,6 +4605,7 @@ blob_thin_prov_write_count_io(void)
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 1);

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		CU_ASSERT(g_dev_write_bytes == write_bytes);
@@ -4503,6 +4617,7 @@ blob_thin_prov_write_count_io(void)
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 2);

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		/*
@@ -6061,12 +6176,19 @@ blob_relations2(void)

	/* 10. Remove snapshot 1 */

	/* Check snapshot 1 and snapshot 2 allocated clusters */
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot1) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);

	ut_blob_close_and_delete(bs, snapshot1);

	/* Check if relations are back to state from before creating snapshot 4 (before step 6) */
	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);

	/* Check that snapshot 2 has the clusters that were allocated to snapshot 1 */
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 10);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
@@ -6454,12 +6576,15 @@ blob_delete_snapshot_power_failure(void)
			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
			CU_ASSERT(rc != 0);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);

			spdk_blob_close(snapshot, blob_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
		} else {
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
			/* Snapshot might have been left in unrecoverable state, so it does not open.
			 * Yet delete might perform further changes to the clone after that.
			 * This UT should test until snapshot is deleted and delete call succeeds. */
@@ -6553,6 +6678,8 @@ blob_create_snapshot_power_failure(void)
			snapshot = g_blob;
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
			count = SPDK_COUNTOF(ids);
			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
@@ -6571,6 +6698,7 @@ blob_create_snapshot_power_failure(void)
		} else {
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
		}

		spdk_blob_close(blob, blob_op_complete, NULL);
@@ -9215,11 +9343,13 @@ blob_shallow_copy(void)
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);

	/* Make a snapshot over blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Write on cluster 1 and 3 of blob */
	for (offset = 0; offset < io_units_per_cluster; offset++) {
@@ -9234,6 +9364,7 @@ blob_shallow_copy(void)
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);

	/* Shallow copy with a not read only blob */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
@@ -9525,6 +9656,7 @@ main(int argc, char **argv)
		CU_ADD_TEST(suite_bs, blob_inflate);
		CU_ADD_TEST(suite_bs, blob_delete);
		CU_ADD_TEST(suite_bs, blob_resize_test);
		CU_ADD_TEST(suite_bs, blob_resize_thin_test);
		CU_ADD_TEST(suite, blob_read_only);
		CU_ADD_TEST(suite_bs, channel_ops);
		CU_ADD_TEST(suite_bs, blob_super);