Commit a5c04e6d authored by Mateusz Kozlowski, committed by Tomasz Zawadzki
Browse files

lib/ftl: NV chunk metadata upgrade



Introduce a new metadata version for NV cache chunks metadata.
It abandons the chunk version stored in VSS. The version is migrated
to the metadata stored in the chunk itself.

Change-Id: I5fdf5107db98e506dd0c8ddfd75c444cd1c12255
Signed-off-by: Lukasz Lasek <lukasz.lasek@solidigmtechnology.com>
Signed-off-by: Mariusz Barczak <Mariusz.Barczak@solidigmtechnology.com>
Signed-off-by: Mateusz Kozlowski <mateusz.kozlowski@solidigm.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/19601


Reviewed-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Reviewed-by: Jim Harris <jim.harris@samsung.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent 0303cf57
Loading
Loading
Loading
Loading
+9 −35
Original line number Diff line number Diff line
@@ -14,6 +14,7 @@
#include "ftl_sb.h"
#include "nvc/ftl_nvc_dev.h"
#include "utils/ftl_layout_tracker_bdev.h"
#include "upgrade/ftl_layout_upgrade.h"

enum ftl_layout_setup_mode {
	FTL_LAYOUT_SETUP_MODE_LOAD_CURRENT = 0,
@@ -174,23 +175,6 @@ get_num_user_lbas(struct spdk_ftl_dev *dev)
	return blocks;
}

/*
 * Return the size (in blocks) of the largest free region tracked by the
 * given layout tracker, or 0 when no free region remains.
 */
static uint64_t
layout_blocks_left(struct spdk_ftl_dev *dev, struct ftl_layout_tracker_bdev *layout_tracker)
{
	const struct ftl_layout_tracker_bdev_region_props *search_ctx = NULL;
	uint64_t largest_free = 0;

	for (;;) {
		ftl_layout_tracker_bdev_find_next_region(layout_tracker, FTL_LAYOUT_REGION_TYPE_FREE,
				&search_ctx);
		if (search_ctx == NULL) {
			break;
		}
		largest_free = spdk_max(largest_free, search_ctx->blk_sz);
	}

	return largest_free;
}

struct ftl_layout_region *
ftl_layout_region_get(struct spdk_ftl_dev *dev, enum ftl_layout_region_type reg_type)
{
@@ -362,8 +346,6 @@ layout_setup_legacy_default_nvc(struct spdk_ftl_dev *dev)
	/*
	 * Initialize NV Cache metadata
	 */
	layout->nvc.chunk_count = chunk_count;

	if (legacy_layout_region_open_nvc(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD, FTL_NVC_VERSION_1,
					  sizeof(struct ftl_nv_cache_chunk_md), chunk_count)) {
		goto error;
@@ -430,7 +412,7 @@ static int
layout_setup_default_nvc(struct spdk_ftl_dev *dev)
{
	int region_type;
	uint64_t left, l2p_blocks;
	uint64_t l2p_blocks;
	struct ftl_layout *layout = &dev->layout;

	/* Initialize L2P region */
@@ -483,9 +465,6 @@ layout_setup_default_nvc(struct spdk_ftl_dev *dev)
	/*
	 * Initialize NV Cache metadata
	 */
	left = layout_blocks_left(dev, dev->nvc_layout_tracker);
	layout->nvc.chunk_count = (left * FTL_BLOCK_SIZE) /
				  FTL_NV_CACHE_CHUNK_SIZE(ftl_get_num_blocks_in_band(dev));
	if (0 == layout->nvc.chunk_count) {
		goto error;
	}
@@ -503,14 +482,6 @@ layout_setup_default_nvc(struct spdk_ftl_dev *dev)
	}
	layout->region[FTL_LAYOUT_REGION_TYPE_NVC_MD].mirror_type = FTL_LAYOUT_REGION_TYPE_NVC_MD_MIRROR;

	/*
	 * Initialize data region on NV cache
	 */
	if (layout_region_create_nvc(dev, FTL_LAYOUT_REGION_TYPE_DATA_NVC, 0,
				     layout->nvc.chunk_data_blocks * FTL_BLOCK_SIZE, layout->nvc.chunk_count)) {
		goto error;
	}

	return 0;

error:
@@ -558,7 +529,6 @@ layout_load(struct spdk_ftl_dev *dev)
	if (ftl_superblock_load_blob_area(dev)) {
		return -1;
	}
	dev->layout.nvc.chunk_count = dev->layout.region[FTL_LAYOUT_REGION_TYPE_DATA_NVC].num_entries;
	if (ftl_superblock_md_layout_apply(dev)) {
		return -1;
	}
@@ -632,9 +602,8 @@ ftl_layout_setup(struct spdk_ftl_dev *dev)
	/* Setup P2L ckpt */
	layout->p2l.ckpt_pages = spdk_divide_round_up(ftl_get_num_blocks_in_band(dev), dev->xfer_size);

	layout->nvc.chunk_data_blocks =
		FTL_NV_CACHE_CHUNK_DATA_SIZE(ftl_get_num_blocks_in_band(dev)) / FTL_BLOCK_SIZE;
	layout->nvc.chunk_meta_size = FTL_NV_CACHE_CHUNK_MD_SIZE;
	layout->nvc.chunk_data_blocks = ftl_get_num_blocks_in_band(dev);
	layout->nvc.chunk_count = layout->nvc.total_blocks / ftl_get_num_blocks_in_band(dev);
	layout->nvc.chunk_tail_md_num_blocks = ftl_nv_cache_chunk_tail_md_num_blocks(&dev->nv_cache);

	layout->base.num_usable_blocks = ftl_get_num_blocks_in_band(dev);
@@ -668,6 +637,11 @@ ftl_layout_setup(struct spdk_ftl_dev *dev)
		return -EINVAL;
	}

	/* Now drop the unused regions in preparation for the layout upgrade */
	if (ftl_layout_upgrade_drop_regions(dev)) {
		return -EINVAL;
	}

	rc = ftl_superblock_store_blob_area(dev);

	FTL_NOTICELOG(dev, "Base device capacity:         %.2f MiB\n",
+0 −5
Original line number Diff line number Diff line
@@ -12,10 +12,6 @@
struct spdk_ftl_dev;
struct ftl_md;

#define FTL_NV_CACHE_CHUNK_DATA_SIZE(blocks) ((uint64_t)blocks * FTL_BLOCK_SIZE)
#define FTL_NV_CACHE_CHUNK_SIZE(blocks) \
	(FTL_NV_CACHE_CHUNK_DATA_SIZE(blocks) + (2 * FTL_NV_CACHE_CHUNK_MD_SIZE))

#define FTL_LAYOUT_REGION_TYPE_P2L_COUNT \
	(FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MAX - FTL_LAYOUT_REGION_TYPE_P2L_CKPT_MIN + 1)

@@ -132,7 +128,6 @@ struct ftl_layout {
	struct {
		uint64_t total_blocks;
		uint64_t chunk_data_blocks;
		uint64_t chunk_meta_size;
		uint64_t chunk_count;
		uint64_t chunk_tail_md_num_blocks;
	} nvc;
+141 −25
Original line number Diff line number Diff line
@@ -26,15 +26,6 @@ static void ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev,
					const struct ftl_property *property,
					struct spdk_json_write_ctx *w);

/* Look up the NV cache data region via the device that embeds nv_cache. */
static inline const struct ftl_layout_region *
nvc_data_region(struct ftl_nv_cache *nv_cache)
{
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);

	return ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_DATA_NVC);
}

static inline void
nvc_validate_md(struct ftl_nv_cache *nv_cache,
		struct ftl_nv_cache_chunk_md *chunk_md)
@@ -57,13 +48,13 @@ nvc_validate_md(struct ftl_nv_cache *nv_cache,
static inline uint64_t
nvc_data_offset(struct ftl_nv_cache *nv_cache)
{
	return nvc_data_region(nv_cache)->current.offset;
	return 0;
}

static inline uint64_t
nvc_data_blocks(struct ftl_nv_cache *nv_cache)
{
	return nvc_data_region(nv_cache)->current.blocks;
	return nv_cache->chunk_blocks * nv_cache->chunk_count;
}

size_t
@@ -109,6 +100,118 @@ ftl_nv_cache_init_update_limits(struct spdk_ftl_dev *dev)
				 100);
}

/* State of an in-flight NV cache scrub operation. */
struct nvc_scrub_ctx {
	/* Index of the chunk currently being scrubbed */
	uint64_t chunk_no;
	/* Completion callback and its user context */
	nvc_scrub_cb cb;
	void *cb_ctx;

	/* Dummy layout region describing the current chunk's data area */
	struct ftl_layout_region reg_chunk;
	/* MD object used to issue the clear I/O for reg_chunk */
	struct ftl_md *md_chunk;
};

/*
 * Advance scrub_ctx to the next active chunk, moving the dummy region's
 * offset along with it. Returns 0 when an active chunk was found,
 * -ENOENT when all chunks have been examined.
 */
static int
nvc_scrub_find_next_chunk(struct spdk_ftl_dev *dev, struct nvc_scrub_ctx *scrub_ctx)
{
	for (; scrub_ctx->chunk_no < dev->layout.nvc.chunk_count; scrub_ctx->chunk_no++) {
		bool active = dev->nv_cache.nvc_type->ops.is_chunk_active(dev,
				scrub_ctx->reg_chunk.current.offset);

		if (active) {
			return 0;
		}

		/* Keep the dummy region pointing at the chunk being examined */
		scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;
	}
	return -ENOENT;
}

static void
nvc_scrub_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
	struct nvc_scrub_ctx *scrub_ctx = md->owner.cb_ctx;
	union ftl_md_vss vss;

	/* Move to the next chunk */
	scrub_ctx->chunk_no++;
	scrub_ctx->reg_chunk.current.offset += dev->layout.nvc.chunk_data_blocks;

	FTL_DEBUGLOG(dev, "Scrub progress: %"PRIu64"/%"PRIu64" chunks\n",
		     scrub_ctx->chunk_no, dev->layout.nvc.chunk_count);

	if (status || nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
		/* IO error or no more active chunks found. Scrubbing finished. */
		scrub_ctx->cb(dev, scrub_ctx->cb_ctx, status);
		ftl_md_destroy(scrub_ctx->md_chunk, 0);
		free(scrub_ctx);
		return;
	}

	/* Scrub the next chunk */
	vss.version.md_version = 0;
	vss.nv_cache.lba = FTL_ADDR_INVALID;

	scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
	scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;

	ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
}

/*
 * Clear the data area of every active NV cache chunk, stamping each block's
 * VSS with md_version 0 and an invalid LBA so stale user data cannot be
 * pulled in by open-chunk recovery from a previous instance.
 *
 * The scrub runs asynchronously, one chunk at a time (continued from
 * nvc_scrub_clear_cb); cb(dev, cb_ctx, status) is invoked exactly once, when
 * scrubbing completes, fails, or cannot start (-ENOMEM/-ENOENT).
 */
void
ftl_nv_cache_scrub(struct spdk_ftl_dev *dev, nvc_scrub_cb cb, void *cb_ctx)
{
	struct nvc_scrub_ctx *scrub_ctx = calloc(1, sizeof(*scrub_ctx));
	union ftl_md_vss vss;

	if (!scrub_ctx) {
		cb(dev, cb_ctx, -ENOMEM);
		return;
	}

	scrub_ctx->cb = cb;
	scrub_ctx->cb_ctx = cb_ctx;

	/* Setup a dummy region for the first chunk; nvc_scrub_find_next_chunk
	 * slides its offset from chunk to chunk as the scrub progresses. */
	scrub_ctx->reg_chunk.name = ftl_md_region_name(FTL_LAYOUT_REGION_TYPE_DATA_NVC);
	scrub_ctx->reg_chunk.type = FTL_LAYOUT_REGION_TYPE_DATA_NVC;
	scrub_ctx->reg_chunk.mirror_type = FTL_LAYOUT_REGION_TYPE_INVALID;
	scrub_ctx->reg_chunk.current.version = 0;
	scrub_ctx->reg_chunk.current.offset = 0;
	scrub_ctx->reg_chunk.current.blocks = dev->layout.nvc.chunk_data_blocks;
	scrub_ctx->reg_chunk.entry_size = FTL_BLOCK_SIZE;
	scrub_ctx->reg_chunk.num_entries = dev->layout.nvc.chunk_data_blocks;
	scrub_ctx->reg_chunk.vss_blksz = dev->nv_cache.md_size;
	scrub_ctx->reg_chunk.bdev_desc = dev->nv_cache.bdev_desc;
	scrub_ctx->reg_chunk.ioch = dev->nv_cache.cache_ioch;

	/* Setup an MD object for the region; FTL_MD_CREATE_NO_MEM avoids
	 * allocating a shadow buffer for the whole chunk. */
	scrub_ctx->md_chunk = ftl_md_create(dev, scrub_ctx->reg_chunk.current.blocks,
					    scrub_ctx->reg_chunk.vss_blksz, scrub_ctx->reg_chunk.name, FTL_MD_CREATE_NO_MEM,
					    &scrub_ctx->reg_chunk);

	if (!scrub_ctx->md_chunk) {
		free(scrub_ctx);
		cb(dev, cb_ctx, -ENOMEM);
		return;
	}

	if (nvc_scrub_find_next_chunk(dev, scrub_ctx)) {
		/* No active chunks found */
		ftl_md_destroy(scrub_ctx->md_chunk, 0);
		free(scrub_ctx);
		cb(dev, cb_ctx, -ENOENT);
		return;
	}

	/* Scrub the first chunk; subsequent chunks are driven by the callback */
	vss.version.md_version = 0;
	vss.nv_cache.lba = FTL_ADDR_INVALID;

	scrub_ctx->md_chunk->cb = nvc_scrub_clear_cb;
	scrub_ctx->md_chunk->owner.cb_ctx = scrub_ctx;

	ftl_md_clear(scrub_ctx->md_chunk, 0, &vss);
}

int
ftl_nv_cache_init(struct spdk_ftl_dev *dev)
{
@@ -168,11 +271,12 @@ ftl_nv_cache_init(struct spdk_ftl_dev *dev)
	for (i = 0; i < nv_cache->chunk_count; i++, chunk++, md++) {
		chunk->nv_cache = nv_cache;
		chunk->md = md;
		chunk->md->version = FTL_NVC_VERSION_CURRENT;
		nvc_validate_md(nv_cache, md);
		chunk->offset = offset;
		offset += nv_cache->chunk_blocks;

		if (nv_cache->nvc_type->ops.is_chunk_active(dev, chunk)) {
		if (nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset)) {
			nv_cache->chunk_free_count++;
			TAILQ_INSERT_TAIL(&nv_cache->chunk_free_list, chunk, entry);
		} else {
@@ -392,9 +496,7 @@ static int
ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk)
{
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);

	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->chunk_md_pool);

@@ -402,7 +504,7 @@ ftl_chunk_alloc_md_entry(struct ftl_nv_cache_chunk *chunk)
		return -ENOMEM;
	}

	memset(p2l_map->chunk_dma_md, 0, region->entry_size * FTL_BLOCK_SIZE);
	ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
	return 0;
}

@@ -421,7 +523,7 @@ ftl_chunk_free(struct ftl_nv_cache_chunk *chunk)
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;

	/* Reset chunk */
	memset(chunk->md, 0, sizeof(*chunk->md));
	ftl_nv_cache_chunk_md_initialize(chunk->md);

	TAILQ_INSERT_TAIL(&nv_cache->needs_free_persist_list, chunk, entry);
	nv_cache->chunk_free_persist_count++;
@@ -431,17 +533,14 @@ static int
ftl_chunk_alloc_chunk_free_entry(struct ftl_nv_cache_chunk *chunk)
{
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);

	p2l_map->chunk_dma_md = ftl_mempool_get(nv_cache->free_chunk_md_pool);

	if (!p2l_map->chunk_dma_md) {
		return -ENOMEM;
	}

	memset(p2l_map->chunk_dma_md, 0, region->entry_size * FTL_BLOCK_SIZE);
	ftl_nv_cache_chunk_md_initialize(p2l_map->chunk_dma_md);
	return 0;
}

@@ -1525,14 +1624,14 @@ ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)

	if (dev->sb->upgrade_ready) {
		/*
		 * During upgrade some transition are allowed:
		 * During upgrade some transitions are allowed:
		 *
		 * 1. FREE -> INACTIVE
		 * 2. INACTIVE -> FREE
		 */
		chunk = nv_cache->chunks;
		for (i = 0; i < nv_cache->chunk_count; i++, chunk++) {
			active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk);
			active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);

			if (chunk->md->state == FTL_CHUNK_STATE_FREE) {
				if (!active) {
@@ -1556,7 +1655,12 @@ ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache)
			goto error;
		}

		active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk);
		if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
			status = -EINVAL;
			goto error;
		}

		active = nv_cache->nvc_type->ops.is_chunk_active(dev, chunk->offset);
		if (false == active) {
			if (chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
				status = -EINVAL;
@@ -2226,12 +2330,17 @@ restore_chunk_state_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
	for (i = 0; i < nvc->chunk_count; i++) {
		chunk = &nvc->chunks[i];

		if (false == nvc->nvc_type->ops.is_chunk_active(dev, chunk) &&
		if (false == nvc->nvc_type->ops.is_chunk_active(dev, chunk->offset) &&
		    chunk->md->state != FTL_CHUNK_STATE_INACTIVE) {
			status = -EINVAL;
			break;
		}

		if (chunk->md->version != FTL_NVC_VERSION_CURRENT) {
			status = -EINVAL;
			break;
		}

		switch (chunk->md->state) {
		case FTL_CHUNK_STATE_FREE:
			break;
@@ -2395,7 +2504,7 @@ ftl_nv_cache_halt(struct ftl_nv_cache *nv_cache)

		TAILQ_REMOVE(&nv_cache->chunk_open_list, chunk, entry);
		chunk_free_p2l_map(chunk);
		memset(chunk->md, 0, sizeof(*chunk->md));
		ftl_nv_cache_chunk_md_initialize(chunk->md);
		assert(nv_cache->chunk_open_count > 0);
		nv_cache->chunk_open_count--;
	}
@@ -2495,3 +2604,10 @@ ftl_property_dump_cache_dev(struct spdk_ftl_dev *dev, const struct ftl_property
	}
	spdk_json_write_array_end(w);
}

void
ftl_nv_cache_chunk_md_initialize(struct ftl_nv_cache_chunk_md *md)
{
	memset(md, 0, sizeof(*md));
	md->version = FTL_NVC_VERSION_CURRENT;
}
+13 −4
Original line number Diff line number Diff line
@@ -25,8 +25,9 @@

#define FTL_NVC_VERSION_0	0
#define FTL_NVC_VERSION_1	1
#define FTL_NVC_VERSION_2	2

#define FTL_NVC_VERSION_CURRENT FTL_NVC_VERSION_1
#define FTL_NVC_VERSION_CURRENT FTL_NVC_VERSION_2

#define FTL_NV_CACHE_NUM_COMPACTORS 8

@@ -60,6 +61,9 @@ enum ftl_chunk_state {
};

struct ftl_nv_cache_chunk_md {
	/* Chunk metadata version */
	uint64_t version;

	/* Sequence id of writing */
	uint64_t seq_id;

@@ -88,11 +92,10 @@ struct ftl_nv_cache_chunk_md {
	uint32_t p2l_map_checksum;

	/* Reserved */
	uint8_t reserved[4052];
	uint8_t reserved[4044];
} __attribute__((packed));

#define FTL_NV_CACHE_CHUNK_MD_SIZE sizeof(struct ftl_nv_cache_chunk_md)
SPDK_STATIC_ASSERT(FTL_NV_CACHE_CHUNK_MD_SIZE == FTL_BLOCK_SIZE,
SPDK_STATIC_ASSERT(sizeof(struct ftl_nv_cache_chunk_md) == FTL_BLOCK_SIZE,
		   "FTL NV Chunk metadata size is invalid");

struct ftl_nv_cache_chunk {
@@ -228,6 +231,10 @@ struct ftl_nv_cache {
	} throttle;
};

typedef void (*nvc_scrub_cb)(struct spdk_ftl_dev *dev, void *cb_ctx, int status);

void ftl_nv_cache_scrub(struct spdk_ftl_dev *dev, nvc_scrub_cb cb, void *cb_ctx);

int ftl_nv_cache_init(struct spdk_ftl_dev *dev);
void ftl_nv_cache_deinit(struct spdk_ftl_dev *dev);
bool ftl_nv_cache_write(struct ftl_io *io);
@@ -286,4 +293,6 @@ struct ftl_nv_cache_chunk *ftl_nv_cache_get_chunk_from_addr(struct spdk_ftl_dev

uint64_t ftl_nv_cache_acquire_trim_seq_id(struct ftl_nv_cache *nv_cache);

void ftl_nv_cache_chunk_md_initialize(struct ftl_nv_cache_chunk_md *md);

#endif  /* FTL_NV_CACHE_H */
+18 −19
Original line number Diff line number Diff line
@@ -142,9 +142,9 @@ ftl_mngt_deinit_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt
}

static void
user_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
user_clear_cb(struct spdk_ftl_dev *dev, void *cb_ctx, int status)
{
	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
	struct ftl_mngt_process *mngt = cb_ctx;

	if (status) {
		FTL_ERRLOG(ftl_mngt_get_dev(mngt), "FTL NV Cache: ERROR of clearing user cache data\n");
@@ -157,23 +157,22 @@ user_clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
void
ftl_mngt_scrub_nv_cache(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_DATA_NVC);
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_DATA_NVC];
	union ftl_md_vss vss;
	bool is_first_start = (dev->conf.mode & SPDK_FTL_MODE_CREATE) != 0;
	bool is_major_upgrade = dev->sb->clean == 1 && dev->sb_shm->shm_clean == 0 &&
				dev->sb->upgrade_ready == 1;

	FTL_NOTICELOG(dev, "First startup needs to scrub nv cache data region, this may take some time.\n");
	FTL_NOTICELOG(dev, "Scrubbing %lluGiB\n", region->current.blocks * FTL_BLOCK_SIZE / GiB);
	if (is_first_start || is_major_upgrade) {
		FTL_NOTICELOG(dev, "NV cache data region needs scrubbing, this may take a while.\n");
		FTL_NOTICELOG(dev, "Scrubbing %"PRIu64" chunks\n", dev->layout.nvc.chunk_count);

		/* Need to scrub user data, so in case of dirty shutdown the recovery won't
		 * pull in data during open chunks recovery from any previous instance (since during short
		 * tests it's very likely that chunks seq_id will be in line between new head md and old VSS)
		 */
	md->cb = user_clear_cb;
	md->owner.cb_ctx = mngt;

	vss.version.md_version = region->current.version;
	vss.nv_cache.lba = FTL_ADDR_INVALID;
	ftl_md_clear(md, 0, &vss);
		ftl_nv_cache_scrub(dev, user_clear_cb, mngt);
	} else {
		ftl_mngt_skip_step(mngt);
	}
}

void
Loading