Commit b3e5d8a7 authored by Kozlowski Mateusz's avatar Kozlowski Mateusz Committed by Jim Harris
Browse files

ftl: Add recovery and restart path for trim



Restores the necessary trim metadata and sets the L2P accordingly during the clean/dirty shutdown recovery
process.

Signed-off-by: default avatarKozlowski Mateusz <mateusz.kozlowski@intel.com>
Signed-off-by: default avatarArtur Paszkiewicz <artur.paszkiewicz@intel.com>
Change-Id: Iaa44025250b44f424ac9de5859d1db82900ecaa9
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13380


Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
Reviewed-by: default avatarBen Walker <benjamin.walker@intel.com>
parent 2c7c8b6c
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -93,6 +93,12 @@ ftl_l2p_persist(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
	FTL_L2P_OP(persist)(dev, cb, cb_ctx);
}

void
ftl_l2p_unmap(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	/* Dispatch the unmap operation to the active L2P backend implementation
	 * (same dispatch pattern as ftl_l2p_persist above); cb/cb_ctx are handed
	 * through unchanged and invoked by the backend on completion.
	 */
	FTL_L2P_OP(unmap)(dev, cb, cb_ctx);
}

void
ftl_l2p_process(struct spdk_ftl_dev *dev)
{
+115 −0
Original line number Diff line number Diff line
@@ -567,6 +567,24 @@ process_finish(struct ftl_l2p_cache *cache)
static void process_page_out_retry(void *_page);
static void process_persist(struct ftl_l2p_cache *cache);

/* Read the single block backing an L2P page into page->page_buffer.
 * If the read cannot even be submitted, the completion callback is
 * invoked synchronously with success == false and no bdev_io.
 */
static void
process_page_in(struct ftl_l2p_page *page, spdk_bdev_io_completion_cb cb)
{
	struct ftl_l2p_cache *l2p_cache = (struct ftl_l2p_cache *)page->ctx.cache;

	assert(page->page_buffer);

	int rc = ftl_nv_cache_bdev_read_blocks_with_md(l2p_cache->dev,
			ftl_l2p_cache_get_bdev_desc(l2p_cache),
			ftl_l2p_cache_get_bdev_iochannel(l2p_cache),
			page->page_buffer, NULL,
			ftl_l2p_cache_page_get_bdev_offset(l2p_cache, page),
			1, cb, page);
	if (rc != 0) {
		/* Submission failure - report through the same callback path */
		cb(NULL, false, page);
	}
}

static void
process_persist_page_out_cb(struct spdk_bdev_io *bdev_io, bool success, void *arg)
{
@@ -638,6 +656,103 @@ process_page_out_retry(void *_page)
	process_page_out(page, page->ctx.cb);
}

static void process_unmap(struct ftl_l2p_cache *cache);

/* Completion of the write-out of an invalidated L2P page: record any I/O
 * error, drop the page's unmap-map bit, release the page, and continue
 * draining the unmap scan.
 */
static void
process_unmap_page_out_cb(struct spdk_bdev_io *bdev_io, bool success, void *ctx_page)
{
	struct ftl_l2p_page *out_page = (struct ftl_l2p_page *)ctx_page;
	struct ftl_l2p_cache *l2p_cache = (struct ftl_l2p_cache *)out_page->ctx.cache;
	struct spdk_ftl_dev *ftl_dev = l2p_cache->dev;
	struct ftl_l2p_cache_process_ctx *pctx = &l2p_cache->mctx;

	assert(bdev_io);
	spdk_bdev_free_io(bdev_io);

	if (!success) {
		pctx->status = -EIO;
	}

	/* The page was never on the LRU - it exists only for this invalidation */
	assert(!out_page->on_lru_list);
	assert(ftl_bitmap_get(ftl_dev->unmap_map, out_page->page_no));
	ftl_bitmap_clear(ftl_dev->unmap_map, out_page->page_no);
	ftl_l2p_cache_page_remove(l2p_cache, out_page);

	pctx->qd--;
	process_unmap(l2p_cache);
}

/* Completion of the read-in of an L2P page that is about to be invalidated.
 * On success the page contents are cleared and written back out; on failure
 * the error is recorded and the scan resumes without the write-out.
 */
static void
process_unmap_page_in_cb(struct spdk_bdev_io *bdev_io, bool success, void *ctx_page)
{
	struct ftl_l2p_page *in_page = (struct ftl_l2p_page *)ctx_page;
	struct ftl_l2p_cache *l2p_cache = (struct ftl_l2p_cache *)in_page->ctx.cache;
	struct spdk_ftl_dev *ftl_dev = l2p_cache->dev;
	struct ftl_l2p_cache_process_ctx *pctx = &l2p_cache->mctx;

	/* bdev_io is NULL when process_page_in failed at submission time */
	if (bdev_io != NULL) {
		spdk_bdev_free_io(bdev_io);
	}

	if (!success) {
		pctx->status = -EIO;
		pctx->qd--;
		process_unmap(l2p_cache);
		return;
	}

	assert(ftl_bitmap_get(ftl_dev->unmap_map, in_page->page_no));
	ftl_l2p_page_set_invalid(ftl_dev, in_page);
	process_page_out(in_page, process_unmap_page_out_cb);
}

/* Walk the L2P page index and, for every page flagged in dev->unmap_map,
 * read it in, invalidate it, and write it back (via the page_in/page_out
 * callbacks above). Keeps up to 64 pages in flight; completions re-enter
 * this function to keep the pipeline full. When the index is exhausted
 * and all in-flight pages have drained, process_finish() is called.
 */
static void
process_unmap(struct ftl_l2p_cache *cache)
{
	struct ftl_l2p_cache_process_ctx *pctx = &cache->mctx;
	struct ftl_l2p_page *new_page;

	while (pctx->idx < cache->num_pages && pctx->qd < 64) {
		if (!ftl_bitmap_get(cache->dev->unmap_map, pctx->idx)) {
			/* This page was not trimmed - nothing to invalidate */
			pctx->idx++;
			continue;
		}

		/* The persist phase already evicted every cached page */
		assert(get_l2p_page_by_df_id(cache, pctx->idx) == NULL);

		/* Grab a cache page to host the invalidation I/O */
		new_page = ftl_l2p_cache_page_alloc(cache, pctx->idx);
		if (new_page == NULL) {
			/* No free pages; in-flight completions will resume the scan */
			assert(pctx->qd);
			break;
		}

		new_page->state = L2P_CACHE_PAGE_CLEARING;
		new_page->ctx.cache = cache;

		ftl_l2p_cache_page_insert(cache, new_page);
		process_page_in(new_page, process_unmap_page_in_cb);

		pctx->qd++;
		pctx->idx++;
	}

	if (pctx->qd == 0) {
		process_finish(cache);
	}
}

/* Entry point for the cached-L2P unmap pass: initialize the shared process
 * context with the caller's completion callback, then start invalidating
 * every L2P page flagged in dev->unmap_map.
 */
void
ftl_l2p_cache_unmap(struct spdk_ftl_dev *dev, ftl_l2p_cb cb, void *cb_ctx)
{
	struct ftl_l2p_cache *cache = (struct ftl_l2p_cache *)dev->l2p;

	process_init_ctx(dev, cache, cb, cb_ctx);
	process_unmap(cache);
}

static void
clear_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
+6 −0
Original line number Diff line number Diff line
@@ -52,6 +52,12 @@ ftl_mngt_persist_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
	ftl_l2p_persist(dev, l2p_cb, mngt);
}

void
ftl_mngt_unmap_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Management step wrapper - mirrors ftl_mngt_persist_l2p above: forward
	 * to the L2P layer with l2p_cb, which completes the mngt step. */
	ftl_l2p_unmap(dev, l2p_cb, mngt);
}

void
ftl_mngt_restore_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
+29 −0
Original line number Diff line number Diff line
@@ -236,6 +236,12 @@ ftl_mngt_persist_band_info_metadata(struct spdk_ftl_dev *dev, struct ftl_mngt_pr
	persist(dev, mngt, FTL_LAYOUT_REGION_TYPE_BAND_MD);
}

static void
ftl_mngt_persist_trim_metadata(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Persist the trim metadata region; same pattern as the band-info
	 * persist step above. */
	persist(dev, mngt, FTL_LAYOUT_REGION_TYPE_TRIM_MD);
}

static uint32_t
get_sb_crc(struct ftl_superblock *sb)
{
@@ -294,6 +300,10 @@ static const struct ftl_mngt_process_desc desc_persist = {
			.name = "persist band info metadata",
			.action = ftl_mngt_persist_band_info_metadata,
		},
		{
			.name = "persist trim metadata",
			.action = ftl_mngt_persist_trim_metadata,
		},
		{
			.name = "Persist superblock",
			.action = ftl_mngt_persist_super_block,
@@ -674,6 +684,21 @@ ftl_mngt_restore_band_info_metadata(struct spdk_ftl_dev *dev, struct ftl_mngt_pr
	restore(dev, mngt, FTL_LAYOUT_REGION_TYPE_BAND_MD);
}

/* Restore the trim metadata region during startup. On a fast (shared-memory)
 * startup the region is recovered straight from SHM; otherwise it is read
 * back through the generic restore path.
 */
static void
ftl_mngt_restore_trim_metadata(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	if (!ftl_fast_startup(dev)) {
		restore(dev, mngt, FTL_LAYOUT_REGION_TYPE_TRIM_MD);
		return;
	}

	FTL_DEBUGLOG(dev, "SHM: found trim md\n");
	if (ftl_md_restore_region(dev, FTL_LAYOUT_REGION_TYPE_TRIM_MD)) {
		ftl_mngt_fail_step(mngt);
	} else {
		ftl_mngt_next_step(mngt);
	}
}



#ifdef SPDK_FTL_VSS_EMU
@@ -708,6 +733,10 @@ static const struct ftl_mngt_process_desc desc_restore = {
			.name = "Restore band info metadata",
			.action = ftl_mngt_restore_band_info_metadata,
		},
		{
			.name = "Restore trim metadata",
			.action = ftl_mngt_restore_trim_metadata,
		},
		{}
	}
};
+126 −2
Original line number Diff line number Diff line
@@ -321,9 +321,31 @@ static void
ftl_mngt_recovery_iteration_init_seq_ids(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	/* Seed the per-LBA sequence IDs for this recovery iteration from the
	 * trim metadata (one seq_id per L2P page) and reset the corresponding
	 * L2P snippet entries to FTL_ADDR_INVALID.
	 */
	struct ftl_mngt_recovery_ctx *ctx = ftl_mngt_get_caller_ctx(mngt);
	size_t size = sizeof(ctx->l2p_snippet.seq_id[0]) * ctx->l2p_snippet.count;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
	uint64_t *trim_map = ftl_md_get_buffer(md);
	uint64_t page_id, trim_seq_id;
	/* Number of L2P entries stored in one FTL block */
	uint32_t lbas_in_page = FTL_BLOCK_SIZE / dev->layout.l2p.addr_size;
	uint64_t lba, lba_off;

	/* A non-zero checkpoint sequence ID would require checkpoint-aware
	 * recovery, which this path does not implement. */
	if (dev->sb->ckpt_seq_id) {
		FTL_ERRLOG(dev, "Checkpoint recovery not supported!\n");
		ftl_mngt_fail_step(mngt);
		return;
	}

	memset(ctx->l2p_snippet.seq_id, 0, size);
	/* Iterate over the [lba_first, lba_last) window of this iteration */
	for (lba = ctx->iter.lba_first; lba < ctx->iter.lba_last; lba++) {
		lba_off = lba - ctx->iter.lba_first;
		page_id = lba / lbas_in_page;

		assert(page_id < ftl_md_get_buffer_size(md) / sizeof(*trim_map));
		assert(page_id < dev->layout.region[FTL_LAYOUT_REGION_TYPE_L2P].current.blocks);
		assert(lba_off < ctx->l2p_snippet.count);

		/* Sequence ID of the trim that last covered this page (0 if none) */
		trim_seq_id = trim_map[page_id];

		ctx->l2p_snippet.seq_id[lba_off] = trim_seq_id;
		ftl_addr_store(dev, ctx->l2p_snippet.l2p, lba_off, FTL_ADDR_INVALID);
	}

	ftl_mngt_next_step(mngt);
}
@@ -691,6 +713,100 @@ ftl_mngt_restore_valid_counters(struct spdk_ftl_dev *dev, struct ftl_mngt_proces
	ftl_mngt_next_step(mngt);
}

/* Persist callback for the trim metadata: the transaction is now durable,
 * so clear the SHM in-progress flag and advance (or fail) the mngt step.
 */
static void
ftl_mngt_complete_unmap_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
	struct ftl_mngt_process *mngt = md->owner.cb_ctx;

	dev->sb_shm->trim.in_progress = false;

	if (status) {
		ftl_mngt_fail_step(mngt);
	} else {
		ftl_mngt_next_step(mngt);
	}
}

/* Complete any unmap (trim) transaction that was interrupted by the previous
 * shutdown. If the SHM superblock says a trim was in progress, replay its
 * LBA range into the unmap map, then persist the trim metadata region;
 * ftl_mngt_complete_unmap_cb clears the in-progress flag and finishes the
 * management step.
 */
static void
ftl_mngt_complete_unmap(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	uint64_t start_lba, num_blocks, seq_id;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];

	if (dev->sb_shm->trim.in_progress) {
		start_lba = dev->sb_shm->trim.start_lba;
		num_blocks = dev->sb_shm->trim.num_blocks;
		seq_id = dev->sb_shm->trim.seq_id;

		/* The trim cannot be newer than the device's global sequence ID */
		assert(seq_id <= dev->sb->seq_id);

		FTL_NOTICELOG(dev, "Incomplete unmap detected lba: %"PRIu64" num_blocks: %"PRIu64"\n",
			      start_lba, num_blocks);

		ftl_set_unmap_map(dev, start_lba, num_blocks, seq_id);
	}

	md->owner.cb_ctx = mngt;
	md->cb = ftl_mngt_complete_unmap_cb;

	ftl_md_persist(md);
}

/* Restore callback for the trim metadata region. Walks the region's VSS
 * entries (one per metadata block); each entry describes a trim transaction
 * (start_lba, num_blocks, seq_id). For every L2P page covered by an entry,
 * the in-buffer per-page sequence ID is raised to the entry's seq_id.
 * Fails the step on restore error or on a trim range that is not aligned
 * to whole L2P pages.
 */
static void
ftl_mngt_recover_unmap_map_cb(struct spdk_ftl_dev *dev, struct ftl_md *md, int status)
{
	struct ftl_mngt_process *mngt = md->owner.cb_ctx;
	uint64_t num_md_blocks, first_page, num_pages;
	/* Number of L2P entries stored in one FTL block */
	uint32_t lbas_in_page = FTL_BLOCK_SIZE / dev->layout.l2p.addr_size;
	/* Data buffer: one uint64_t seq_id per L2P page */
	uint64_t *page = ftl_md_get_buffer(md);
	union ftl_md_vss *page_vss = ftl_md_get_vss_buffer(md);
	uint64_t lba, num_blocks, vss_seq_id;
	size_t i, j;

	if (status) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	/* NOTE(review): buffer size (bytes) is divided by lbas_in_page here,
	 * not by FTL_BLOCK_SIZE - confirm this yields the intended VSS entry
	 * count for the loop below. */
	num_md_blocks = ftl_md_get_buffer_size(md) / lbas_in_page;

	for (i = 0; i < num_md_blocks; ++i, page_vss++) {
		lba = page_vss->unmap.start_lba;
		num_blocks = page_vss->unmap.num_blocks;
		vss_seq_id = page_vss->unmap.seq_id;

		first_page = lba / lbas_in_page;
		num_pages = num_blocks / lbas_in_page;

		/* Trim ranges must cover whole L2P pages */
		if (lba % lbas_in_page || num_blocks % lbas_in_page) {
			ftl_mngt_fail_step(mngt);
			return;
		}

		/* Keep the newest trim seq_id seen for each covered page */
		for (j = first_page; j < first_page + num_pages; ++j) {
			page[j] = spdk_max(vss_seq_id, page[j]);
		}
	}

	ftl_mngt_next_step(mngt);
}

/* Kick off recovery of the unmap (trim) map. Fast SHM recovery keeps the
 * map intact in shared memory, so the step is skipped; otherwise the trim
 * metadata region is restored from disk and post-processed in
 * ftl_mngt_recover_unmap_map_cb.
 */
static void
ftl_mngt_recover_unmap_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_md *trim_md;

	if (ftl_fast_recovery(dev)) {
		FTL_DEBUGLOG(dev, "SHM: skipping unmap map recovery\n");
		ftl_mngt_next_step(mngt);
		return;
	}

	trim_md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_TRIM_MD];
	trim_md->owner.cb_ctx = mngt;
	trim_md->cb = ftl_mngt_recover_unmap_map_cb;
	ftl_md_restore(trim_md);
}

static void
ftl_mngt_recovery_shm_l2p(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
@@ -780,6 +896,10 @@ static const struct ftl_mngt_process_desc g_desc_recovery = {
			.name = "Recover max seq ID",
			.action = ftl_mngt_recover_seq_id
		},
		{
			.name = "Recover unmap map",
			.action = ftl_mngt_recover_unmap_map
		},
		{
			.name = "Recover open chunks P2L",
			.action = ftl_mngt_nv_cache_recover_open_chunk
@@ -842,6 +962,10 @@ static const struct ftl_mngt_process_desc g_desc_recovery_shm = {
			.name = "Restore valid maps counters",
			.action = ftl_mngt_restore_valid_counters,
		},
		{
			.name = "Complete unmap transaction",
			.action = ftl_mngt_complete_unmap,
		},
		{}
	}
};
Loading