Commit d80bf110 authored by Mateusz Kozlowski's avatar Mateusz Kozlowski Committed by Konrad Sztyber
Browse files

lib/ftl: Update chunk recovery procedure



It updates the path of chunk recovery. The NV cache executes common
activities (allocating p2l map, persisting p2l map, closing chunk).
Restoring P2L map is delegated to the NV cache device implementation.

Change-Id: Ic8fa8f305cf5ff01179760c808cb737a6398808a
Signed-off-by: Mariusz Barczak <Mariusz.Barczak@solidigmtechnology.com>
Signed-off-by: Mateusz Kozlowski <mateusz.kozlowski@solidigm.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/19615


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <jim.harris@samsung.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
parent ea9cec36
Loading
Loading
Loading
Loading
+153 −234
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
#include "ftl_core.h"
#include "ftl_band.h"
#include "utils/ftl_addr_utils.h"
#include "utils/ftl_defs.h"
#include "mngt/ftl_mngt.h"

static inline uint64_t nvc_data_blocks(struct ftl_nv_cache *nv_cache) __attribute__((unused));
@@ -1359,8 +1360,8 @@ ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset)
	return ftl_lba_load(dev, p2l_map->chunk_map, offset);
}

static void
ftl_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
void
ftl_nv_cache_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr)
{
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	uint64_t cache_offset = ftl_addr_to_nvc_offset(dev, addr);
@@ -1393,7 +1394,7 @@ ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr)

	assert(lba != FTL_LBA_INVALID);

	ftl_chunk_set_addr(chunk, lba, addr);
	ftl_nv_cache_chunk_set_addr(chunk, lba, addr);
	ftl_bitmap_set(dev->valid_map, addr);
}

@@ -2000,192 +2001,6 @@ ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
	ftl_chunk_basic_rq_write(chunk, brq);
}

static void read_tail_md_cb(struct ftl_basic_rq *brq);
static void recover_open_chunk_cb(struct ftl_basic_rq *brq);

/*
 * Completion callback for persisting the chunk metadata during recovery.
 * On success, mirrors the persisted checksum into the live chunk metadata
 * and marks the chunk closed; on failure, flags the parent request.
 * Either way, completion is reported through read_tail_md_cb().
 */
static void
restore_chunk_close_cb(int status, void *ctx)
{
	struct ftl_basic_rq *parent = ctx;
	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;

	if (!status) {
		/* Persist succeeded - promote the chunk to the closed state */
		chunk->md->p2l_map_checksum = p2l_map->chunk_dma_md->p2l_map_checksum;
		chunk->md->state = FTL_CHUNK_STATE_CLOSED;
	} else {
		parent->success = false;
	}

	/* Hand control back to the owner callback chain */
	read_tail_md_cb(parent);
}

/*
 * Completion callback for the tail-metadata (P2L map) write issued by
 * restore_fill_tail_md(). Computes the CRC of the rebuilt P2L map, stages
 * a closed-state copy of the chunk metadata in the DMA-able buffer, and
 * persists it to the NVC metadata region. Continues in
 * restore_chunk_close_cb(); on write failure, short-circuits to
 * read_tail_md_cb() with the original owner callback restored.
 */
static void
restore_fill_p2l_map_cb(struct ftl_basic_rq *parent)
{
	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
	struct ftl_p2l_map *p2l_map = &chunk->p2l_map;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
	uint32_t chunk_map_crc;

	/* Set original callback */
	ftl_basic_rq_set_owner(parent, recover_open_chunk_cb, parent->owner.priv);

	if (spdk_unlikely(!parent->success)) {
		/* Tail md write failed - report through the restored owner callback */
		read_tail_md_cb(parent);
		return;
	}

	/* Checksum the P2L map that was just written as tail metadata */
	chunk_map_crc = spdk_crc32c_update(p2l_map->chunk_map,
					   chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);
	/* Stage a closed-chunk view of the metadata in the DMA buffer */
	memcpy(p2l_map->chunk_dma_md, chunk->md, region->entry_size * FTL_BLOCK_SIZE);
	p2l_map->chunk_dma_md->state = FTL_CHUNK_STATE_CLOSED;
	p2l_map->chunk_dma_md->write_pointer = chunk->nv_cache->chunk_blocks;
	p2l_map->chunk_dma_md->blocks_written = chunk->nv_cache->chunk_blocks;
	p2l_map->chunk_dma_md->p2l_map_checksum = chunk_map_crc;

	ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, p2l_map->chunk_dma_md, NULL,
			       restore_chunk_close_cb, parent, &chunk->md_persist_entry_ctx);
}

/*
 * Write the rebuilt P2L map as the chunk's tail metadata. Reuses the
 * parent basic request for the write; completion continues in
 * restore_fill_p2l_map_cb().
 */
static void
restore_fill_tail_md(struct ftl_basic_rq *parent, struct ftl_nv_cache_chunk *chunk)
{
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	void *tail_md_buf = chunk->p2l_map.chunk_map;

	/* Closing the chunk uses a fresh sequence id */
	chunk->md->close_seq_id = ftl_get_next_seq_id(dev);

	ftl_basic_rq_init(dev, parent, tail_md_buf, chunk->nv_cache->tail_md_chunk_blocks);
	ftl_basic_rq_set_owner(parent, restore_fill_p2l_map_cb, parent->owner.priv);

	parent->io.chunk = chunk;
	parent->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);

	ftl_chunk_basic_rq_write(chunk, parent);
}

/*
 * Completion callback for each user-data read issued while recovering an
 * open chunk. Rebuilds the chunk's P2L map from the per-block VSS metadata,
 * then either re-issues a read for the next span of the data region or, once
 * the whole region (up to the tail md offset) has been scanned, proceeds to
 * restore_fill_tail_md(). Any failure marks the parent request unsuccessful
 * and completes through read_tail_md_cb().
 */
static void
read_open_chunk_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_rq *rq = (struct ftl_rq *)cb_arg;
	struct ftl_basic_rq *parent = (struct ftl_basic_rq *)rq->owner.priv;
	struct ftl_nv_cache_chunk *chunk = parent->io.chunk;
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(chunk->nv_cache, struct spdk_ftl_dev, nv_cache);
	union ftl_md_vss *md;
	uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
	uint64_t len = bdev_io->u.bdev.num_blocks;
	ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);
	int rc;

	ftl_stats_bdev_io_completed(dev, FTL_STATS_TYPE_USER, bdev_io);

	spdk_bdev_free_io(bdev_io);

	if (!success) {
		parent->success = false;
		read_tail_md_cb(parent);
		return;
	}

	/* Rebuild the P2L entries for the blocks just read */
	while (rq->iter.idx < rq->iter.count) {
		/* Get metadata */
		md = rq->entries[rq->iter.idx].io_md;
		if (md->nv_cache.seq_id != chunk->md->seq_id) {
			/* Block belongs to an older chunk incarnation - invalidate */
			md->nv_cache.lba = FTL_LBA_INVALID;
		}
		/*
		 * The p2l map contains effectively random data at this point (since it contains arbitrary
		 * blocks from potentially not even filled tail md), so even LBA_INVALID needs to be set explicitly
		 */

		ftl_chunk_set_addr(chunk,  md->nv_cache.lba, addr + rq->iter.idx);
		rq->iter.idx++;
	}

	if (cache_offset + len < chunk->offset + chunk_tail_md_offset(nv_cache)) {
		/* More of the data region left - issue the next read */
		cache_offset += len;
		len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - cache_offset);
		rq->iter.idx = 0;
		rq->iter.count = len;

		rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
				nv_cache->cache_ioch,
				rq->io_payload,
				rq->io_md,
				cache_offset, len,
				read_open_chunk_cb,
				rq);

		if (rc) {
			ftl_rq_del(rq);
			parent->success = false;
			read_tail_md_cb(parent);
			return;
		}
	} else {
		/* Whole data region scanned - persist the rebuilt P2L map */
		ftl_rq_del(rq);
		restore_fill_tail_md(parent, chunk);
	}
}

/*
 * Start recovery of an open chunk: prefill the P2L map with invalid LBAs,
 * then read the chunk's user-data region (with per-block VSS metadata) so
 * the map can be rebuilt in read_open_chunk_cb(). Any failure marks the
 * parent request unsuccessful and completes through read_tail_md_cb().
 */
static void
restore_open_chunk(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *parent)
{
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_rq *rq;
	uint64_t addr;
	uint64_t len;
	int rc;

	/*
	 * We've just read the p2l map, prefill it with INVALID LBA
	 * TODO we need to do this because tail md blocks (p2l map) are also represented in the p2l map, instead of just user data region
	 */
	memset(chunk->p2l_map.chunk_map, -1, FTL_BLOCK_SIZE * nv_cache->tail_md_chunk_blocks);

	/* Need to read user data, recalculate chunk's P2L and write tail md with it */
	rq = ftl_rq_new(dev, dev->nv_cache.md_size);
	if (!rq) {
		parent->success = false;
		read_tail_md_cb(parent);
		return;
	}

	addr = chunk->offset;
	len = spdk_min(dev->xfer_size, chunk->offset + chunk_tail_md_offset(nv_cache) - addr);

	rq->owner.priv = parent;
	rq->iter.idx = 0;
	/*
	 * Bound the iterator by the number of blocks actually requested.
	 * Previously the count was set to dev->xfer_size before clamping; if
	 * the chunk's data region is smaller than xfer_size, the completion
	 * callback would iterate over metadata entries that were never read.
	 */
	rq->iter.count = len;

	rc = ftl_nv_cache_bdev_read_blocks_with_md(dev, nv_cache->bdev_desc,
			nv_cache->cache_ioch,
			rq->io_payload,
			rq->io_md,
			addr, len,
			read_open_chunk_cb,
			rq);

	if (rc) {
		ftl_rq_del(rq);
		parent->success = false;
		read_tail_md_cb(parent);
	}
}

/*
 * Forward a completed basic request to whichever owner callback is
 * currently installed on it (the owner is rewired at several points
 * during the recovery sequence).
 */
static void
read_tail_md_cb(struct ftl_basic_rq *brq)
{
	void (*owner_cb)(struct ftl_basic_rq *) = brq->owner.cb;

	owner_cb(brq);
}

static int
ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
		       void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx)
@@ -2412,23 +2227,68 @@ ftl_mngt_nv_cache_restore_chunk_state(struct spdk_ftl_dev *dev, struct ftl_mngt_
	ftl_md_restore(md);
}

struct recover_open_chunk_ctx {
	struct ftl_nv_cache_chunk *chunk;
};

static void
recover_open_chunk_cb(struct ftl_basic_rq *brq)
recover_open_chunk_prepare(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_mngt_process *mngt = brq->owner.priv;
	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
	struct ftl_nv_cache *nvc = chunk->nv_cache;
	struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
	struct ftl_nv_cache *nvc = &dev->nv_cache;
	struct recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);

	chunk_free_p2l_map(chunk);
	ftl_bug(TAILQ_EMPTY(&nvc->chunk_open_list));
	ctx->chunk = TAILQ_FIRST(&nvc->chunk_open_list);

	if (!brq->success) {
		FTL_ERRLOG(dev, "Recovery chunk ERROR, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
			   chunk->md->seq_id);
	FTL_NOTICELOG(dev, "Start recovery open chunk, offset = %"PRIu64", seq id %"PRIu64"\n",
		      ctx->chunk->offset, ctx->chunk->md->seq_id);

	if (chunk_alloc_p2l_map(ctx->chunk)) {
		ftl_mngt_fail_step(mngt);
		return;
	} else {
		ftl_mngt_next_step(mngt);
	}
}

/*
 * Completion callback for the P2L-map tail-metadata write: advance the
 * management process on success, fail the current step otherwise.
 */
static void
recover_open_chunk_persist_p2l_map_cb(struct ftl_basic_rq *rq)
{
	struct ftl_mngt_process *mngt = rq->owner.priv;

	if (!rq->success) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	ftl_mngt_next_step(mngt);
}

/*
 * Management step: write the chunk's rebuilt P2L map to the chunk's tail
 * metadata area. Uses the step-scoped basic request (declared via ctx_size
 * in the process descriptor); completion continues in
 * recover_open_chunk_persist_p2l_map_cb().
 */
static void
recover_open_chunk_persist_p2l_map(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_nv_cache_chunk *chunk = ctx->chunk;
	struct ftl_basic_rq *rq = ftl_mngt_get_step_ctx(mngt);
	void *p2l_map = chunk->p2l_map.chunk_map;

	ftl_basic_rq_init(dev, rq, p2l_map, chunk->nv_cache->tail_md_chunk_blocks);
	ftl_basic_rq_set_owner(rq, recover_open_chunk_persist_p2l_map_cb, mngt);

	/* Tail metadata lives at the end of the chunk, past the data region */
	rq->io.addr = chunk->offset + chunk_tail_md_offset(chunk->nv_cache);
	ftl_chunk_basic_rq_write(chunk, rq);
}

static void
recover_open_chunk_close_chunk_cb(int status, void *cb_arg)
{
	struct ftl_mngt_process *mngt = cb_arg;
	struct recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_nv_cache_chunk *chunk = ctx->chunk;
	struct ftl_nv_cache *nvc = chunk->nv_cache;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nvc, struct spdk_ftl_dev, nv_cache);

	if (0 == status) {
		*chunk->md = *chunk->p2l_map.chunk_dma_md;

		FTL_NOTICELOG(dev, "Recovered chunk, offset = %"PRIu64", seq id %"PRIu64"\n", chunk->offset,
			      chunk->md->seq_id);

@@ -2438,35 +2298,101 @@ recover_open_chunk_cb(struct ftl_basic_rq *brq)
		TAILQ_INSERT_TAIL(&nvc->chunk_full_list, chunk, entry);
		nvc->chunk_full_count++;

	/* This is closed chunk */
	chunk->md->write_pointer = nvc->chunk_blocks;
	chunk->md->blocks_written = nvc->chunk_blocks;
		ftl_mngt_next_step(mngt);
	} else {
		ftl_mngt_fail_step(mngt);
	}
}

	ftl_mngt_continue_step(mngt);
/*
 * Management step: persist a closed-state copy of the chunk metadata.
 * Stages the update in the DMA-able buffer (state, write pointer, blocks
 * written, and the CRC of the rebuilt P2L map) and persists it to the NVC
 * metadata region. Completion continues in
 * recover_open_chunk_close_chunk_cb().
 */
static void
recover_open_chunk_close_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_nv_cache_chunk *chunk = ctx->chunk;
	struct ftl_nv_cache_chunk_md *chunk_md = chunk->p2l_map.chunk_dma_md;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];

	/* Work on the DMA-able copy; the live md is updated only on success */
	*chunk_md = *chunk->md;

	chunk_md->state = FTL_CHUNK_STATE_CLOSED;
	chunk_md->write_pointer = chunk->nv_cache->chunk_blocks;
	chunk_md->blocks_written = chunk->nv_cache->chunk_blocks;
	chunk_md->p2l_map_checksum = spdk_crc32c_update(chunk->p2l_map.chunk_map,
				     chunk->nv_cache->tail_md_chunk_blocks * FTL_BLOCK_SIZE, 0);

	ftl_md_persist_entries(md, get_chunk_idx(chunk), 1, chunk_md, NULL,
			       recover_open_chunk_close_chunk_cb, mngt,
			       &chunk->md_persist_entry_ctx);
}

void
ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
static void
recover_open_chunk_execute(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_nv_cache *nvc = &dev->nv_cache;
	struct ftl_nv_cache_chunk *chunk;
	struct ftl_basic_rq *brq = ftl_mngt_get_step_ctx(mngt);
	struct recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_nv_cache *nvc = ctx->chunk->nv_cache;

	if (!brq) {
		if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
			FTL_NOTICELOG(dev, "No open chunks to recover P2L\n");
	nvc->nvc_type->ops.recover_open_chunk(dev, mngt, ctx->chunk);
}

static void
recover_open_chunk_cleanup(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_nv_cache_chunk *chunk = ctx->chunk;

	if (chunk->p2l_map.chunk_map) {
		chunk_free_p2l_map(ctx->chunk);
	}
	ftl_mngt_next_step(mngt);
			return;
}

		if (ftl_mngt_alloc_step_ctx(mngt, sizeof(*brq))) {
/*
 * Sub-process recovering a single open chunk: allocate the P2L map,
 * delegate the rebuild to the NV cache device implementation, persist the
 * map as tail metadata, close the chunk, and free the map. The prepare
 * step's cleanup handler releases the P2L map on mid-process failure.
 */
static const struct ftl_mngt_process_desc desc_recover_open_chunk = {
	.name = "Recover open chunk",
	.ctx_size = sizeof(struct recover_open_chunk_ctx),
	.steps = {
		{
			.name = "Chunk recovery, prepare",
			.action = recover_open_chunk_prepare,
			.cleanup = recover_open_chunk_cleanup
		},
		{
			.name = "Chunk recovery, execute",
			.action = recover_open_chunk_execute,
		},
		{
			.name = "Chunk recovery, persist P2L map",
			.ctx_size = sizeof(struct ftl_basic_rq),
			.action = recover_open_chunk_persist_p2l_map,
		},
		{
			.name = "Chunk recovery, close chunk",
			.action = recover_open_chunk_close_chunk,
		},
		{
			.name = "Chunk recovery, cleanup",
			.action = recover_open_chunk_cleanup,
		},
		{}
	}
};

static void
ftl_mngt_nv_cache_recover_open_chunk_cb(struct spdk_ftl_dev *dev, void *ctx, int status)
{
	struct ftl_mngt_process *mngt = ctx;

	if (status) {
		ftl_mngt_fail_step(mngt);
			return;
	} else {
		ftl_mngt_continue_step(mngt);
	}
		brq = ftl_mngt_get_step_ctx(mngt);
		ftl_basic_rq_set_owner(brq, recover_open_chunk_cb, mngt);
}

void
ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt)
{
	struct ftl_nv_cache *nvc = &dev->nv_cache;

	if (TAILQ_EMPTY(&nvc->chunk_open_list)) {
		if (!is_chunk_count_valid(nvc)) {
			FTL_ERRLOG(dev, "Recovery ERROR, invalid number of chunk\n");
@@ -2484,17 +2410,10 @@ ftl_mngt_nv_cache_recover_open_chunk(struct spdk_ftl_dev *dev, struct ftl_mngt_p
			ftl_mngt_next_step(mngt);
		}
	} else {
		chunk = TAILQ_FIRST(&nvc->chunk_open_list);
		if (chunk_alloc_p2l_map(chunk)) {
		if (ftl_mngt_process_execute(dev, &desc_recover_open_chunk,
					     ftl_mngt_nv_cache_recover_open_chunk_cb, mngt)) {
			ftl_mngt_fail_step(mngt);
			return;
		}

		brq->io.chunk = chunk;

		FTL_NOTICELOG(dev, "Start recovery open chunk, offset = %"PRIu64", seq id %"PRIu64"\n",
			      chunk->offset, chunk->md->seq_id);
		restore_open_chunk(chunk, brq);
	}
}

+2 −0
Original line number Diff line number Diff line
@@ -251,6 +251,8 @@ uint64_t ftl_chunk_map_get_lba(struct ftl_nv_cache_chunk *chunk, uint64_t offset

void ftl_nv_cache_set_addr(struct spdk_ftl_dev *dev, uint64_t lba, ftl_addr addr);

void ftl_nv_cache_chunk_set_addr(struct ftl_nv_cache_chunk *chunk, uint64_t lba, ftl_addr addr);

int ftl_nv_cache_save_state(struct ftl_nv_cache *nv_cache);

int ftl_nv_cache_load_state(struct ftl_nv_cache *nv_cache);
+116 −0
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@
#include "ftl_core.h"
#include "ftl_layout.h"
#include "utils/ftl_layout_tracker_bdev.h"
#include "mngt/ftl_mngt.h"

static bool
is_bdev_compatible(struct spdk_ftl_dev *dev, struct spdk_bdev *bdev)
@@ -176,6 +177,120 @@ write_io(struct ftl_io *io)
	}
}

struct nvc_recover_open_chunk_ctx {
	struct ftl_nv_cache_chunk *chunk;
	struct ftl_rq *rq;
	uint64_t addr;
	uint64_t to_read;
};

/*
 * Completion callback for one VSS-metadata read of the chunk's data region.
 * Rebuilds the P2L entries for the blocks just read, advances the scan
 * position, and re-runs the read step (ftl_mngt_continue_step) until the
 * whole region has been processed.
 */
static void
nvc_recover_open_chunk_read_vss_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct ftl_mngt_process *mngt = cb_arg;
	struct spdk_ftl_dev *dev = ftl_mngt_get_dev(mngt);
	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	struct ftl_nv_cache_chunk *chunk = ctx->chunk;
	struct ftl_rq *rq = ctx->rq;
	union ftl_md_vss *md;
	uint64_t cache_offset = bdev_io->u.bdev.offset_blocks;
	uint64_t blocks = bdev_io->u.bdev.num_blocks;
	ftl_addr addr = ftl_addr_from_nvc_offset(dev, cache_offset);

	spdk_bdev_free_io(bdev_io);
	if (!success) {
		ftl_mngt_fail_step(mngt);
		return;
	}

	/* Rebuild P2L map */
	for (rq->iter.idx = 0; rq->iter.idx < blocks; rq->iter.idx++) {
		md = rq->entries[rq->iter.idx].io_md;
		if (md->nv_cache.seq_id != chunk->md->seq_id) {
			/* Block belongs to an older chunk incarnation - invalidate */
			md->nv_cache.lba = FTL_LBA_INVALID;
			md->nv_cache.seq_id = 0;
		}

		ftl_nv_cache_chunk_set_addr(chunk, md->nv_cache.lba, addr + rq->iter.idx);
	}

	assert(ctx->to_read >= blocks);
	/* Advance the scan window and re-enter the read step */
	ctx->addr += blocks;
	ctx->to_read -= blocks;
	ftl_mngt_continue_step(mngt);

}

/*
 * Management step: read the next span of the chunk's data region together
 * with its VSS metadata. Re-entered (via ftl_mngt_continue_step from the
 * completion callback) until ctx->to_read is exhausted, at which point the
 * step completes.
 */
static void
nvc_recover_open_chunk_read_vss(struct spdk_ftl_dev *dev,
				struct ftl_mngt_process *mngt)
{
	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);
	uint64_t num_blocks = spdk_min(ctx->rq->num_blocks, ctx->to_read);

	if (num_blocks == 0) {
		/* Whole data region scanned - nothing left to read */
		ftl_mngt_next_step(mngt);
		return;
	}

	if (spdk_bdev_read_blocks_with_md(dev->nv_cache.bdev_desc, dev->nv_cache.cache_ioch,
					  ctx->rq->io_payload, ctx->rq->io_md, ctx->addr, num_blocks,
					  nvc_recover_open_chunk_read_vss_cb, mngt)) {
		ftl_mngt_fail_step(mngt);
	}
}

/*
 * Process init handler: stash the chunk passed by the caller, allocate the
 * request used to carry payload + VSS metadata, and position the scan at
 * the start of the chunk's data region (which ends at the tail md offset).
 * Returns 0 on success, -ENOMEM if the request cannot be allocated.
 */
static int
nvc_recover_open_chunk_init_handler(struct spdk_ftl_dev *dev,
				    struct ftl_mngt_process *mngt, void *init_ctx)
{
	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);

	ctx->chunk = init_ctx;

	ctx->rq = ftl_rq_new(dev, dev->nv_cache.md_size);
	if (ctx->rq == NULL) {
		return -ENOMEM;
	}

	ctx->addr = ctx->chunk->offset;
	ctx->to_read = chunk_tail_md_offset(&dev->nv_cache);
	return 0;
}

/*
 * Process deinit handler: release the request allocated by the init
 * handler. Runs when the recovery sub-process terminates, regardless of
 * outcome.
 */
static void
nvc_recover_open_chunk_deinit_handler(struct spdk_ftl_dev *dev,
				      struct ftl_mngt_process *mngt)
{
	struct nvc_recover_open_chunk_ctx *ctx = ftl_mngt_get_process_ctx(mngt);

	ftl_rq_del(ctx->rq);
}

/*
 * Device-specific part of open-chunk recovery for the VSS-capable bdev
 * backend: a single (re-entered) step that scans the chunk's data region
 * and rebuilds its P2L map from the per-block VSS metadata. Resources are
 * managed by the init/deinit handlers.
 */
static const struct ftl_mngt_process_desc desc_recover_open_chunk = {
	.name = "Recover open chunk",
	.ctx_size = sizeof(struct nvc_recover_open_chunk_ctx),
	.init_handler = nvc_recover_open_chunk_init_handler,
	.deinit_handler = nvc_recover_open_chunk_deinit_handler,
	.steps = {
		{
			.name = "Chunk recovery, read vss",
			.action = nvc_recover_open_chunk_read_vss
		},
		{}
	}
};

/*
 * ftl_nv_cache_device_ops.recover_open_chunk implementation for the
 * VSS-capable bdev backend: run the device-specific recovery sub-process
 * on top of the caller's management process, passing the chunk as the
 * sub-process init context.
 */
static void
nvc_recover_open_chunk(struct spdk_ftl_dev *dev,
		       struct ftl_mngt_process *mngt,
		       struct ftl_nv_cache_chunk *chunk)
{
	ftl_mngt_call_process(mngt, &desc_recover_open_chunk, chunk);
}

struct ftl_nv_cache_device_type nvc_bdev_vss = {
	.name = "bdev",
	.features = {
@@ -188,6 +303,7 @@ struct ftl_nv_cache_device_type nvc_bdev_vss = {
			.region_open = md_region_open,
		},
		.write = write_io,
		.recover_open_chunk = nvc_recover_open_chunk,
	}
};
FTL_NV_CACHE_DEVICE_TYPE_REGISTER(nvc_bdev_vss)
+15 −0
Original line number Diff line number Diff line
@@ -12,6 +12,7 @@
struct spdk_ftl_dev;
struct ftl_nv_cache_chunk;
struct ftl_io;
struct ftl_mngt_process;

/**
 * @brief NV Cache device features and capabilities
@@ -56,6 +57,20 @@ struct ftl_nv_cache_device_ops {
	 */
	void (*write)(struct ftl_io *io);

	/**
	 * @brief Recover open chunk
	 *
	 * @param dev ftl device
	 * @param mngt FTL management process handler
	 * @param chunk NV cache chunk to be recovered
	 *
	 * @note When the recovery finishes successfully, the procedure
	 * shall invoke ftl_mngt_next_step(mngt). If a failure occurs,
	 * the process shall call ftl_mngt_fail_step(mngt).
	 */
	void (*recover_open_chunk)(struct spdk_ftl_dev *dev, struct ftl_mngt_process *mngt,
				   struct ftl_nv_cache_chunk *chunk);

	struct ftl_md_layout_ops md_layout_ops;
};