Commit 85c5ce74 authored by Mateusz Kozlowski's avatar Mateusz Kozlowski Committed by Konrad Sztyber
Browse files

lib/ftl: Read chunk P2L map first in compaction



Change-Id: I48c3e6422f4341a0d9e7bbc80a665d9670b1976e
Signed-off-by: Wojciech Malikowski <wojciech.malikowski@solidigm.com>
Signed-off-by: Mariusz Barczak <Mariusz.Barczak@solidigmtechnology.com>
Signed-off-by: Mateusz Kozlowski <mateusz.kozlowski@solidigm.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/19611


Reviewed-by: Jim Harris <jim.harris@samsung.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Community-CI: Mellanox Build Bot
parent 5a8a57c1
Loading
Loading
Loading
Loading
+85 −21
Original line number Diff line number Diff line
@@ -301,7 +301,8 @@ ftl_nv_cache_init(struct spdk_ftl_dev *dev)
	}

#define FTL_MAX_OPEN_CHUNKS 2
	nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
#define FTL_MAX_COMPACTED_CHUNKS 2
	nv_cache->p2l_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS + FTL_MAX_COMPACTED_CHUNKS,
						nv_cache_p2l_map_pool_elem_size(nv_cache),
						FTL_BLOCK_SIZE,
						SPDK_ENV_NUMA_ID_ANY);
@@ -310,7 +311,7 @@ ftl_nv_cache_init(struct spdk_ftl_dev *dev)
	}

	/* One entry per open chunk */
	nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS,
	nv_cache->chunk_md_pool = ftl_mempool_create(FTL_MAX_OPEN_CHUNKS + FTL_MAX_COMPACTED_CHUNKS,
				  sizeof(struct ftl_nv_cache_chunk_md),
				  FTL_BLOCK_SIZE,
				  SPDK_ENV_NUMA_ID_ANY);
@@ -517,6 +518,8 @@ ftl_chunk_free_md_entry(struct ftl_nv_cache_chunk *chunk)
	p2l_map->chunk_dma_md = NULL;
}

static void chunk_free_p2l_map(struct ftl_nv_cache_chunk *chunk);

static void
ftl_chunk_free(struct ftl_nv_cache_chunk *chunk)
{
@@ -580,12 +583,12 @@ chunk_free_cb(int status, void *ctx)
static void
ftl_chunk_persist_free_state(struct ftl_nv_cache *nv_cache)
{
	int rc;
	struct spdk_ftl_dev *dev = SPDK_CONTAINEROF(nv_cache, struct spdk_ftl_dev, nv_cache);
	struct ftl_p2l_map *p2l_map;
	struct ftl_md *md = dev->layout.md[FTL_LAYOUT_REGION_TYPE_NVC_MD];
	struct ftl_layout_region *region = ftl_layout_region_get(dev, FTL_LAYOUT_REGION_TYPE_NVC_MD);
	struct ftl_nv_cache_chunk *tchunk, *chunk = NULL;
	int rc;

	TAILQ_FOREACH_SAFE(chunk, &nv_cache->needs_free_persist_list, entry, tchunk) {
		p2l_map = &chunk->p2l_map;
@@ -657,6 +660,8 @@ chunk_compaction_advance(struct ftl_nv_cache_chunk *chunk, uint64_t num_blocks)

	compaction_stats_update(chunk);

	chunk_free_p2l_map(chunk);

	ftl_chunk_free(chunk);
}

@@ -811,31 +816,84 @@ is_chunk_to_read(struct ftl_nv_cache_chunk *chunk)
	return true;
}

static struct ftl_nv_cache_chunk *
get_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
static void
read_chunk_p2l_map_cb(struct ftl_basic_rq *brq)
{
	struct ftl_nv_cache_chunk *chunk = NULL;
	struct ftl_nv_cache_chunk *chunk = brq->io.chunk;
	struct ftl_nv_cache *nv_cache = chunk->nv_cache;

	if (!TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
		chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
		if (is_chunk_to_read(chunk)) {
			return chunk;
	if (!brq->success) {
#ifdef SPDK_FTL_RETRY_ON_ERROR
		read_chunk_p2l_map(chunk);
#else
		ftl_abort();
#endif
	}

	TAILQ_INSERT_HEAD(&nv_cache->chunk_comp_list, chunk, entry);
}

static int chunk_alloc_p2l_map(struct ftl_nv_cache_chunk *chunk);
static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
				  void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx);

/*
 * Kick off reading a chunk's P2L (physical-to-logical) map before the
 * chunk is compacted.
 *
 * Allocates the in-memory P2L map for the chunk and issues the tail
 * metadata read; completion is handled by read_chunk_p2l_map_cb().
 * If the bdev layer has no I/O resources (-ENOMEM), the read is retried
 * via spdk_bdev_queue_io_wait() with this function as the retry
 * callback; any other submission failure is fatal.
 *
 * NOTE(review): arg is void * (rather than the chunk type) so the same
 * function can serve as the spdk_bdev_io_wait_entry callback.
 */
static void
read_chunk_p2l_map(void *arg)
{
	struct ftl_nv_cache_chunk *chunk = arg;
	int rc;

	/* P2L map allocation failure here is unrecoverable by design. */
	if (chunk_alloc_p2l_map(chunk)) {
		ftl_abort();
	}

	rc = ftl_chunk_read_tail_md(chunk, &chunk->metadata_rq, read_chunk_p2l_map_cb, NULL);
	if (rc) {
		if (rc == -ENOMEM) {
			/* Out of bdev I/O resources: queue a retry for when the
			 * bdev signals availability, reusing the chunk's own
			 * wait entry embedded in metadata_rq. */
			struct ftl_nv_cache *nv_cache = chunk->nv_cache;
			struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(nv_cache->bdev_desc);
			struct spdk_bdev_io_wait_entry *wait_entry = &chunk->metadata_rq.io.bdev_io_wait;

			wait_entry->bdev = bdev;
			wait_entry->cb_fn = read_chunk_p2l_map;
			wait_entry->cb_arg = chunk;
			spdk_bdev_queue_io_wait(bdev, nv_cache->cache_ioch, wait_entry);
		} else {
			/* Any error other than -ENOMEM is fatal. */
			ftl_abort();
		}
	}
}

static void
prepare_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
{
	struct ftl_nv_cache_chunk *chunk = NULL;

	if (TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
		return;
	}

	if (!TAILQ_EMPTY(&nv_cache->chunk_full_list)) {
	chunk = TAILQ_FIRST(&nv_cache->chunk_full_list);
	TAILQ_REMOVE(&nv_cache->chunk_full_list, chunk, entry);

	assert(chunk->md->write_pointer);
	} else {

	nv_cache->chunk_comp_count++;
	read_chunk_p2l_map(chunk);
}


static struct ftl_nv_cache_chunk *
get_chunk_for_compaction(struct ftl_nv_cache *nv_cache)
{
	struct ftl_nv_cache_chunk *chunk = NULL;

	if (TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
		return NULL;
	}

	if (spdk_likely(chunk)) {
		assert(chunk->md->write_pointer != 0);
		TAILQ_INSERT_HEAD(&nv_cache->chunk_comp_list, chunk, entry);
		nv_cache->chunk_comp_count++;
	chunk = TAILQ_FIRST(&nv_cache->chunk_comp_list);
	if (!is_chunk_to_read(chunk)) {
		return NULL;
	}

	return chunk;
@@ -1012,6 +1070,14 @@ compaction_process(struct ftl_nv_cache *nv_cache)
		return;
	}

	if (nv_cache->chunk_comp_count < FTL_MAX_COMPACTED_CHUNKS) {
		prepare_chunk_for_compaction(nv_cache);
	}

	if (TAILQ_EMPTY(&nv_cache->chunk_comp_list)) {
		return;
	}

	compactor = TAILQ_FIRST(&nv_cache->compactor_list);
	if (!compactor) {
		return;
@@ -1936,8 +2002,6 @@ ftl_chunk_close(struct ftl_nv_cache_chunk *chunk)
	ftl_chunk_basic_rq_write(chunk, brq);
}

static int ftl_chunk_read_tail_md(struct ftl_nv_cache_chunk *chunk, struct ftl_basic_rq *brq,
				  void (*cb)(struct ftl_basic_rq *brq), void *cb_ctx);
static void read_tail_md_cb(struct ftl_basic_rq *brq);
static void recover_open_chunk_cb(struct ftl_basic_rq *brq);