Commit 0f0af480 authored by Konrad Sztyber's avatar Konrad Sztyber Committed by Ben Walker
Browse files

lib/ftl: keep reloc traffic out of non-volatile cache



Moving data from one band to another doesn't need to be stored on the
non-volatile cache. Not only does it add unnecessary traffic to the
cache (wearing it out and reducing its throughput), but it requires us
to synchronize it with user writes to the same LBAs.

To avoid all that, this patch adds the FTL_IO_BYPASS_CACHE flag to all
writes coming from the reloc module. However, to be sure that the moved
data is stored on disk and can be restored in case of power loss, we
need to make sure that each free band has all of its data moved to a
closed band before it can be erased. It's done by keeping track of the
number of outstanding IOs moving data from a particular band
(num_reloc_blocks), as well as the number of open bands that contain
data from this band (num_reloc_bands). Only when both of these are at
zero and the band has zero valid blocks can it be erased.

Change-Id: I7c106011ffc9685eb8e5ff497919237a305e4478
Signed-off-by: default avatarKonrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/458101


Reviewed-by: default avatarBen Walker <benjamin.walker@intel.com>
Reviewed-by: default avatarMateusz Kozlowski <mateusz.kozlowski@intel.com>
Reviewed-by: default avatarWojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-by: default avatarDarek Stojaczyk <dariusz.stojaczyk@intel.com>
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
parent 4d113ee5
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -188,6 +188,13 @@ struct ftl_band {
	/* End metadata start ppa */
	struct ftl_ppa				tail_md_ppa;

	/* Bitmap of all bands that have its data moved onto this band */
	struct spdk_bit_array			*reloc_bitmap;
	/* Number of open bands containing data moved from this band */
	size_t					num_reloc_bands;
	/* Number of blocks currently being moved from this band */
	size_t					num_reloc_blocks;

	/* Free/shut bands' lists */
	LIST_ENTRY(ftl_band)			list_entry;

+47 −12
Original line number Diff line number Diff line
@@ -203,20 +203,22 @@ ftl_md_write_cb(struct ftl_io *io, void *arg, int status)
{
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_nv_cache *nv_cache = &dev->nv_cache;
	struct ftl_band *band = io->band;
	struct ftl_wptr *wptr;
	size_t id;

	wptr = ftl_wptr_from_band(io->band);
	wptr = ftl_wptr_from_band(band);

	if (status) {
		ftl_md_write_fail(io, status);
		return;
	}

	ftl_band_set_next_state(io->band);
	if (io->band->state == FTL_BAND_STATE_CLOSED) {
	ftl_band_set_next_state(band);
	if (band->state == FTL_BAND_STATE_CLOSED) {
		if (nv_cache->bdev_desc) {
			pthread_spin_lock(&nv_cache->lock);
			nv_cache->num_available += ftl_band_user_lbks(io->band);
			nv_cache->num_available += ftl_band_user_lbks(band);

			if (spdk_unlikely(nv_cache->num_available > nv_cache->num_data_blocks)) {
				nv_cache->num_available = nv_cache->num_data_blocks;
@@ -224,6 +226,20 @@ ftl_md_write_cb(struct ftl_io *io, void *arg, int status)
			pthread_spin_unlock(&nv_cache->lock);
		}

		/*
		 * Go through the reloc_bitmap, checking for all the bands that had its data moved
		 * onto current band and update their counters to allow them to be used for writing
		 * (once they're closed and empty).
		 */
		for (id = 0; id < ftl_dev_num_bands(dev); ++id) {
			if (spdk_bit_array_get(band->reloc_bitmap, id)) {
				assert(dev->bands[id].num_reloc_bands > 0);
				dev->bands[id].num_reloc_bands--;

				spdk_bit_array_clear(band->reloc_bitmap, id);
			}
		}

		ftl_remove_wptr(wptr);
	}
}
@@ -362,11 +378,17 @@ ftl_next_write_band(struct spdk_ftl_dev *dev)
{
	struct ftl_band *band;

	band = LIST_FIRST(&dev->free_bands);
	if (!band) {
	/* Find a free band that has all of its data moved onto other closed bands */
	LIST_FOREACH(band, &dev->free_bands, list_entry) {
		assert(band->state == FTL_BAND_STATE_FREE);
		if (band->num_reloc_bands == 0 && band->num_reloc_blocks == 0) {
			break;
		}
	}

	if (spdk_unlikely(!band)) {
		return NULL;
	}
	assert(band->state == FTL_BAND_STATE_FREE);

	if (ftl_band_erase(band)) {
		/* TODO: handle erase failure */
@@ -1205,6 +1227,7 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_rwb_batch *batch = io->rwb_batch;
	struct ftl_rwb_entry *entry;
	struct ftl_band *band;

	if (status) {
		ftl_write_fail(io, status);
@@ -1213,11 +1236,17 @@ ftl_write_cb(struct ftl_io *io, void *arg, int status)

	assert(io->lbk_cnt == dev->xfer_size);
	ftl_rwb_foreach(entry, batch) {
		band = entry->band;
		if (!(io->flags & FTL_IO_MD) && !(entry->flags & FTL_IO_PAD)) {
			/* Verify that the LBA is set for user lbks */
			assert(entry->lba != FTL_LBA_INVALID);
		}

		if (band != NULL) {
			assert(band->num_reloc_blocks > 0);
			band->num_reloc_blocks--;
		}

		SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "Write ppa:%lu, lba:%lu\n",
			      entry->ppa.ppa, entry->lba);
	}
@@ -1488,8 +1517,15 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)

	ppa = wptr->ppa;
	ftl_rwb_foreach(entry, batch) {
		entry->ppa = ppa;
		/* Update band's relocation stats if the IO comes from reloc */
		if (entry->flags & FTL_IO_WEAK) {
			if (!spdk_bit_array_get(wptr->band->reloc_bitmap, entry->band->id)) {
				spdk_bit_array_set(wptr->band->reloc_bitmap, entry->band->id);
				entry->band->num_reloc_bands++;
			}
		}

		entry->ppa = ppa;
		if (entry->lba != FTL_LBA_INVALID) {
			pthread_spin_lock(&entry->lock);
			prev_ppa = ftl_l2p_get(dev, entry->lba);
@@ -1556,13 +1592,12 @@ ftl_process_writes(struct spdk_ftl_dev *dev)
static void
ftl_rwb_entry_fill(struct ftl_rwb_entry *entry, struct ftl_io *io)
{
	struct ftl_band *band;

	memcpy(entry->data, ftl_io_iovec_addr(io), FTL_BLOCK_SIZE);

	if (ftl_rwb_entry_weak(entry)) {
		band = ftl_band_from_ppa(io->dev, io->ppa);
		entry->ppa = ftl_band_next_ppa(band, io->ppa, io->pos);
		entry->band = ftl_band_from_ppa(io->dev, io->ppa);
		entry->ppa = ftl_band_next_ppa(entry->band, io->ppa, io->pos);
		entry->band->num_reloc_blocks++;
	}

	entry->trace = io->trace;
+7 −0
Original line number Diff line number Diff line
@@ -352,6 +352,12 @@ ftl_dev_init_bands(struct spdk_ftl_dev *dev)
			SPDK_ERRLOG("Failed to initialize metadata structures for band [%u]\n", i);
			goto out;
		}

		band->reloc_bitmap = spdk_bit_array_create(ftl_dev_num_bands(dev));
		if (!band->reloc_bitmap) {
			SPDK_ERRLOG("Failed to allocate band relocation bitmap\n");
			goto out;
		}
	}

	for (i = 0; i < ftl_dev_num_punits(dev); ++i) {
@@ -1242,6 +1248,7 @@ ftl_dev_free_sync(struct spdk_ftl_dev *dev)
		for (i = 0; i < ftl_dev_num_bands(dev); ++i) {
			free(dev->bands[i].chunk_buf);
			spdk_bit_array_free(&dev->bands[i].lba_map.vld);
			spdk_bit_array_free(&dev->bands[i].reloc_bitmap);
		}
	}

+3 −1
Original line number Diff line number Diff line
@@ -448,9 +448,11 @@ ftl_reloc_io_init(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move,
static int
ftl_reloc_write(struct ftl_band_reloc *breloc, struct ftl_reloc_move *move)
{
	int io_flags =  FTL_IO_WEAK | FTL_IO_VECTOR_LBA | FTL_IO_BYPASS_CACHE;

	if (spdk_likely(!move->io)) {
		move->io = ftl_reloc_io_init(breloc, move, ftl_reloc_write_cb,
					     FTL_IO_WRITE, FTL_IO_WEAK | FTL_IO_VECTOR_LBA);
					     FTL_IO_WRITE, io_flags);
		if (!move->io) {
			ftl_reloc_free_move(breloc, move);
			return -ENOMEM;
+1 −0
Original line number Diff line number Diff line
@@ -302,6 +302,7 @@ ftl_rwb_batch_release(struct ftl_rwb_batch *batch)
	ftl_rwb_foreach(entry, batch) {
		num_acquired = __atomic_fetch_sub(&rwb->num_acquired[ftl_rwb_entry_type(entry)], 1,
						  __ATOMIC_SEQ_CST);
		entry->band = NULL;
		assert(num_acquired  > 0);
	}

Loading