Commit 77d591e1 authored by Wojciech Malikowski's avatar Wojciech Malikowski Committed by Tomasz Zawadzki
Browse files

lib/ftl: Change ftl_chunk structure to ftl_zone



This is the starting point for moving the current FTL
implementation, which works on top of the
Open Channel NVMe driver, to work on top of an
abstracted zoned bdev.

This patch changes the name of the ftl_chunk structure
to ftl_zone and starts using zone states from the zdev
interface.

Change-Id: I5429f489cc08a1ac27f09aba3dca4b40ea95eeb3
Signed-off-by: default avatarWojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/467391


Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
Reviewed-by: default avatarShuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: default avatarKonrad Sztyber <konrad.sztyber@intel.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
parent 2938dc14
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -150,10 +150,10 @@ struct spdk_ftl_attrs {
	size_t					lbk_size;
	/* Write buffer cache */
	struct spdk_bdev_desc			*cache_bdev_desc;
	/* Number of chunks per parallel unit in the underlying device (including any offline ones) */
	size_t					num_chunks;
	/* Number of sectors per chunk */
	size_t					chunk_size;
	/* Number of zones per parallel unit in the underlying device (including any offline ones) */
	size_t					num_zones;
	/* Number of logical blocks per zone */
	size_t					zone_size;
	/* Device specific configuration */
	struct spdk_ftl_conf			conf;
};
+3 −3
Original line number Diff line number Diff line
@@ -137,7 +137,7 @@ ftl_anm_event_alloc(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
		break;
	case FTL_ANM_RANGE_CHK:
	case FTL_ANM_RANGE_PU:
		event->num_lbks = ftl_dev_lbks_in_chunk(dev);
		event->num_lbks = ftl_dev_lbks_in_zone(dev);
		break;
	default:
		assert(false);
@@ -160,9 +160,9 @@ ftl_anm_process_log(struct ftl_anm_poller *poller,
	num_bands = range != FTL_ANM_RANGE_PU ? 1 : ftl_dev_num_bands(dev);

	for (i = 0; i < num_bands; ++i) {
		struct ftl_chunk *chk = ftl_band_chunk_from_ppa(&dev->bands[i], ppa);
		struct ftl_zone *zone = ftl_band_zone_from_ppa(&dev->bands[i], ppa);

		if (chk->state == FTL_CHUNK_STATE_BAD) {
		if (zone->state == SPDK_BDEV_ZONE_STATE_OFFLINE) {
			continue;
		}

+76 −76
Original line number Diff line number Diff line
@@ -156,7 +156,7 @@ ftl_band_free_lba_map(struct ftl_band *band)
	assert(!band->high_prio);

	/* Verify that band's metadata is consistent with l2p */
	if (band->num_chunks) {
	if (band->num_zones) {
		assert(ftl_band_validate_md(band) == true);
	}

@@ -224,7 +224,7 @@ static void
_ftl_band_set_closed(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_chunk *chunk;
	struct ftl_zone *zone;

	/* Set the state as free_md() checks for that */
	band->state = FTL_BAND_STATE_CLOSED;
@@ -232,10 +232,10 @@ _ftl_band_set_closed(struct ftl_band *band)
	/* Free the lba map if there are no outstanding IOs */
	ftl_band_release_lba_map(band);

	if (spdk_likely(band->num_chunks)) {
	if (spdk_likely(band->num_zones)) {
		LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
		CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
			chunk->state = FTL_CHUNK_STATE_CLOSED;
		CIRCLEQ_FOREACH(zone, &band->zones, circleq) {
			zone->state = SPDK_BDEV_ZONE_STATE_CLOSED;
		}
	} else {
		LIST_REMOVE(band, list_entry);
@@ -335,7 +335,7 @@ ftl_unpack_tail_md(struct ftl_band *band)

	/*
	 * When restoring from a dirty shutdown it's possible old tail meta wasn't yet cleared -
	 * band had saved head meta, but didn't manage to send erase to all chunks.
	 * band had saved head meta, but didn't manage to send erase to all zones.
	 * The already found tail md header is valid, but inconsistent with the head meta. Treat
	 * such a band as open/without valid tail md.
	 */
@@ -386,28 +386,28 @@ struct ftl_ppa
ftl_band_tail_md_ppa(struct ftl_band *band)
{
	struct ftl_ppa ppa = {};
	struct ftl_chunk *chunk;
	struct ftl_zone *zone;
	struct spdk_ftl_dev *dev = band->dev;
	size_t xfer_size = dev->xfer_size;
	size_t num_req = ftl_band_tail_md_offset(band) / xfer_size;
	size_t i;

	if (spdk_unlikely(!band->num_chunks)) {
	if (spdk_unlikely(!band->num_zones)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	/* Metadata should be aligned to xfer size */
	assert(ftl_band_tail_md_offset(band) % xfer_size == 0);

	chunk = CIRCLEQ_FIRST(&band->chunks);
	for (i = 0; i < num_req % band->num_chunks; ++i) {
		chunk = ftl_band_next_chunk(band, chunk);
	zone = CIRCLEQ_FIRST(&band->zones);
	for (i = 0; i < num_req % band->num_zones; ++i) {
		zone = ftl_band_next_zone(band, zone);
	}

	ppa.lbk = (num_req / band->num_chunks) * xfer_size;
	ppa.lbk = (num_req / band->num_zones) * xfer_size;
	ppa.chk = band->id;
	ppa.pu = chunk->punit->start_ppa.pu;
	ppa.grp = chunk->punit->start_ppa.grp;
	ppa.pu = zone->punit->start_ppa.pu;
	ppa.grp = zone->punit->start_ppa.grp;

	return ppa;
}
@@ -417,11 +417,11 @@ ftl_band_head_md_ppa(struct ftl_band *band)
{
	struct ftl_ppa ppa;

	if (spdk_unlikely(!band->num_chunks)) {
	if (spdk_unlikely(!band->num_zones)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	ppa = CIRCLEQ_FIRST(&band->chunks)->punit->start_ppa;
	ppa = CIRCLEQ_FIRST(&band->zones)->punit->start_ppa;
	ppa.chk = band->id;

	return ppa;
@@ -482,7 +482,7 @@ ftl_band_age(const struct ftl_band *band)
size_t
ftl_band_num_usable_lbks(const struct ftl_band *band)
{
	return band->num_chunks * ftl_dev_lbks_in_chunk(band->dev);
	return band->num_zones * ftl_dev_lbks_in_zone(band->dev);
}

size_t
@@ -516,8 +516,8 @@ ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
	return &dev->bands[ppa.chk];
}

struct ftl_chunk *
ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
struct ftl_zone *
ftl_band_zone_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct spdk_ftl_dev *dev = band->dev;
	unsigned int punit;
@@ -525,7 +525,7 @@ ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
	punit = ftl_ppa_flatten_punit(dev, ppa);
	assert(punit < ftl_dev_num_punits(dev));

	return &band->chunk_buf[punit];
	return &band->zone_buf[punit];
}

uint64_t
@@ -537,67 +537,67 @@ ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
	punit = ftl_ppa_flatten_punit(dev, ppa);
	assert(ppa.chk == band->id);

	return punit * ftl_dev_lbks_in_chunk(dev) + ppa.lbk;
	return punit * ftl_dev_lbks_in_zone(dev) + ppa.lbk;
}

struct ftl_ppa
ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbks)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_chunk *chunk;
	struct ftl_zone *zone;
	unsigned int punit_num;
	size_t num_xfers, num_stripes;

	assert(ppa.chk == band->id);

	punit_num = ftl_ppa_flatten_punit(dev, ppa);
	chunk = &band->chunk_buf[punit_num];
	zone = &band->zone_buf[punit_num];

	num_lbks += (ppa.lbk % dev->xfer_size);
	ppa.lbk  -= (ppa.lbk % dev->xfer_size);

#if defined(DEBUG)
	/* Check that the number of chunks has not been changed */
	struct ftl_chunk *_chunk;
	size_t _num_chunks = 0;
	CIRCLEQ_FOREACH(_chunk, &band->chunks, circleq) {
		if (spdk_likely(_chunk->state != FTL_CHUNK_STATE_BAD)) {
			_num_chunks++;
	/* Check that the number of zones has not been changed */
	struct ftl_zone *_zone;
	size_t _num_zones = 0;
	CIRCLEQ_FOREACH(_zone, &band->zones, circleq) {
		if (spdk_likely(_zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE)) {
			_num_zones++;
		}
	}
	assert(band->num_chunks == _num_chunks);
	assert(band->num_zones == _num_zones);
#endif
	assert(band->num_chunks != 0);
	num_stripes = (num_lbks / dev->xfer_size) / band->num_chunks;
	assert(band->num_zones != 0);
	num_stripes = (num_lbks / dev->xfer_size) / band->num_zones;
	ppa.lbk  += num_stripes * dev->xfer_size;
	num_lbks -= num_stripes * dev->xfer_size * band->num_chunks;
	num_lbks -= num_stripes * dev->xfer_size * band->num_zones;

	if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
	if (ppa.lbk > ftl_dev_lbks_in_zone(dev)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	num_xfers = num_lbks / dev->xfer_size;
	for (size_t i = 0; i < num_xfers; ++i) {
		/* When the last chunk is reached the lbk part of the address */
		/* When the last zone is reached the lbk part of the address */
		/* needs to be increased by xfer_size */
		if (ftl_band_chunk_is_last(band, chunk)) {
		if (ftl_band_zone_is_last(band, zone)) {
			ppa.lbk += dev->xfer_size;
			if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
			if (ppa.lbk > ftl_dev_lbks_in_zone(dev)) {
				return ftl_to_ppa(FTL_PPA_INVALID);
			}
		}

		chunk = ftl_band_next_operational_chunk(band, chunk);
		assert(chunk);
		ppa.grp = chunk->start_ppa.grp;
		ppa.pu = chunk->start_ppa.pu;
		zone = ftl_band_next_operational_zone(band, zone);
		assert(zone);
		ppa.grp = zone->start_ppa.grp;
		ppa.pu = zone->start_ppa.pu;

		num_lbks -= dev->xfer_size;
	}

	if (num_lbks) {
		ppa.lbk += num_lbks;
		if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
		if (ppa.lbk > ftl_dev_lbks_in_zone(dev)) {
			return ftl_to_ppa(FTL_PPA_INVALID);
		}
	}
@@ -608,18 +608,18 @@ ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbk
static size_t
ftl_xfer_offset_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct ftl_chunk *chunk, *current_chunk;
	struct ftl_zone *zone, *current_zone;
	unsigned int punit_offset = 0;
	size_t off, num_stripes, xfer_size = band->dev->xfer_size;

	assert(ppa.chk == band->id);

	num_stripes = (ppa.lbk / xfer_size) * band->num_chunks;
	num_stripes = (ppa.lbk / xfer_size) * band->num_zones;
	off = ppa.lbk % xfer_size;

	current_chunk = ftl_band_chunk_from_ppa(band, ppa);
	CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
		if (current_chunk == chunk) {
	current_zone = ftl_band_zone_from_ppa(band, ppa);
	CIRCLEQ_FOREACH(zone, &band->zones, circleq) {
		if (current_zone == zone) {
			break;
		}
		punit_offset++;
@@ -635,9 +635,9 @@ ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
	struct spdk_ftl_dev *dev = band->dev;
	uint64_t punit;

	punit = lbkoff / ftl_dev_lbks_in_chunk(dev) + dev->range.begin;
	punit = lbkoff / ftl_dev_lbks_in_zone(dev) + dev->range.begin;

	ppa.lbk = lbkoff % ftl_dev_lbks_in_chunk(dev);
	ppa.lbk = lbkoff % ftl_dev_lbks_in_zone(dev);
	ppa.chk = band->id;
	ppa.pu = punit / dev->geo.num_grp;
	ppa.grp = punit % dev->geo.num_grp;
@@ -823,7 +823,7 @@ ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_ppa start_ppa
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md_io *io;

	if (spdk_unlikely(!band->num_chunks)) {
	if (spdk_unlikely(!band->num_zones)) {
		return -ENOENT;
	}

@@ -1036,46 +1036,46 @@ ftl_band_read_head_md(struct ftl_band *band, ftl_io_fn cb_fn, void *cb_ctx)
}

static void
ftl_band_remove_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
ftl_band_remove_zone(struct ftl_band *band, struct ftl_zone *zone)
{
	CIRCLEQ_REMOVE(&band->chunks, chunk, circleq);
	band->num_chunks--;
	CIRCLEQ_REMOVE(&band->zones, zone, circleq);
	band->num_zones--;
}

static void
ftl_erase_fail(struct ftl_io *io, int status)
{
	struct ftl_chunk *chunk;
	struct ftl_zone *zone;
	struct ftl_band *band = io->band;
	char buf[128];

	SPDK_ERRLOG("Erase failed @ppa: %s, status: %d\n",
		    ftl_ppa2str(io->ppa, buf, sizeof(buf)), status);

	chunk = ftl_band_chunk_from_ppa(band, io->ppa);
	chunk->state = FTL_CHUNK_STATE_BAD;
	ftl_band_remove_chunk(band, chunk);
	zone = ftl_band_zone_from_ppa(band, io->ppa);
	zone->state = SPDK_BDEV_ZONE_STATE_OFFLINE;
	ftl_band_remove_zone(band, zone);
	band->tail_md_ppa = ftl_band_tail_md_ppa(band);
}

static void
ftl_band_erase_cb(struct ftl_io *io, void *ctx, int status)
{
	struct ftl_chunk *chunk;
	struct ftl_zone *zone;

	if (spdk_unlikely(status)) {
		ftl_erase_fail(io, status);
		return;
	}
	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
	chunk->state = FTL_CHUNK_STATE_FREE;
	chunk->write_offset = 0;
	zone = ftl_band_zone_from_ppa(io->band, io->ppa);
	zone->state = SPDK_BDEV_ZONE_STATE_EMPTY;
	zone->write_offset = 0;
}

int
ftl_band_erase(struct ftl_band *band)
{
	struct ftl_chunk *chunk;
	struct ftl_zone *zone;
	struct ftl_io *io;
	int rc = 0;

@@ -1084,8 +1084,8 @@ ftl_band_erase(struct ftl_band *band)

	ftl_band_set_state(band, FTL_BAND_STATE_PREP);

	CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
		if (chunk->state == FTL_CHUNK_STATE_FREE) {
	CIRCLEQ_FOREACH(zone, &band->zones, circleq) {
		if (zone->state == SPDK_BDEV_ZONE_STATE_EMPTY) {
			continue;
		}

@@ -1095,7 +1095,7 @@ ftl_band_erase(struct ftl_band *band)
			break;
		}

		io->ppa = chunk->start_ppa;
		io->ppa = zone->start_ppa;
		rc = ftl_io_erase(io);
		if (rc) {
			assert(0);
@@ -1120,27 +1120,27 @@ ftl_band_write_prep(struct ftl_band *band)
	return 0;
}

struct ftl_chunk *
ftl_band_next_operational_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
struct ftl_zone *
ftl_band_next_operational_zone(struct ftl_band *band, struct ftl_zone *zone)
{
	struct ftl_chunk *result = NULL;
	struct ftl_chunk *entry;
	struct ftl_zone *result = NULL;
	struct ftl_zone *entry;

	if (spdk_unlikely(!band->num_chunks)) {
	if (spdk_unlikely(!band->num_zones)) {
		return NULL;
	}

	/* Erasing band may fail after it was assigned to wptr. */
	/* In such a case chunk is no longer in band->chunks queue. */
	if (spdk_likely(chunk->state != FTL_CHUNK_STATE_BAD)) {
		result = ftl_band_next_chunk(band, chunk);
	/* In such a case zone is no longer in band->zones queue. */
	if (spdk_likely(zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE)) {
		result = ftl_band_next_zone(band, zone);
	} else {
		CIRCLEQ_FOREACH_REVERSE(entry, &band->chunks, circleq) {
			if (entry->pos > chunk->pos) {
		CIRCLEQ_FOREACH_REVERSE(entry, &band->zones, circleq) {
			if (entry->pos > zone->pos) {
				result = entry;
			} else {
				if (!result) {
					result = CIRCLEQ_FIRST(&band->chunks);
					result = CIRCLEQ_FIRST(&band->zones);
				}
				break;
			}
+27 −34
Original line number Diff line number Diff line
@@ -37,6 +37,7 @@
#include "spdk/stdinc.h"
#include "spdk/bit_array.h"
#include "spdk/queue.h"
#include "spdk/bdev_zone.h"

#include "ftl_io.h"
#include "ftl_ppa.h"
@@ -48,17 +49,9 @@
struct spdk_ftl_dev;
struct ftl_lba_map_request;

enum ftl_chunk_state {
	FTL_CHUNK_STATE_FREE,
	FTL_CHUNK_STATE_OPEN,
	FTL_CHUNK_STATE_CLOSED,
	FTL_CHUNK_STATE_BAD,
	FTL_CHUNK_STATE_VACANT,
};

struct ftl_chunk {
	/* Block state */
	enum ftl_chunk_state			state;
struct ftl_zone {
	/* Zone state */
	enum spdk_bdev_zone_state		state;

	/* Indicates that there is inflight write */
	bool					busy;
@@ -72,10 +65,10 @@ struct ftl_chunk {
	/* Pointer to parallel unit */
	struct ftl_punit			*punit;

	/* Position in band's chunk_buf */
	/* Position in band's zone_buf */
	uint32_t				pos;

	CIRCLEQ_ENTRY(ftl_chunk)		circleq;
	CIRCLEQ_ENTRY(ftl_zone)			circleq;
};

enum ftl_md_status {
@@ -154,14 +147,14 @@ struct ftl_band {
	/* Device this band belongs to */
	struct spdk_ftl_dev			*dev;

	/* Number of operational chunks */
	size_t					num_chunks;
	/* Number of operational zones */
	size_t					num_zones;

	/* Array of chunks */
	struct ftl_chunk			*chunk_buf;
	/* Array of zones */
	struct ftl_zone				*zone_buf;

	/* List of operational chunks */
	CIRCLEQ_HEAD(, ftl_chunk)		chunks;
	/* List of operational zones */
	CIRCLEQ_HEAD(, ftl_zone)		zones;

	/* LBA map */
	struct ftl_lba_map			lba_map;
@@ -223,7 +216,7 @@ size_t ftl_band_user_lbks(const struct ftl_band *band);
void		ftl_band_set_addr(struct ftl_band *band, uint64_t lba,
				  struct ftl_ppa ppa);
struct ftl_band *ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa);
struct ftl_chunk *ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa);
struct ftl_zone *ftl_band_zone_from_ppa(struct ftl_band *band, struct ftl_ppa);
void		ftl_band_md_clear(struct ftl_band *band);
int		ftl_band_read_tail_md(struct ftl_band *band, struct ftl_ppa,
				      ftl_io_fn cb_fn, void *cb_ctx);
@@ -236,8 +229,8 @@ void ftl_band_write_failed(struct ftl_band *band);
int		ftl_band_full(struct ftl_band *band, size_t offset);
int		ftl_band_erase(struct ftl_band *band);
int		ftl_band_write_prep(struct ftl_band *band);
struct ftl_chunk *ftl_band_next_operational_chunk(struct ftl_band *band,
		struct ftl_chunk *chunk);
struct ftl_zone *ftl_band_next_operational_zone(struct ftl_band *band,
		struct ftl_zone *zone);
size_t		ftl_lba_map_pool_elem_size(struct spdk_ftl_dev *dev);

static inline int
@@ -246,11 +239,11 @@ ftl_band_empty(const struct ftl_band *band)
	return band->lba_map.num_vld == 0;
}

static inline struct ftl_chunk *
ftl_band_next_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
static inline struct ftl_zone *
ftl_band_next_zone(struct ftl_band *band, struct ftl_zone *zone)
{
	assert(chunk->state != FTL_CHUNK_STATE_BAD);
	return CIRCLEQ_LOOP_NEXT(&band->chunks, chunk, circleq);
	assert(zone->state != SPDK_BDEV_ZONE_STATE_OFFLINE);
	return CIRCLEQ_LOOP_NEXT(&band->zones, zone, circleq);
}

static inline void
@@ -282,23 +275,23 @@ ftl_band_lbkoff_valid(struct ftl_band *band, size_t lbkoff)
}

static inline int
ftl_band_chunk_is_last(struct ftl_band *band, struct ftl_chunk *chunk)
ftl_band_zone_is_last(struct ftl_band *band, struct ftl_zone *zone)
{
	return chunk == CIRCLEQ_LAST(&band->chunks);
	return zone == CIRCLEQ_LAST(&band->zones);
}

static inline int
ftl_band_chunk_is_first(struct ftl_band *band, struct ftl_chunk *chunk)
ftl_band_zone_is_first(struct ftl_band *band, struct ftl_zone *zone)
{
	return chunk == CIRCLEQ_FIRST(&band->chunks);
	return zone == CIRCLEQ_FIRST(&band->zones);
}

static inline int
ftl_chunk_is_writable(const struct ftl_chunk *chunk)
ftl_zone_is_writable(const struct ftl_zone *zone)
{
	return (chunk->state == FTL_CHUNK_STATE_OPEN ||
		chunk->state == FTL_CHUNK_STATE_FREE) &&
	       !chunk->busy;
	return (zone->state == SPDK_BDEV_ZONE_STATE_OPEN ||
		zone->state == SPDK_BDEV_ZONE_STATE_EMPTY) &&
	       !zone->busy;
}

#endif /* FTL_BAND_H */
+26 −27
Original line number Diff line number Diff line
@@ -74,8 +74,8 @@ struct ftl_wptr {
	/* Current logical block's offset */
	uint64_t			offset;

	/* Current erase block */
	struct ftl_chunk		*chunk;
	/* Current zone */
	struct ftl_zone			*zone;

	/* Pending IO queue */
	TAILQ_HEAD(, ftl_io)		pending_queue;
@@ -295,7 +295,7 @@ ftl_ppa_read_next_ppa(struct ftl_io *io, struct ftl_ppa *ppa)
	assert(!ftl_ppa_invalid(*ppa));

	/* Metadata has to be read in the way it's written (jumping across */
	/* the chunks in xfer_size increments) */
	/* the zones in xfer_size increments) */
	if (io->flags & FTL_IO_MD) {
		max_lbks = dev->xfer_size - (ppa->lbk % dev->xfer_size);
		lbk_cnt = spdk_min(ftl_io_iovec_len_left(io), max_lbks);
@@ -322,7 +322,7 @@ ftl_wptr_open_band(struct ftl_wptr *wptr)
{
	struct ftl_band *band = wptr->band;

	assert(ftl_band_chunk_is_first(band, wptr->chunk));
	assert(ftl_band_zone_is_first(band, wptr->zone));
	assert(band->lba_map.num_vld == 0);

	ftl_band_clear_lba_map(band);
@@ -339,17 +339,16 @@ ftl_submit_erase(struct ftl_io *io)
	struct spdk_ftl_dev *dev = io->dev;
	struct ftl_band *band = io->band;
	struct ftl_ppa ppa = io->ppa;
	struct ftl_chunk *chunk;
	struct ftl_zone *zone;
	uint64_t ppa_packed;
	int rc = 0;
	size_t i;

	for (i = 0; i < io->lbk_cnt; ++i) {
		if (i != 0) {
			chunk = ftl_band_next_chunk(band, ftl_band_chunk_from_ppa(band, ppa));
			assert(chunk->state == FTL_CHUNK_STATE_CLOSED ||
			       chunk->state == FTL_CHUNK_STATE_VACANT);
			ppa = chunk->start_ppa;
			zone = ftl_band_next_zone(band, ftl_band_zone_from_ppa(band, ppa));
			assert(zone->state == SPDK_BDEV_ZONE_STATE_CLOSED);
			ppa = zone->start_ppa;
		}

		assert(ppa.lbk == 0);
@@ -474,8 +473,8 @@ ftl_wptr_init(struct ftl_band *band)

	wptr->dev = dev;
	wptr->band = band;
	wptr->chunk = CIRCLEQ_FIRST(&band->chunks);
	wptr->ppa = wptr->chunk->start_ppa;
	wptr->zone = CIRCLEQ_FIRST(&band->zones);
	wptr->ppa = wptr->zone->start_ppa;
	TAILQ_INIT(&wptr->pending_queue);

	return wptr;
@@ -580,13 +579,13 @@ ftl_wptr_advance(struct ftl_wptr *wptr, size_t xfer_size)
		ftl_band_set_state(band, FTL_BAND_STATE_FULL);
	}

	wptr->chunk->busy = true;
	wptr->zone->busy = true;
	wptr->ppa = ftl_band_next_xfer_ppa(band, wptr->ppa, xfer_size);
	wptr->chunk = ftl_band_next_operational_chunk(band, wptr->chunk);
	wptr->zone = ftl_band_next_operational_zone(band, wptr->zone);

	assert(!ftl_ppa_invalid(wptr->ppa));

	SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "wptr: grp:%d, pu:%d chunk:%d, lbk:%u\n",
	SPDK_DEBUGLOG(SPDK_LOG_FTL_CORE, "wptr: grp:%d, pu:%d zone:%d, lbk:%u\n",
		      wptr->ppa.grp, wptr->ppa.pu, wptr->ppa.chk, wptr->ppa.lbk);

	if (wptr->offset >= next_thld && !dev->next_band) {
@@ -607,9 +606,9 @@ ftl_wptr_ready(struct ftl_wptr *wptr)

	/* TODO: add handling of empty bands */

	if (spdk_unlikely(!ftl_chunk_is_writable(wptr->chunk))) {
	if (spdk_unlikely(!ftl_zone_is_writable(wptr->zone))) {
		/* Erasing band may fail after it was assigned to wptr. */
		if (spdk_unlikely(wptr->chunk->state == FTL_CHUNK_STATE_BAD)) {
		if (spdk_unlikely(wptr->zone->state == SPDK_BDEV_ZONE_STATE_OFFLINE)) {
			ftl_wptr_advance(wptr, wptr->dev->xfer_size);
		}
		return 0;
@@ -1477,14 +1476,14 @@ ftl_io_init_child_write(struct ftl_io *parent, struct ftl_ppa ppa,
static void
ftl_io_child_write_cb(struct ftl_io *io, void *ctx, int status)
{
	struct ftl_chunk *chunk;
	struct ftl_zone *zone;
	struct ftl_wptr *wptr;

	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
	zone = ftl_band_zone_from_ppa(io->band, io->ppa);
	wptr = ftl_wptr_from_band(io->band);

	chunk->busy = false;
	chunk->write_offset += io->lbk_cnt;
	zone->busy = false;
	zone->write_offset += io->lbk_cnt;

	/* If some other write on the same band failed the write pointer would already be freed */
	if (spdk_likely(wptr)) {
@@ -1508,7 +1507,7 @@ ftl_submit_child_write(struct ftl_wptr *wptr, struct ftl_io *io, int lbk_cnt)
		ppa = io->ppa;
	}

	/* Split IO to child requests and release chunk immediately after child is completed */
	/* Split IO to child requests and release zone immediately after child is completed */
	child = ftl_io_init_child_write(io, ppa, ftl_io_iovec_addr(io),
					ftl_io_get_md(io), ftl_io_child_write_cb);
	if (!child) {
@@ -1545,8 +1544,8 @@ ftl_submit_write(struct ftl_wptr *wptr, struct ftl_io *io)

	while (io->iov_pos < io->iov_cnt) {
		/* There are no guarantees of the order of completion of NVMe IO submission queue */
		/* so wait until chunk is not busy before submitting another write */
		if (wptr->chunk->busy) {
		/* so wait until zone is not busy before submitting another write */
		if (wptr->zone->busy) {
			TAILQ_INSERT_TAIL(&wptr->pending_queue, io, retry_entry);
			rc = -EAGAIN;
			break;
@@ -1888,8 +1887,8 @@ spdk_ftl_dev_get_attrs(const struct spdk_ftl_dev *dev, struct spdk_ftl_attrs *at
	attrs->lbk_size = FTL_BLOCK_SIZE;
	attrs->range = dev->range;
	attrs->cache_bdev_desc = dev->nv_cache.bdev_desc;
	attrs->num_chunks = dev->geo.num_chk;
	attrs->chunk_size = dev->geo.clba;
	attrs->num_zones = dev->geo.num_chk;
	attrs->zone_size = dev->geo.clba;
	attrs->conf = dev->conf;
}

@@ -2150,9 +2149,9 @@ ftl_process_anm_event(struct ftl_anm_event *event)
bool
ftl_ppa_is_written(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct ftl_chunk *chunk = ftl_band_chunk_from_ppa(band, ppa);
	struct ftl_zone *zone = ftl_band_zone_from_ppa(band, ppa);

	return ppa.lbk < chunk->write_offset;
	return ppa.lbk < zone->write_offset;
}

static void
Loading