Commit 78154d95 authored by Konrad Sztyber's avatar Konrad Sztyber Committed by Ben Walker
Browse files

lib/ftl: allow flushing active bands



This patch adds a function that marks all active open bands to be
flushed and notifies the caller once all of them have been closed. This
needs to be done before the data from the non-volatile cache can be
scrubbed, to make sure it is stored on bands the device can be restored from.

Change-Id: I9658554ffce90c45dabe31f294879dc17ec670b9
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/459619


Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent 78097953
Loading
Loading
Loading
Loading
+77 −7
Original line number Diff line number Diff line
@@ -49,6 +49,18 @@
#include "ftl_debug.h"
#include "ftl_reloc.h"

/* Tracks a single "flush all open bands" request.  One is allocated per
 * ftl_flush_active_bands() call, linked on dev->band_flush_list, and freed
 * (after invoking cb_fn) once every band counted in num_bands has closed.
 */
struct ftl_band_flush {
	/* Owner device */
	struct spdk_ftl_dev		*dev;
	/* Number of bands left to be flushed */
	size_t				num_bands;
	/* User callback */
	spdk_ftl_fn			cb_fn;
	/* Callback's argument */
	void				*cb_arg;
	/* List link */
	LIST_ENTRY(ftl_band_flush)	list_entry;
};

struct ftl_wptr {
	/* Owner device */
	struct spdk_ftl_dev		*dev;
@@ -79,6 +91,9 @@ struct ftl_wptr {

	/* Number of outstanding write requests */
	uint32_t			num_outstanding;

	/* Marks that the band related to this wptr needs to be closed as soon as possible */
	bool				flush;
};

struct ftl_flush {
@@ -127,6 +142,20 @@ ftl_wptr_free(struct ftl_wptr *wptr)
/* Unlinks a write pointer from the device's wptr list and frees it.
 *
 * If this wptr's band was marked for flushing (wptr->flush), each pending
 * band-flush request on dev->band_flush_list is credited with one closed
 * band; any request whose outstanding count drops to zero fires its user
 * callback (status 0), is unlinked, and is freed.  The SAFE iteration
 * variant is required because completed requests are removed mid-walk.
 */
static void
ftl_remove_wptr(struct ftl_wptr *wptr)
{
	struct spdk_ftl_dev *dev = wptr->dev;
	struct ftl_band_flush *flush, *tmp;

	if (spdk_unlikely(wptr->flush)) {
		LIST_FOREACH_SAFE(flush, &dev->band_flush_list, list_entry, tmp) {
			assert(flush->num_bands > 0);
			if (--flush->num_bands == 0) {
				/* Last band for this request closed - notify the caller */
				flush->cb_fn(flush->cb_arg, 0);
				LIST_REMOVE(flush, list_entry);
				free(flush);
			}
		}
	}

	LIST_REMOVE(wptr, list_entry);
	ftl_wptr_free(wptr);
}
@@ -599,6 +628,33 @@ ftl_wptr_ready(struct ftl_wptr *wptr)
	return 1;
}

/* Marks every currently open band (i.e. every band with an active write
 * pointer) to be flushed and registers cb_fn to be invoked once all of
 * them have been closed (see ftl_remove_wptr()).
 *
 * Must be called from the device's core thread.
 *
 * Returns 0 on success (cb_fn will be called exactly once with status 0),
 * -ENOMEM if the request structure could not be allocated.
 *
 * Fixes vs. previous version: if no bands are open, the request would
 * sit on band_flush_list forever with num_bands == 0 - the callback was
 * never invoked and the allocation leaked.  Now the request completes
 * immediately.  The request is also fully initialized before being
 * linked onto the list.
 */
int
ftl_flush_active_bands(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg)
{
	struct ftl_wptr *wptr;
	struct ftl_band_flush *flush;

	assert(ftl_get_core_thread(dev) == spdk_get_thread());

	flush = calloc(1, sizeof(*flush));
	if (spdk_unlikely(!flush)) {
		return -ENOMEM;
	}

	flush->cb_fn = cb_fn;
	flush->cb_arg = cb_arg;
	flush->dev = dev;

	LIST_FOREACH(wptr, &dev->wptr_list, list_entry) {
		wptr->flush = true;
		flush->num_bands++;
	}

	if (spdk_unlikely(flush->num_bands == 0)) {
		/* No open bands - nothing to flush, complete right away.
		 * NOTE(review): callback is invoked synchronously here;
		 * confirm callers tolerate completion before this returns.
		 */
		cb_fn(cb_arg, 0);
		free(flush);
		return 0;
	}

	LIST_INSERT_HEAD(&dev->band_flush_list, flush, list_entry);

	return 0;
}

static const struct spdk_ftl_limit *
ftl_get_limit(const struct spdk_ftl_dev *dev, int type)
{
@@ -692,6 +748,22 @@ ftl_remove_free_bands(struct spdk_ftl_dev *dev)
	dev->next_band = NULL;
}

/* Pushes padding entries into the write buffer so that the band owned by
 * this write pointer can fill up and be closed.  The amount of padding is
 * capped both by the blocks the band still needs and by the free space
 * currently available in the write buffer.
 */
static void
ftl_wptr_pad_band(struct ftl_wptr *wptr)
{
	struct spdk_ftl_dev *dev = wptr->dev;
	size_t acquired, free_entries, band_left;

	acquired = ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) +
		   ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER);
	free_entries = ftl_rwb_size(dev->rwb) - acquired;
	band_left = ftl_wptr_user_lbks_left(wptr);

	/* Pad write buffer until band is full */
	ftl_rwb_pad(dev, spdk_min(band_left, free_entries));
}

static void
ftl_wptr_process_shutdown(struct ftl_wptr *wptr)
{
@@ -699,7 +771,6 @@ ftl_wptr_process_shutdown(struct ftl_wptr *wptr)
	size_t size = ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) +
		      ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER);
	size_t num_active = dev->xfer_size * ftl_rwb_get_active_batches(dev->rwb);
	size_t band_length, rwb_free_space, pad_length;

	num_active = num_active ? num_active : dev->xfer_size;
	if (size >= num_active) {
@@ -712,12 +783,7 @@ ftl_wptr_process_shutdown(struct ftl_wptr *wptr)
		ftl_remove_free_bands(dev);
	}

	band_length = ftl_wptr_user_lbks_left(wptr);
	rwb_free_space = ftl_rwb_size(dev->rwb) - size;
	pad_length = spdk_min(band_length, rwb_free_space);

	/* Pad write buffer until band is full */
	ftl_rwb_pad(dev, pad_length);
	ftl_wptr_pad_band(wptr);
}

static int
@@ -1510,6 +1576,10 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
		ftl_wptr_process_shutdown(wptr);
	}

	if (spdk_unlikely(wptr->flush)) {
		ftl_wptr_pad_band(wptr);
	}

	batch = ftl_rwb_pop(dev->rwb);
	if (!batch) {
		/* If there are queued flush requests we need to pad the RWB to */
+4 −0
Original line number Diff line number Diff line
@@ -59,6 +59,7 @@ struct ftl_wptr;
struct ftl_flush;
struct ftl_reloc;
struct ftl_anm_event;
struct ftl_band_flush;

struct ftl_stats {
	/* Number of writes scheduled directly by the user */
@@ -221,6 +222,8 @@ struct spdk_ftl_dev {

	/* Flush list */
	LIST_HEAD(, ftl_flush)			flush_list;
	/* List of band flush requests */
	LIST_HEAD(, ftl_band_flush)		band_flush_list;

	/* Device specific md buffer */
	struct ftl_global_md			global_md;
@@ -290,6 +293,7 @@ int ftl_retrieve_chunk_info(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
				struct spdk_ocssd_chunk_information_entry *info,
				unsigned int num_entries);
bool	ftl_ppa_is_written(struct ftl_band *band, struct ftl_ppa ppa);
int	ftl_flush_active_bands(struct spdk_ftl_dev *dev, spdk_ftl_fn cb_fn, void *cb_arg);

#define ftl_to_ppa(addr) \
	(struct ftl_ppa) { .ppa = (uint64_t)(addr) }
+1 −0
Original line number Diff line number Diff line
@@ -648,6 +648,7 @@ ftl_init_wptr_list(struct spdk_ftl_dev *dev)
{
	LIST_INIT(&dev->wptr_list);
	LIST_INIT(&dev->flush_list);
	LIST_INIT(&dev->band_flush_list);
}

static size_t