Commit a3c78e15 authored by Mateusz Kozlowski, committed by Darek Stojaczyk
Browse files

lib/ftl: Remove separate dma_buf allocation in band



Since 4k alignment is no longer required for I/O buffers, the
band doesn't need a separate lba map and dma buffer and can use the
same memory location.

Signed-off-by: Mateusz Kozlowski <mateusz.kozlowski@intel.com>
Change-Id: Iea127e8c2f39e6de5d57258098b2dc6be56f439f
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/462042


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
parent a4b34812
Loading
Loading
Loading
Loading
+17 −36
Original line number Diff line number Diff line
@@ -164,9 +164,7 @@ ftl_band_free_lba_map(struct ftl_band *band)
		assert(ftl_band_validate_md(band) == true);
	}

	memset(lba_map->map, 0, ftl_lba_map_pool_elem_size(band->dev));
	spdk_mempool_put(dev->lba_pool, lba_map->map);
	spdk_dma_free(lba_map->dma_buf);
	spdk_mempool_put(dev->lba_pool, lba_map->dma_buf);
	lba_map->map = NULL;
	lba_map->dma_buf = NULL;
}
@@ -292,22 +290,18 @@ ftl_pack_tail_md(struct ftl_band *band)
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_lba_map *lba_map = &band->lba_map;
	struct ftl_tail_md *tail = lba_map->dma_buf;
	size_t map_size;
	void *vld_offset, *map_offset;
	void *vld_offset;

	map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
	vld_offset = (char *)tail + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
	map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;

	/* Clear out the buffer */
	memset(tail, 0, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
	memset(tail, 0, ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE);
	tail->num_lbks = ftl_num_band_lbks(dev);

	pthread_spin_lock(&lba_map->lock);
	spdk_bit_array_store_mask(lba_map->vld, vld_offset);
	pthread_spin_unlock(&lba_map->lock);

	memcpy(map_offset, lba_map->map, map_size);
	ftl_set_md_hdr(band, &tail->hdr, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);

	return FTL_MD_SUCCESS;
@@ -335,15 +329,12 @@ static int
ftl_unpack_tail_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	size_t map_size;
	void *vld_offset, *map_offset;
	void *vld_offset;
	struct ftl_lba_map *lba_map = &band->lba_map;
	struct ftl_tail_md *tail = lba_map->dma_buf;
	int rc;

	map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
	vld_offset = (char *)tail + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
	map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;

	rc = ftl_md_hdr_vld(dev, &tail->hdr, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
	if (rc) {
@@ -364,13 +355,7 @@ ftl_unpack_tail_md(struct ftl_band *band)
		return FTL_MD_INVALID_SIZE;
	}

	if (lba_map->vld) {
	spdk_bit_array_load_mask(lba_map->vld, vld_offset);
	}

	if (lba_map->map) {
		memcpy(lba_map->map, map_offset, map_size);
	}

	return FTL_MD_SUCCESS;
}
@@ -691,19 +676,19 @@ ftl_band_alloc_lba_map(struct ftl_band *band)
	assert(lba_map->ref_cnt == 0);
	assert(lba_map->map == NULL);

	lba_map->map = spdk_mempool_get(dev->lba_pool);
	if (!lba_map->map) {
		return -1;
	}

	lba_map->segments = (char *)lba_map->map + ftl_lba_map_num_lbks(dev) * FTL_BLOCK_SIZE;
	lba_map->dma_buf = spdk_mempool_get(dev->lba_pool);

	lba_map->dma_buf = spdk_dma_zmalloc(ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE, 0, NULL);
	if (!lba_map->dma_buf) {
		spdk_mempool_put(dev->lba_pool, lba_map->map);
		return -1;
	}

	memset(lba_map->dma_buf, 0, ftl_lba_map_pool_elem_size(band->dev));

	lba_map->map = (uint64_t *)((char *)lba_map->dma_buf + FTL_BLOCK_SIZE *
				    (ftl_tail_md_hdr_num_lbks() + ftl_vld_map_num_lbks(dev)));

	lba_map->segments = (char *)lba_map->dma_buf + ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE;

	ftl_band_acquire_lba_map(band);
	return 0;
}
@@ -942,10 +927,6 @@ ftl_read_lba_map_cb(struct ftl_io *io, void *arg, int status)
	assert(lbk_off + io->lbk_cnt <= ftl_lba_map_num_lbks(io->dev));

	if (!status) {
		memcpy((char *)lba_map->map + lbk_off * FTL_BLOCK_SIZE,
		       io->iov[0].iov_base,
		       io->lbk_cnt * FTL_BLOCK_SIZE);

		ftl_lba_map_set_segment_state(lba_map, lbk_off, io->lbk_cnt,
					      FTL_LBA_MAP_SEG_CACHED);
	}
@@ -1027,7 +1008,7 @@ ftl_band_read_lba_map(struct ftl_band *band, size_t offset, size_t lba_cnt,
					      FTL_LBA_MAP_SEG_PENDING);

		rc = ftl_band_read_md(band, num_read, ftl_band_lba_map_ppa(band, lbk_off),
				      (char *)band->lba_map.dma_buf + lbk_off * FTL_BLOCK_SIZE,
				      (char *)band->lba_map.map + lbk_off * FTL_BLOCK_SIZE,
				      ftl_read_lba_map_cb, NULL, cb_fn, cb_ctx);
		if (rc) {
			ftl_lba_map_request_free(band->dev, request);
@@ -1184,7 +1165,7 @@ ftl_band_clear_lba_map(struct ftl_band *band)
	size_t num_segments;

	spdk_bit_array_clear_mask(lba_map->vld);
	memset(lba_map->map, 0, ftl_lba_map_pool_elem_size(band->dev));
	memset(lba_map->map, 0, ftl_lba_map_num_lbks(band->dev) * FTL_BLOCK_SIZE);

	/* For open band all lba map segments are already cached */
	assert(band->state == FTL_BAND_STATE_PREP);
@@ -1197,7 +1178,7 @@ ftl_band_clear_lba_map(struct ftl_band *band)
size_t
ftl_lba_map_pool_elem_size(struct spdk_ftl_dev *dev)
{
	/* lba map pool element has size capable to store lba map + segments map */
	return ftl_lba_map_num_lbks(dev) * FTL_BLOCK_SIZE +
	/* Map pool element holds the whole tail md + segments map */
	return ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE +
	       spdk_divide_round_up(ftl_num_band_lbks(dev), FTL_NUM_LBA_IN_BLOCK);
}
+12 −21
Original line number Diff line number Diff line
@@ -157,7 +157,6 @@ ftl_restore_free(struct ftl_restore *restore)
	}

	spdk_dma_free(restore->md_buf);
	free(restore->lba_map);
	free(restore->bands);
	free(restore);
}
@@ -167,7 +166,7 @@ ftl_restore_init(struct spdk_ftl_dev *dev, ftl_restore_fn cb)
{
	struct ftl_restore *restore;
	struct ftl_restore_band *rband;
	size_t i, md_size;
	size_t i;

	restore = calloc(1, sizeof(*restore));
	if (!restore) {
@@ -192,20 +191,13 @@ ftl_restore_init(struct spdk_ftl_dev *dev, ftl_restore_fn cb)
		rband->md_status = FTL_MD_NO_MD;
	}

	/* Allocate buffer capable of holding either tail md or head mds of all bands */
	md_size = spdk_max(ftl_dev_num_bands(dev) * ftl_head_md_num_lbks(dev) * FTL_BLOCK_SIZE,
			   ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);

	restore->md_buf = spdk_dma_zmalloc(md_size, 0, NULL);
	/* Allocate buffer capable of holding head mds of all bands */
	restore->md_buf = spdk_dma_zmalloc(ftl_dev_num_bands(dev) * ftl_head_md_num_lbks(dev) *
					   FTL_BLOCK_SIZE, 0, NULL);
	if (!restore->md_buf) {
		goto error;
	}

	restore->lba_map = calloc(ftl_num_band_lbks(dev), sizeof(uint64_t));
	if (!restore->lba_map) {
		goto error;
	}

	return restore;
error:
	ftl_restore_free(restore);
@@ -1215,6 +1207,7 @@ ftl_restore_tail_md_cb(struct ftl_io *io, void *ctx, int status)
		if (!dev->conf.allow_open_bands) {
			SPDK_ERRLOG("%s while restoring tail md in band %u.\n",
				    spdk_strerror(-status), rband->band->id);
			ftl_band_release_lba_map(rband->band);
			ftl_restore_complete(restore, status);
			return;
		} else {
@@ -1226,16 +1219,11 @@ ftl_restore_tail_md_cb(struct ftl_io *io, void *ctx, int status)
	}

	if (!status && ftl_restore_l2p(rband->band)) {
		ftl_band_release_lba_map(rband->band);
		ftl_restore_complete(restore, -ENOTRECOVERABLE);
		return;
	}

	/*
	 * The LBA map for bands is assigned from ftl_restore->lba_map and needs to be set to NULL
	 * before successful restore, otherwise ftl_band_alloc_lba_map will fail after
	 * initialization finalizes.
	 */
	rband->band->lba_map.map = NULL;
	ftl_band_release_lba_map(rband->band);

	rband = ftl_restore_next_band(restore);
	if (!rband) {
@@ -1258,8 +1246,11 @@ ftl_restore_tail_md(struct ftl_restore_band *rband)
	struct ftl_restore *restore = rband->parent;
	struct ftl_band *band = rband->band;

	band->lba_map.map = restore->lba_map;
	band->lba_map.dma_buf = restore->md_buf;
	if (ftl_band_alloc_lba_map(band)) {
		SPDK_ERRLOG("Failed to allocate lba map\n");
		ftl_restore_complete(restore, -ENOMEM);
		return -ENOMEM;
	}

	if (ftl_band_read_tail_md(band, band->tail_md_ppa, ftl_restore_tail_md_cb, rband)) {
		SPDK_ERRLOG("Failed to send tail metadata read\n");
+7 −2
Original line number Diff line number Diff line
@@ -65,6 +65,11 @@ test_init_ftl_dev(const struct spdk_ocssd_geometry_data *geo,
	dev->punits = calloc(ftl_dev_num_punits(dev), sizeof(*dev->punits));
	SPDK_CU_ASSERT_FATAL(dev->punits != NULL);

	dev->lba_pool = spdk_mempool_create("ftl_ut", 2, 0x18000,
					    SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
					    SPDK_ENV_SOCKET_ID_ANY);
	SPDK_CU_ASSERT_FATAL(dev->lba_pool != NULL);

	for (size_t i = 0; i < ftl_dev_num_punits(dev); ++i) {
		punit = range->begin + i;
		dev->punits[i].dev = dev;
@@ -126,6 +131,7 @@ test_free_ftl_dev(struct spdk_ftl_dev *dev)
	spdk_set_thread(dev->core_thread.thread);
	spdk_thread_exit(dev->core_thread.thread);
	spdk_thread_destroy(dev->core_thread.thread);
	spdk_mempool_free(dev->lba_pool);
	free(dev->punits);
	free(dev->bands);
	free(dev);
@@ -138,8 +144,7 @@ test_free_ftl_band(struct ftl_band *band)
	spdk_bit_array_free(&band->lba_map.vld);
	spdk_bit_array_free(&band->reloc_bitmap);
	free(band->chunk_buf);
	free(band->lba_map.map);
	spdk_free(band->lba_map.dma_buf);
	spdk_dma_free(band->lba_map.dma_buf);
}

uint64_t