Commit 8346b573 authored by Shuhei Matsumoto, committed by Jim Harris
Browse files

bdev/nvme: Unify _bdev_nvme_find_io_path() and bdev_nvme_find_next_io_path()



Unify _bdev_nvme_find_io_path() and bdev_nvme_find_next_io_path()
into _bdev_nvme_find_io_path() by modifying nvme_io_path_get_next().

For active/passive policy, _bdev_nvme_find_io_path() is called only if
nbdev_ch->current_io_path is NULL. Hence, the prev parameter is not
necessary anymore.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: If61b8a24b768a1d571c0033b91d9d9bd487b5cf7
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16189


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Richael Zhuang <richael.zhuang@arm.com>
parent 7baa78c8
Loading
Loading
Loading
Loading
+15 −41
Original line number Diff line number Diff line
@@ -826,21 +826,22 @@ nvme_io_path_get_next(struct nvme_bdev_channel *nbdev_ch, struct nvme_io_path *p
{
	struct nvme_io_path *next_path;

	if (prev_path != NULL) {
		next_path = STAILQ_NEXT(prev_path, stailq);
		if (next_path != NULL) {
			return next_path;
	} else {
		return STAILQ_FIRST(&nbdev_ch->io_path_list);
		}
	}

	return STAILQ_FIRST(&nbdev_ch->io_path_list);
}

static struct nvme_io_path *
bdev_nvme_find_next_io_path(struct nvme_bdev_channel *nbdev_ch,
			    struct nvme_io_path *prev)
_bdev_nvme_find_io_path(struct nvme_bdev_channel *nbdev_ch)
{
	struct nvme_io_path *io_path, *start, *non_optimized = NULL;

	start = nvme_io_path_get_next(nbdev_ch, prev);
	start = nvme_io_path_get_next(nbdev_ch, nbdev_ch->current_io_path);

	io_path = start;
	do {
@@ -862,39 +863,12 @@ bdev_nvme_find_next_io_path(struct nvme_bdev_channel *nbdev_ch,
		io_path = nvme_io_path_get_next(nbdev_ch, io_path);
	} while (io_path != start);

	if (nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE) {
		/* We come here only if there is no optimized path. Cache even non_optimized
		 * path for load balance across multiple non_optimized paths.
		 */
		nbdev_ch->current_io_path = non_optimized;
	return non_optimized;
}

static struct nvme_io_path *
_bdev_nvme_find_io_path(struct nvme_bdev_channel *nbdev_ch)
{
	struct nvme_io_path *io_path, *start, *non_optimized = NULL;

	start = STAILQ_FIRST(&nbdev_ch->io_path_list);

	io_path = start;
	do {
		if (spdk_likely(nvme_io_path_is_connected(io_path) &&
				!io_path->nvme_ns->ana_state_updating)) {
			switch (io_path->nvme_ns->ana_state) {
			case SPDK_NVME_ANA_OPTIMIZED_STATE:
				nbdev_ch->current_io_path = io_path;
				return io_path;
			case SPDK_NVME_ANA_NON_OPTIMIZED_STATE:
				if (non_optimized == NULL) {
					non_optimized = io_path;
	}
				break;
			default:
				break;
			}
		}
		io_path = nvme_io_path_get_next(nbdev_ch, io_path);
	} while (io_path != start);

	return non_optimized;
}
@@ -909,7 +883,7 @@ bdev_nvme_find_io_path(struct nvme_bdev_channel *nbdev_ch)
	if (spdk_likely(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE)) {
		return nbdev_ch->current_io_path;
	} else {
		return bdev_nvme_find_next_io_path(nbdev_ch, nbdev_ch->current_io_path);
		return _bdev_nvme_find_io_path(nbdev_ch);
	}
}