Commit 8d8208d6 authored by Shuhei Matsumoto, committed by Tomasz Zawadzki
Browse files

bdev/nvme: Clear caching io_path when removing io_path dynamically



User can remove io_path dynamically while processing I/O.

A to-be-retried I/O should clear io_path caching and get another io_path
from scratch for retry.

Verify this by adding a unit test case.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: I891aafbb132c3beaef5cd4f55c9b4fde21aeaae9
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17120


Community-CI: Mellanox Build Bot
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent f9d132a8
Loading
Loading
Loading
Loading
+16 −0
Original line number Diff line number Diff line
@@ -648,6 +648,21 @@ _bdev_nvme_add_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_ns *nvme_
	return 0;
}

/* Invalidate the cached io_path on every I/O queued for retry.
 *
 * When io_path is dynamically removed from the channel, any retried I/O
 * that cached a pointer to it must not resubmit through it; clearing the
 * cache forces a fresh io_path lookup on the next retry attempt.
 */
static void
bdev_nvme_clear_retry_io_path(struct nvme_bdev_channel *nbdev_ch,
			      struct nvme_io_path *io_path)
{
	struct spdk_bdev_io *queued_io;
	struct nvme_bdev_io *nbdev_io;

	TAILQ_FOREACH(queued_io, &nbdev_ch->retry_io_list, module_link) {
		nbdev_io = (struct nvme_bdev_io *)queued_io->driver_ctx;
		if (nbdev_io->io_path != io_path) {
			continue;
		}
		nbdev_io->io_path = NULL;
	}
}

static void
_bdev_nvme_delete_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_io_path *io_path)
{
@@ -666,6 +681,7 @@ _bdev_nvme_delete_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_io_pat
	pthread_mutex_unlock(&nbdev->mutex);

	bdev_nvme_clear_current_io_path(nbdev_ch);
	bdev_nvme_clear_retry_io_path(nbdev_ch, io_path);

	STAILQ_REMOVE(&nbdev_ch->io_path_list, io_path, nvme_io_path, stailq);
	io_path->nbdev_ch = NULL;
+43 −3
Original line number Diff line number Diff line
@@ -6432,7 +6432,7 @@ test_retry_io_to_same_path(void)
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Set retry count to non-zero. */
	g_opts.bdev_retry_count = 1;
	g_opts.bdev_retry_count = 2;

	/* Inject an I/O error. */
	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
@@ -6456,12 +6456,52 @@ test_retry_io_to_same_path(void)
	CU_ASSERT(bio->io_path == io_path2);
	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);

	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Inject an I/O error again. */
	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->cpl.status.crd = 1;

	ctrlr2->cdata.crdt[1] = 1;

	/* The 2nd I/O should be queued to nbdev_ch. */
	spdk_delay_us(1);
	poll_threads();
	poll_thread_times(0, 1);

	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	/* The 2nd I/O should keep caching io_path2. */
	CU_ASSERT(bio->io_path == io_path2);

	/* Detach ctrlr2 dynamically. */
	rc = bdev_nvme_delete("nvme0", &path2);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);

	poll_threads();
	spdk_delay_us(100000);
	poll_threads();
	spdk_delay_us(1);
	poll_threads();

	/* The 2nd I/O should succeed by io_path1. */
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(bio->io_path == io_path1);

	free(bdev_io);