Commit 7b8e7212 authored by Shuhei Matsumoto, committed by Tomasz Zawadzki
Browse files

bdev/nvme: Abort the queued I/O for retry



The NVMe bdev module queues retried I/Os itself now.
bdev_nvme_abort() needs to check and abort the target I/O if it
is queued for retry.

This change will cover admin passthrough requests too because they
will be queued on the same thread as their callers, and the public
API spdk_bdev_abort() requires the abort request to be submitted on
the same thread as the target I/O or admin passthrough request.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: If37e8188bd3875805cef436437439220698124b9
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/9913


Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
parent 22fa0c01
Loading
Loading
Loading
Loading
+14 −3
Original line number Diff line number Diff line
@@ -4687,14 +4687,25 @@ static void
bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
		struct nvme_bdev_io *bio_to_abort)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
	struct spdk_bdev_io *bdev_io_to_abort;
	struct nvme_io_path *io_path;
	struct nvme_ctrlr *nvme_ctrlr;
	int rc = 0;

	bio->io_path = NULL;

	bio->orig_thread = spdk_get_thread();

	/* Traverse the retry_io_list first. */
	TAILQ_FOREACH(bdev_io_to_abort, &nbdev_ch->retry_io_list, module_link) {
		if ((struct nvme_bdev_io *)bdev_io_to_abort->driver_ctx == bio_to_abort) {
			TAILQ_REMOVE(&nbdev_ch->retry_io_list, bdev_io_to_abort, module_link);
			spdk_bdev_io_complete(bdev_io_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);

			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
			return;
		}
	}

	/* Even admin commands, they were submitted to only nvme_ctrlrs which were
	 * on any io_path. So traverse the io_path list for not only I/O commands
	 * but also admin commands.
@@ -4725,7 +4736,7 @@ bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
		/* If no command was found or there was any error, complete the abort
		 * request with failure.
		 */
		spdk_bdev_io_complete(spdk_bdev_io_from_ctx(bio), SPDK_BDEV_IO_STATUS_FAILED);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

+31 −0
Original line number Diff line number Diff line
@@ -2445,6 +2445,37 @@ test_abort(void)

	set_thread(0);

	/* If qpair is disconnected, it is freed and then reconnected via resetting
	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
	 * while resetting the nvme_ctrlr.
	 */
	ctrlr_ch1->qpair->is_connected = false;

	poll_thread_times(0, 3);

	CU_ASSERT(ctrlr_ch1->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	write_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, write_io);

	CU_ASSERT(write_io->internal.in_submit_request == true);
	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));

	/* Aborting the queued write request should succeed immediately. */
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);

	spdk_put_io_channel(ch1);

	set_thread(1);