Commit 60d80d59 authored by Shuhei Matsumoto's avatar Shuhei Matsumoto Committed by Tomasz Zawadzki
Browse files

bdev/nvme: Fix pending resets to move to next ctrlr



Handling of pending reset requests is per ctrlr. If multipath is configured,
reset requests from the upper layer should reset all ctrlrs in a bdev ctrlr.
Based on this, a pending reset request should move on to the next ctrlr
rather than return to the upper layer. However, the implementation
differed from this. This patch fixes the bug.

Signed-off-by: default avatarShuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: I99fa606ff83ef32dc2eb04518f452029bbd9f443
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/21603


Reviewed-by: default avatarAleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: default avatarJim Harris <jim.harris@samsung.com>
Community-CI: Mellanox Build Bot
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
parent 2e6e9553
Loading
Loading
Loading
Loading
+18 −10
Original line number Diff line number Diff line
@@ -1793,22 +1793,25 @@ err:
	return rc;
}

static void bdev_nvme_reset_io_continue(void *cb_arg, int rc);

static void
bdev_nvme_complete_pending_resets(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_ctrlr_channel *ctrlr_ch = spdk_io_channel_get_ctx(_ch);
	enum spdk_bdev_io_status status = SPDK_BDEV_IO_STATUS_SUCCESS;
	int rc = 0;
	struct nvme_bdev_io *bio;

	if (spdk_io_channel_iter_get_ctx(i) != NULL) {
		status = SPDK_BDEV_IO_STATUS_FAILED;
		rc = -1;
	}

	while (!TAILQ_EMPTY(&ctrlr_ch->pending_resets)) {
		bio = TAILQ_FIRST(&ctrlr_ch->pending_resets);
		TAILQ_REMOVE(&ctrlr_ch->pending_resets, bio, retry_link);
		__bdev_nvme_io_complete(spdk_bdev_io_from_ctx(bio), status, NULL);

		bdev_nvme_reset_io_continue(bio, rc);
	}

	spdk_for_each_channel_continue(i, 0);
@@ -2813,10 +2816,14 @@ _bdev_nvme_reset_io(struct nvme_io_path *io_path, struct nvme_bdev_io *bio)

	rc = nvme_ctrlr_op(io_path->qpair->ctrlr, NVME_CTRLR_OP_RESET,
			   bdev_nvme_reset_io_continue, bio);
	if (rc == 0) {
	if (rc != 0 && rc != -EBUSY) {
		return rc;
	}

	assert(bio->io_path == NULL);
	bio->io_path = io_path;
	} else if (rc == -EBUSY) {

	if (rc == -EBUSY) {
		ctrlr_ch = io_path->qpair->ctrlr_ch;
		assert(ctrlr_ch != NULL);
		/*
@@ -2825,10 +2832,9 @@ _bdev_nvme_reset_io(struct nvme_io_path *io_path, struct nvme_bdev_io *bio)
		 * upper level. If they are in the middle of a reset, we won't try to schedule another one.
		 */
		TAILQ_INSERT_TAIL(&ctrlr_ch->pending_resets, bio, retry_link);
		rc = 0;
	}

	return rc;
	return 0;
}

static void
@@ -2846,7 +2852,9 @@ bdev_nvme_reset_io(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio)
	rc = _bdev_nvme_reset_io(io_path, bio);
	if (rc != 0) {
		/* If the current nvme_ctrlr is disabled, skip it and move to the next nvme_ctrlr. */
		bdev_nvme_reset_io_continue(bio, rc == -EALREADY);
		rc = (rc == -EALREADY) ? 0 : rc;

		bdev_nvme_reset_io_continue(bio, rc);
	}
}