Commit fdc4c13e authored by Shuhei Matsumoto's avatar Shuhei Matsumoto Committed by Tomasz Zawadzki
Browse files

bdev/nvme: Rename nvme_io_channel to nvme_io_path



In the next patch, nvme_io_channel will be used as an I/O channel
to a single nvme_bdev. This channel is created to a single
nvme_bdev_ctrlr and has a corresponding I/O qpair. nvme_io_path
will be a better name especially when we support multipath.

Signed-off-by: default avatarShuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Ic34162f3c383676c5249396a09173329fc6febce
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/8095


Reviewed-by: default avatarZiye Yang <ziye.yang@intel.com>
Reviewed-by: default avatarAleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: default avatar <dongx.yi@intel.com>
Reviewed-by: default avatarBen Walker <benjamin.walker@intel.com>
Community-CI: Mellanox Build Bot
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
parent 5863f95a
Loading
Loading
Loading
Loading
+76 −74
Original line number Diff line number Diff line
@@ -176,7 +176,7 @@ static int bdev_nvme_get_zone_info(struct spdk_nvme_ns *ns, struct spdk_nvme_qpa
static int bdev_nvme_zone_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				     struct nvme_bdev_io *bio, uint64_t zone_id,
				     enum spdk_bdev_zone_action action);
static int bdev_nvme_admin_passthru(struct nvme_io_channel *nvme_ch,
static int bdev_nvme_admin_passthru(struct nvme_io_path *io_path,
				    struct nvme_bdev_io *bio,
				    struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes);
static int bdev_nvme_io_passthru(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
@@ -185,9 +185,9 @@ static int bdev_nvme_io_passthru(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
static int bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    struct nvme_bdev_io *bio,
				    struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes, void *md_buf, size_t md_len);
static int bdev_nvme_abort(struct nvme_io_channel *nvme_ch,
static int bdev_nvme_abort(struct nvme_io_path *io_path,
			   struct nvme_bdev_io *bio, struct nvme_bdev_io *bio_to_abort);
static int bdev_nvme_reset(struct nvme_io_channel *nvme_ch, struct spdk_bdev_io *bdev_io);
static int bdev_nvme_reset(struct nvme_io_path *io_path, struct spdk_bdev_io *bdev_io);
static int bdev_nvme_failover(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr, bool remove);
static void remove_cb(void *cb_ctx, struct spdk_nvme_ctrlr *ctrlr);

@@ -223,15 +223,15 @@ static config_json_namespace_fn g_config_json_namespace_fn[] = {
};

struct spdk_nvme_qpair *
bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch)
bdev_nvme_get_io_qpair(struct spdk_io_channel *io_path_ch)
{
	struct nvme_io_channel *nvme_ch;
	struct nvme_io_path *io_path;

	assert(ctrlr_io_ch != NULL);
	assert(io_path_ch != NULL);

	nvme_ch = spdk_io_channel_get_ctx(ctrlr_io_ch);
	io_path = spdk_io_channel_get_ctx(io_path_ch);

	return nvme_ch->qpair;
	return io_path->qpair;
}

static int
@@ -368,9 +368,9 @@ bdev_nvme_flush(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
}

static int
bdev_nvme_create_qpair(struct nvme_io_channel *nvme_ch)
bdev_nvme_create_qpair(struct nvme_io_path *io_path)
{
	struct spdk_nvme_ctrlr *ctrlr = nvme_ch->ctrlr->ctrlr;
	struct spdk_nvme_ctrlr *ctrlr = io_path->ctrlr->ctrlr;
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_qpair *qpair;
	int rc;
@@ -386,9 +386,9 @@ bdev_nvme_create_qpair(struct nvme_io_channel *nvme_ch)
		return -1;
	}

	assert(nvme_ch->group != NULL);
	assert(io_path->group != NULL);

	rc = spdk_nvme_poll_group_add(nvme_ch->group->group, qpair);
	rc = spdk_nvme_poll_group_add(io_path->group->group, qpair);
	if (rc != 0) {
		SPDK_ERRLOG("Unable to begin polling on NVMe Channel.\n");
		goto err;
@@ -400,7 +400,7 @@ bdev_nvme_create_qpair(struct nvme_io_channel *nvme_ch)
		goto err;
	}

	nvme_ch->qpair = qpair;
	io_path->qpair = qpair;

	return 0;

@@ -411,17 +411,17 @@ err:
}

static int
bdev_nvme_destroy_qpair(struct nvme_io_channel *nvme_ch)
bdev_nvme_destroy_qpair(struct nvme_io_path *io_path)
{
	int rc;

	if (nvme_ch->qpair == NULL) {
	if (io_path->qpair == NULL) {
		return 0;
	}

	rc = spdk_nvme_ctrlr_free_io_qpair(nvme_ch->qpair);
	rc = spdk_nvme_ctrlr_free_io_qpair(io_path->qpair);
	if (!rc) {
		nvme_ch->qpair = NULL;
		io_path->qpair = NULL;
	}
	return rc;
}
@@ -450,14 +450,14 @@ bdev_nvme_check_pending_destruct(struct spdk_io_channel_iter *i, int status)
}

static void
_bdev_nvme_complete_pending_resets(struct nvme_io_channel *nvme_ch,
_bdev_nvme_complete_pending_resets(struct nvme_io_path *io_path,
				   enum spdk_bdev_io_status status)
{
	struct spdk_bdev_io *bdev_io;

	while (!TAILQ_EMPTY(&nvme_ch->pending_resets)) {
		bdev_io = TAILQ_FIRST(&nvme_ch->pending_resets);
		TAILQ_REMOVE(&nvme_ch->pending_resets, bdev_io, module_link);
	while (!TAILQ_EMPTY(&io_path->pending_resets)) {
		bdev_io = TAILQ_FIRST(&io_path->pending_resets);
		TAILQ_REMOVE(&io_path->pending_resets, bdev_io, module_link);
		spdk_bdev_io_complete(bdev_io, status);
	}
}
@@ -466,9 +466,9 @@ static void
bdev_nvme_complete_pending_resets(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(_ch);
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(_ch);

	_bdev_nvme_complete_pending_resets(nvme_ch, SPDK_BDEV_IO_STATUS_SUCCESS);
	_bdev_nvme_complete_pending_resets(io_path, SPDK_BDEV_IO_STATUS_SUCCESS);

	spdk_for_each_channel_continue(i, 0);
}
@@ -477,9 +477,9 @@ static void
bdev_nvme_abort_pending_resets(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(_ch);
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(_ch);

	_bdev_nvme_complete_pending_resets(nvme_ch, SPDK_BDEV_IO_STATUS_FAILED);
	_bdev_nvme_complete_pending_resets(io_path, SPDK_BDEV_IO_STATUS_FAILED);

	spdk_for_each_channel_continue(i, 0);
}
@@ -559,10 +559,10 @@ static void
_bdev_nvme_reset_create_qpair(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(_ch);
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(_ch);
	int rc;

	rc = bdev_nvme_create_qpair(nvme_ch);
	rc = bdev_nvme_create_qpair(io_path);

	spdk_for_each_channel_continue(i, rc);
}
@@ -598,10 +598,10 @@ static void
_bdev_nvme_reset_destroy_qpair(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
	int rc;

	rc = bdev_nvme_destroy_qpair(nvme_ch);
	rc = bdev_nvme_destroy_qpair(io_path);

	spdk_for_each_channel_continue(i, rc);
}
@@ -634,21 +634,21 @@ _bdev_nvme_reset(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr)
}

static int
bdev_nvme_reset(struct nvme_io_channel *nvme_ch, struct spdk_bdev_io *bdev_io)
bdev_nvme_reset(struct nvme_io_path *io_path, struct spdk_bdev_io *bdev_io)
{
	int rc;

	rc = _bdev_nvme_reset(nvme_ch->ctrlr);
	rc = _bdev_nvme_reset(io_path->ctrlr);
	if (rc == 0) {
		assert(nvme_ch->ctrlr->reset_bdev_io == NULL);
		nvme_ch->ctrlr->reset_bdev_io = bdev_io;
		assert(io_path->ctrlr->reset_bdev_io == NULL);
		io_path->ctrlr->reset_bdev_io = bdev_io;
	} else if (rc == -EAGAIN) {
		/*
		 * Reset call is queued only if it is from the app framework. This is on purpose so that
		 * we don't interfere with the app framework reset strategy. i.e. we are deferring to the
		 * upper level. If they are in the middle of a reset, we won't try to schedule another one.
		 */
		TAILQ_INSERT_TAIL(&nvme_ch->pending_resets, bdev_io, module_link);
		TAILQ_INSERT_TAIL(&io_path->pending_resets, bdev_io, module_link);
	} else {
		return rc;
	}
@@ -746,7 +746,7 @@ bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev->ctxt;
	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_ns *nvme_ns;
	struct spdk_nvme_qpair *qpair;
	int ret;
@@ -756,7 +756,7 @@ bdev_nvme_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		goto exit;
	}

	if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, nvme_ch, &nvme_ns, &qpair))) {
	if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, io_path, &nvme_ns, &qpair))) {
		ret = -ENXIO;
		goto exit;
	}
@@ -780,7 +780,7 @@ exit:
static void
bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev->ctxt;
	struct nvme_bdev_io *nbdev_io = (struct nvme_bdev_io *)bdev_io->driver_ctx;
@@ -789,7 +789,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
	struct spdk_nvme_qpair *qpair;
	int rc = 0;

	if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, nvme_ch, &nvme_ns, &qpair))) {
	if (spdk_unlikely(!bdev_nvme_find_io_path(nbdev, io_path, &nvme_ns, &qpair))) {
		rc = -ENXIO;
		goto exit;
	}
@@ -855,7 +855,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
				     bdev_io->u.bdev.num_blocks);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = bdev_nvme_reset(nvme_ch, bdev_io);
		rc = bdev_nvme_reset(io_path, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = bdev_nvme_flush(nvme_ns->ns,
@@ -891,7 +891,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
					       bdev_io->u.zone_mgmt.zone_action);
		break;
	case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
		rc = bdev_nvme_admin_passthru(nvme_ch,
		rc = bdev_nvme_admin_passthru(io_path,
					      nbdev_io,
					      &bdev_io->u.nvme_passthru.cmd,
					      bdev_io->u.nvme_passthru.buf,
@@ -917,7 +917,7 @@ bdev_nvme_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		nbdev_io_to_abort = (struct nvme_bdev_io *)bdev_io->u.abort.bio_to_abort->driver_ctx;
		rc = bdev_nvme_abort(nvme_ch,
		rc = bdev_nvme_abort(io_path,
				     nbdev_io,
				     nbdev_io_to_abort);
		break;
@@ -996,10 +996,10 @@ bdev_nvme_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
}

static int
bdev_nvme_create_cb(void *io_device, void *ctx_buf)
bdev_nvme_create_path_cb(void *io_device, void *ctx_buf)
{
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = io_device;
	struct nvme_io_channel *nvme_ch = ctx_buf;
	struct nvme_io_path *io_path = ctx_buf;
	struct spdk_io_channel *pg_ch;
	int rc;

@@ -1008,26 +1008,26 @@ bdev_nvme_create_cb(void *io_device, void *ctx_buf)
		return -1;
	}

	nvme_ch->group = spdk_io_channel_get_ctx(pg_ch);
	io_path->group = spdk_io_channel_get_ctx(pg_ch);

#ifdef SPDK_CONFIG_VTUNE
	nvme_ch->group->collect_spin_stat = true;
	io_path->group->collect_spin_stat = true;
#else
	nvme_ch->group->collect_spin_stat = false;
	io_path->group->collect_spin_stat = false;
#endif

	TAILQ_INIT(&nvme_ch->pending_resets);
	TAILQ_INIT(&io_path->pending_resets);

	if (spdk_nvme_ctrlr_is_ocssd_supported(nvme_bdev_ctrlr->ctrlr)) {
		rc = bdev_ocssd_create_io_channel(nvme_ch);
		rc = bdev_ocssd_create_io_channel(io_path);
		if (rc != 0) {
			goto err_ocssd_ch;
		}
	}

	nvme_ch->ctrlr = nvme_bdev_ctrlr;
	io_path->ctrlr = nvme_bdev_ctrlr;

	rc = bdev_nvme_create_qpair(nvme_ch);
	rc = bdev_nvme_create_qpair(io_path);
	if (rc != 0) {
		goto err_qpair;
	}
@@ -1035,8 +1035,8 @@ bdev_nvme_create_cb(void *io_device, void *ctx_buf)
	return 0;

err_qpair:
	if (nvme_ch->ocssd_ch) {
		bdev_ocssd_destroy_io_channel(nvme_ch);
	if (io_path->ocssd_ch) {
		bdev_ocssd_destroy_io_channel(io_path);
	}
err_ocssd_ch:
	spdk_put_io_channel(pg_ch);
@@ -1045,19 +1045,19 @@ err_ocssd_ch:
}

static void
bdev_nvme_destroy_cb(void *io_device, void *ctx_buf)
bdev_nvme_destroy_path_cb(void *io_device, void *ctx_buf)
{
	struct nvme_io_channel *nvme_ch = ctx_buf;
	struct nvme_io_path *io_path = ctx_buf;

	assert(nvme_ch->group != NULL);
	assert(io_path->group != NULL);

	if (nvme_ch->ocssd_ch != NULL) {
		bdev_ocssd_destroy_io_channel(nvme_ch);
	if (io_path->ocssd_ch != NULL) {
		bdev_ocssd_destroy_io_channel(io_path);
	}

	bdev_nvme_destroy_qpair(nvme_ch);
	bdev_nvme_destroy_qpair(io_path);

	spdk_put_io_channel(spdk_io_channel_from_ctx(nvme_ch->group));
	spdk_put_io_channel(spdk_io_channel_from_ctx(io_path->group));
}

static void
@@ -1294,8 +1294,8 @@ bdev_nvme_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *
static uint64_t
bdev_nvme_get_spin_time(struct spdk_io_channel *ch)
{
	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_poll_group *group = nvme_ch->group;
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_poll_group *group = io_path->group;
	uint64_t spin_time;

	if (!group || !group->collect_spin_stat) {
@@ -1832,8 +1832,10 @@ _nvme_bdev_ctrlr_create(struct spdk_nvme_ctrlr *ctrlr,

	nvme_bdev_ctrlr->prchk_flags = prchk_flags;

	spdk_io_device_register(nvme_bdev_ctrlr, bdev_nvme_create_cb, bdev_nvme_destroy_cb,
				sizeof(struct nvme_io_channel),
	spdk_io_device_register(nvme_bdev_ctrlr,
				bdev_nvme_create_path_cb,
				bdev_nvme_destroy_path_cb,
				sizeof(struct nvme_io_path),
				name);

	nvme_bdev_ctrlr->adminq_timer_poller = SPDK_POLLER_REGISTER(bdev_nvme_poll_adminq, nvme_bdev_ctrlr,
@@ -2562,7 +2564,7 @@ bdev_nvme_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
	struct nvme_bdev_io *bio = ref;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct nvme_io_channel *nvme_ch;
	struct nvme_io_path *io_path;
	struct nvme_bdev_ns *nvme_ns;
	struct spdk_nvme_qpair *qpair;
	int ret;
@@ -2574,9 +2576,9 @@ bdev_nvme_readv_done(void *ref, const struct spdk_nvme_cpl *cpl)
		/* Save completion status to use after verifying PI error. */
		bio->cpl = *cpl;

		nvme_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
		io_path = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));

		if (spdk_likely(bdev_nvme_find_io_path(nbdev, nvme_ch, &nvme_ns, &qpair))) {
		if (spdk_likely(bdev_nvme_find_io_path(nbdev, io_path, &nvme_ns, &qpair))) {
			/* Read without PI checking to verify PI error. */
			ret = bdev_nvme_no_pi_readv(nvme_ns->ns,
						    qpair,
@@ -2725,7 +2727,7 @@ bdev_nvme_get_zone_info_done(void *ref, const struct spdk_nvme_cpl *cpl)
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(bio);
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);
	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
	uint64_t zone_id = bdev_io->u.zone_mgmt.zone_id;
	uint32_t zones_to_copy = bdev_io->u.zone_mgmt.num_zones;
	struct spdk_bdev_zone_info *info = bdev_io->u.zone_mgmt.buf;
@@ -2739,7 +2741,7 @@ bdev_nvme_get_zone_info_done(void *ref, const struct spdk_nvme_cpl *cpl)
		goto out_complete_io_nvme_cpl;
	}

	if (!bdev_nvme_find_io_path(nbdev, nvme_ch, &nvme_ns, &qpair)) {
	if (!bdev_nvme_find_io_path(nbdev, io_path, &nvme_ns, &qpair)) {
		ret = -ENXIO;
		goto out_complete_io_ret;
	}
@@ -3251,13 +3253,13 @@ bdev_nvme_zone_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair
}

static int
bdev_nvme_admin_passthru(struct nvme_io_channel *nvme_ch, struct nvme_bdev_io *bio,
bdev_nvme_admin_passthru(struct nvme_io_path *io_path, struct nvme_bdev_io *bio,
			 struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes)
{
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
	uint32_t max_xfer_size;

	if (!bdev_nvme_find_admin_path(nvme_ch, &nvme_bdev_ctrlr)) {
	if (!bdev_nvme_find_admin_path(io_path, &nvme_bdev_ctrlr)) {
		return -EINVAL;
	}

@@ -3327,22 +3329,22 @@ bdev_nvme_io_passthru_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
}

static int
bdev_nvme_abort(struct nvme_io_channel *nvme_ch, struct nvme_bdev_io *bio,
bdev_nvme_abort(struct nvme_io_path *io_path, struct nvme_bdev_io *bio,
		struct nvme_bdev_io *bio_to_abort)
{
	int rc;

	bio->orig_thread = spdk_get_thread();

	rc = spdk_nvme_ctrlr_cmd_abort_ext(nvme_ch->ctrlr->ctrlr,
					   nvme_ch->qpair,
	rc = spdk_nvme_ctrlr_cmd_abort_ext(io_path->ctrlr->ctrlr,
					   io_path->qpair,
					   bio_to_abort,
					   bdev_nvme_abort_done, bio);
	if (rc == -ENOENT) {
		/* If no command was found in I/O qpair, the target command may be
		 * admin command.
		 */
		rc = spdk_nvme_ctrlr_cmd_abort_ext(nvme_ch->ctrlr->ctrlr,
		rc = spdk_nvme_ctrlr_cmd_abort_ext(io_path->ctrlr->ctrlr,
						   NULL,
						   bio_to_abort,
						   bdev_nvme_abort_done, bio);
+1 −1
Original line number Diff line number Diff line
@@ -63,7 +63,7 @@ struct spdk_bdev_nvme_opts {
	bool delay_cmd_submit;
};

struct spdk_nvme_qpair *bdev_nvme_get_io_qpair(struct spdk_io_channel *ctrlr_io_ch);
struct spdk_nvme_qpair *bdev_nvme_get_io_qpair(struct spdk_io_channel *io_path_ch);
void bdev_nvme_get_opts(struct spdk_bdev_nvme_opts *opts);
int bdev_nvme_set_opts(const struct spdk_bdev_nvme_opts *opts);
int bdev_nvme_set_hotplug(bool enabled, uint64_t period_us, spdk_msg_fn cb, void *cb_ctx);
+17 −17
Original line number Diff line number Diff line
@@ -514,7 +514,7 @@ static void
bdev_ocssd_io_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct ocssd_bdev *ocssd_bdev = (struct ocssd_bdev *)bdev_io->bdev->ctxt;
	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_ns *nvme_ns;
	struct spdk_nvme_qpair *qpair;
	int rc;
@@ -524,7 +524,7 @@ bdev_ocssd_io_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
		return;
	}

	if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, nvme_ch,
	if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, io_path,
			  &nvme_ns, &qpair))) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
@@ -639,7 +639,7 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
	struct spdk_ocssd_chunk_information_entry *chunk_info = &ocdev_io->zone_info.chunk_info;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(ctx);
	struct ocssd_bdev *ocssd_bdev = bdev_io->bdev->ctxt;
	struct nvme_io_channel *nvme_ch;
	struct nvme_io_path *io_path;
	struct nvme_bdev_ns *nvme_ns;
	struct spdk_nvme_qpair *qpair;
	struct bdev_ocssd_ns *ocssd_ns;
@@ -651,9 +651,9 @@ bdev_ocssd_zone_info_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
		return;
	}

	nvme_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
	io_path = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));

	if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, nvme_ch, &nvme_ns, &qpair))) {
	if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, io_path, &nvme_ns, &qpair))) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}
@@ -741,14 +741,14 @@ static void bdev_ocssd_submit_request(struct spdk_io_channel *ch, struct spdk_bd
static int
bdev_ocssd_poll_pending(void *ctx)
{
	struct nvme_io_channel *nvme_ch = ctx;
	struct nvme_io_path *io_path = ctx;
	struct ocssd_io_channel *ocssd_ch;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	TAILQ_HEAD(, spdk_bdev_io) pending_requests;
	int num_requests = 0;

	ocssd_ch = nvme_ch->ocssd_ch;
	ocssd_ch = io_path->ocssd_ch;

	TAILQ_INIT(&pending_requests);
	TAILQ_SWAP(&ocssd_ch->pending_requests, &pending_requests, spdk_bdev_io, module_link);
@@ -770,8 +770,8 @@ bdev_ocssd_poll_pending(void *ctx)
static void
bdev_ocssd_delay_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
	struct ocssd_io_channel *ocssd_ch = nvme_ch->ocssd_ch;
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
	struct ocssd_io_channel *ocssd_ch = io_path->ocssd_ch;

	TAILQ_INSERT_TAIL(&ocssd_ch->pending_requests, bdev_io, module_link);
	spdk_poller_resume(ocssd_ch->pending_poller);
@@ -780,13 +780,13 @@ bdev_ocssd_delay_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i
static int
_bdev_ocssd_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_io_channel *nvme_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
	struct ocssd_bdev *ocssd_bdev = (struct ocssd_bdev *)bdev_io->bdev->ctxt;
	struct bdev_ocssd_io *ocdev_io = (struct bdev_ocssd_io *)bdev_io->driver_ctx;
	struct nvme_bdev_ns *nvme_ns;
	struct spdk_nvme_qpair *qpair;

	if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, nvme_ch,
	if (spdk_unlikely(!bdev_nvme_find_io_path(&ocssd_bdev->nvme_bdev, io_path,
			  &nvme_ns, &qpair))) {
		return -1;
	}
@@ -1496,7 +1496,7 @@ bdev_ocssd_depopulate_namespace(struct nvme_bdev_ns *nvme_ns)
}

int
bdev_ocssd_create_io_channel(struct nvme_io_channel *nvme_ch)
bdev_ocssd_create_io_channel(struct nvme_io_path *io_path)
{
	struct ocssd_io_channel *ocssd_ch;

@@ -1505,7 +1505,7 @@ bdev_ocssd_create_io_channel(struct nvme_io_channel *nvme_ch)
		return -ENOMEM;
	}

	ocssd_ch->pending_poller = SPDK_POLLER_REGISTER(bdev_ocssd_poll_pending, nvme_ch, 0);
	ocssd_ch->pending_poller = SPDK_POLLER_REGISTER(bdev_ocssd_poll_pending, io_path, 0);
	if (ocssd_ch->pending_poller == NULL) {
		SPDK_ERRLOG("Failed to register pending requests poller\n");
		free(ocssd_ch);
@@ -1516,16 +1516,16 @@ bdev_ocssd_create_io_channel(struct nvme_io_channel *nvme_ch)
	spdk_poller_pause(ocssd_ch->pending_poller);

	TAILQ_INIT(&ocssd_ch->pending_requests);
	nvme_ch->ocssd_ch = ocssd_ch;
	io_path->ocssd_ch = ocssd_ch;

	return 0;
}

void
bdev_ocssd_destroy_io_channel(struct nvme_io_channel *nvme_ch)
bdev_ocssd_destroy_io_channel(struct nvme_io_path *io_path)
{
	spdk_poller_unregister(&nvme_ch->ocssd_ch->pending_poller);
	free(nvme_ch->ocssd_ch);
	spdk_poller_unregister(&io_path->ocssd_ch->pending_poller);
	free(io_path->ocssd_ch);
}

int
+2 −2
Original line number Diff line number Diff line
@@ -50,8 +50,8 @@ void bdev_ocssd_populate_namespace(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
void bdev_ocssd_depopulate_namespace(struct nvme_bdev_ns *nvme_ns);
void bdev_ocssd_namespace_config_json(struct spdk_json_write_ctx *w, struct nvme_bdev_ns *nvme_ns);

int bdev_ocssd_create_io_channel(struct nvme_io_channel *ioch);
void bdev_ocssd_destroy_io_channel(struct nvme_io_channel *ioch);
int bdev_ocssd_create_io_channel(struct nvme_io_path *ioch);
void bdev_ocssd_destroy_io_channel(struct nvme_io_path *ioch);

int bdev_ocssd_init_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
void bdev_ocssd_fini_ctrlr(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
+6 −6
Original line number Diff line number Diff line
@@ -154,7 +154,7 @@ struct nvme_async_probe_ctx {

struct ocssd_io_channel;

struct nvme_io_channel {
struct nvme_io_path {
	struct nvme_bdev_ctrlr		*ctrlr;
	struct spdk_nvme_qpair		*qpair;
	struct nvme_bdev_poll_group	*group;
@@ -178,24 +178,24 @@ void nvme_bdev_ctrlr_destruct(struct nvme_bdev_ctrlr *nvme_bdev_ctrlr);
void nvme_bdev_ctrlr_unregister(void *ctx);

static inline bool
bdev_nvme_find_io_path(struct nvme_bdev *nbdev, struct nvme_io_channel *nvme_ch,
bdev_nvme_find_io_path(struct nvme_bdev *nbdev, struct nvme_io_path *io_path,
		       struct nvme_bdev_ns **_nvme_ns, struct spdk_nvme_qpair **_qpair)
{
	if (spdk_unlikely(nvme_ch->qpair == NULL)) {
	if (spdk_unlikely(io_path->qpair == NULL)) {
		/* The device is currently resetting. */
		return false;
	}

	*_nvme_ns = nbdev->nvme_ns;
	*_qpair = nvme_ch->qpair;
	*_qpair = io_path->qpair;
	return true;
}

static inline bool
bdev_nvme_find_admin_path(struct nvme_io_channel *nvme_ch,
bdev_nvme_find_admin_path(struct nvme_io_path *io_path,
			  struct nvme_bdev_ctrlr **_nvme_bdev_ctrlr)
{
	*_nvme_bdev_ctrlr = nvme_ch->ctrlr;
	*_nvme_bdev_ctrlr = io_path->ctrlr;
	return true;
}

Loading