Commit 4e06de69 authored by Jacek Kalwas, committed by Tomasz Zawadzki
Browse files

nvme: expose functions to manage queue identifiers



In cases where the SPDK nvme driver is being used as a validation/test
vehicle, users may need to allocate a currently unused qid that can be
used for creating queues using the raw interfaces. One example would be
testing N:1 SQ:CQ mappings which are supported by PCIe controllers but
not through the standard SPDK nvme driver APIs.

These new functions fulfill this purpose, and ensure that the allocated
qid will not be used by the SPDK driver for any future queues allocated
through the spdk_nvme_ctrlr_alloc_io_qpair API.

Signed-off-by: Jacek Kalwas <jacek.kalwas@intel.com>
Change-Id: I21c33596ec415c2816728a600972b242da9d971b
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/3896


Community-CI: Broadcom CI
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 848e9e2d
Loading
Loading
Loading
Loading
+21 −0
Original line number Diff line number Diff line
@@ -1993,6 +1993,27 @@ void spdk_nvme_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr);
const struct spdk_nvme_transport_id *spdk_nvme_ctrlr_get_transport_id(
	struct spdk_nvme_ctrlr *ctrlr);

/**
 * \brief Allocate an NVMe I/O queue identifier.
 *
 * This function is only needed for the non-standard case of allocating queues using the raw
 * command interface. In most cases \ref spdk_nvme_ctrlr_alloc_io_qpair should be sufficient.
 *
 * \param ctrlr Opaque handle to NVMe controller.
 * \return qid on success, -1 on failure.
 */
int32_t spdk_nvme_ctrlr_alloc_qid(struct spdk_nvme_ctrlr *ctrlr);

/**
 * \brief Free NVMe I/O queue identifier.
 *
 * This function must only be called with qids previously allocated with \ref spdk_nvme_ctrlr_alloc_qid.
 *
 * \param ctrlr Opaque handle to NVMe controller.
 * \param qid NVMe Queue Identifier.
 */
void spdk_nvme_ctrlr_free_qid(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid);

/**
 * Opaque handle for a poll group. A poll group is a collection of spdk_nvme_qpair
 * objects that are polled for completions as a unit.
+36 −12
Original line number Diff line number Diff line
@@ -323,7 +323,7 @@ static struct spdk_nvme_qpair *
nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			   const struct spdk_nvme_io_qpair_opts *opts)
{
	uint32_t				qid;
	int32_t					qid;
	struct spdk_nvme_qpair			*qpair;
	union spdk_nvme_cc_register		cc;

@@ -353,12 +353,8 @@ nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
		return NULL;
	}

	/*
	 * Get the first available I/O queue ID.
	 */
	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
	if (qid > ctrlr->opts.num_io_queues) {
		SPDK_ERRLOG("No free I/O queue IDs\n");
	qid = spdk_nvme_ctrlr_alloc_qid(ctrlr);
	if (qid < 0) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}
@@ -366,11 +362,11 @@ nvme_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
	qpair = nvme_transport_ctrlr_create_io_qpair(ctrlr, qid, opts);
	if (qpair == NULL) {
		SPDK_ERRLOG("nvme_transport_ctrlr_create_io_qpair() failed\n");
		spdk_nvme_ctrlr_free_qid(ctrlr, qid);
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return NULL;
	}

	spdk_bit_array_clear(ctrlr->free_io_qids, qid);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	nvme_ctrlr_proc_add_io_qpair(qpair);
@@ -584,7 +580,7 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
	nvme_ctrlr_proc_remove_io_qpair(qpair);

	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);
	spdk_nvme_ctrlr_free_qid(ctrlr, qpair->id);

	if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
@@ -1811,11 +1807,11 @@ nvme_ctrlr_set_num_queues_done(void *arg, const struct spdk_nvme_cpl *cpl)
		return;
	}

	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue. */
	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	/* Initialize list of free I/O queue IDs. QID 0 is the admin queue (implicitly allocated). */
	for (i = 1; i <= ctrlr->opts.num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
		spdk_nvme_ctrlr_free_qid(ctrlr, i);
	}

	nvme_ctrlr_set_state(ctrlr, NVME_CTRLR_STATE_CONSTRUCT_NS,
			     ctrlr->opts.admin_timeout_ms);
}
@@ -3582,6 +3578,34 @@ spdk_nvme_ctrlr_get_transport_id(struct spdk_nvme_ctrlr *ctrlr)
	return &ctrlr->trid;
}

/*
 * Reserve the lowest currently-free I/O queue identifier for the caller.
 *
 * The search starts at bit 1, so QID 0 (the admin queue) is never handed
 * out.  On success the qid is cleared in the controller's free_io_qids
 * bit array so later allocations cannot return it again.
 *
 * \param ctrlr Opaque handle to NVMe controller.
 * \return the reserved qid on success, -1 when no ID in
 *         [1, num_io_queues] is free.
 */
int32_t
spdk_nvme_ctrlr_alloc_qid(struct spdk_nvme_ctrlr *ctrlr)
{
	int32_t rc = -1;
	uint32_t qid;

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	qid = spdk_bit_array_find_first_set(ctrlr->free_io_qids, 1);
	if (qid <= ctrlr->opts.num_io_queues) {
		/* Mark the qid as in use before dropping the lock. */
		spdk_bit_array_clear(ctrlr->free_io_qids, qid);
		rc = (int32_t)qid;
	} else {
		/* find_first_set returned past the valid range (or UINT32_MAX). */
		SPDK_ERRLOG("No free I/O queue IDs\n");
	}
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);

	return rc;
}

/*
 * Return a previously allocated I/O queue identifier to the free pool.
 *
 * Must only be called with qids obtained from spdk_nvme_ctrlr_alloc_qid().
 * QID 0 is the admin queue and is never handed out by alloc_qid, so the
 * assert rejects it as well as out-of-range values; the original check
 * (qid <= num_io_queues) would silently accept freeing the admin qid.
 *
 * \param ctrlr Opaque handle to NVMe controller.
 * \param qid NVMe queue identifier to release.
 */
void
spdk_nvme_ctrlr_free_qid(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid)
{
	assert(qid >= 1 && qid <= ctrlr->opts.num_io_queues);

	nvme_robust_mutex_lock(&ctrlr->ctrlr_lock);
	spdk_bit_array_set(ctrlr->free_io_qids, qid);
	nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
}

/* FIXME need to specify max number of iovs */
int
spdk_nvme_map_prps(void *prv, struct spdk_nvme_cmd *cmd, struct iovec *iovs,
+2 −0
Original line number Diff line number Diff line
@@ -83,6 +83,8 @@
	spdk_nvme_ctrlr_map_cmb;
	spdk_nvme_ctrlr_unmap_cmb;
	spdk_nvme_ctrlr_get_transport_id;
	spdk_nvme_ctrlr_alloc_qid;
	spdk_nvme_ctrlr_free_qid;

	spdk_nvme_poll_group_create;
	spdk_nvme_poll_group_add;