Commit 5742e9b9 authored by Daniel Verkamp
Browse files

nvme: allocate requests on a per-queue basis



Change-Id: I7bec816e518a0a6f2e9fb719128c83d4b908d46c
Signed-off-by: Daniel Verkamp <daniel.verkamp@intel.com>
parent cd13f280
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -107,6 +107,17 @@ struct spdk_nvme_ctrlr_opts {
	 * Unused for local PCIe-attached NVMe devices.
	 */
	char hostnqn[SPDK_NVMF_NQN_MAX_LEN + 1];

	/**
	 * The number of requests to allocate for each NVMe I/O queue.
	 *
	 * This should be at least as large as io_queue_size.
	 *
	 * A single I/O may allocate more than one request, since splitting may be necessary to
	 * conform to the device's maximum transfer size, PRP list compatibility requirements,
	 * or driver-assisted striping.
	 */
	uint32_t io_queue_requests;
};

/**
+6 −16
Original line number Diff line number Diff line
@@ -75,13 +75,15 @@ nvme_allocate_request(struct spdk_nvme_qpair *qpair,
		      const struct nvme_payload *payload, uint32_t payload_size,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req = NULL;
	struct nvme_request *req;

	req = spdk_mempool_get(g_spdk_nvme_driver->request_mempool);
	req = STAILQ_FIRST(&qpair->free_req);
	if (req == NULL) {
		return req;
	}

	STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);

	/*
	 * Only memset up to (but not including) the children
	 *  TAILQ_ENTRY.  children, and following members, are
@@ -190,8 +192,9 @@ nvme_free_request(struct nvme_request *req)
{
	assert(req != NULL);
	assert(req->num_children == 0);
	assert(req->qpair != NULL);

	spdk_mempool_put(g_spdk_nvme_driver->request_mempool, req);
	STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
}

int
@@ -285,19 +288,6 @@ nvme_driver_init(void)
	TAILQ_INIT(&g_spdk_nvme_driver->init_ctrlrs);
	TAILQ_INIT(&g_spdk_nvme_driver->attached_ctrlrs);

	g_spdk_nvme_driver->request_mempool = spdk_mempool_create("nvme_request", 8192,
					      sizeof(struct nvme_request), 128, SPDK_ENV_SOCKET_ID_ANY);
	if (g_spdk_nvme_driver->request_mempool == NULL) {
		SPDK_ERRLOG("unable to allocate pool of requests\n");

		nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
		pthread_mutex_destroy(&g_spdk_nvme_driver->lock);

		spdk_memzone_free(SPDK_NVME_DRIVER_NAME);

		return -1;
	}

	nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);

	return ret;
+5 −0
Original line number Diff line number Diff line
@@ -82,6 +82,7 @@ spdk_nvme_ctrlr_opts_set_defaults(struct spdk_nvme_ctrlr_opts *opts)
	opts->keep_alive_timeout_ms = 10 * 1000;
	opts->io_queue_size = DEFAULT_IO_QUEUE_SIZE;
	strncpy(opts->hostnqn, DEFAULT_HOSTNQN, sizeof(opts->hostnqn));
	opts->io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
}

/**
@@ -223,6 +224,8 @@ spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
	TAILQ_REMOVE(&ctrlr->active_io_qpairs, qpair, tailq);
	spdk_bit_array_set(ctrlr->free_io_qids, qpair->id);

	spdk_free(qpair->req_buf);

	if (nvme_transport_ctrlr_delete_io_qpair(ctrlr, qpair)) {
		nvme_robust_mutex_unlock(&ctrlr->ctrlr_lock);
		return -1;
@@ -1350,6 +1353,8 @@ nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_reg
	ctrlr->opts.io_queue_size = spdk_max(ctrlr->opts.io_queue_size, SPDK_NVME_IO_QUEUE_MIN_ENTRIES);
	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, ctrlr->cap.bits.mqes + 1u);
	ctrlr->opts.io_queue_size = spdk_min(ctrlr->opts.io_queue_size, max_io_queue_size);

	ctrlr->opts.io_queue_requests = spdk_max(ctrlr->opts.io_queue_requests, ctrlr->opts.io_queue_size);
}

void
+8 −2
Original line number Diff line number Diff line
@@ -112,6 +112,9 @@
#define DEFAULT_MAX_IO_QUEUES		(1024)
#define DEFAULT_IO_QUEUE_SIZE		(256)

#define DEFAULT_ADMIN_QUEUE_REQUESTS	(32)
#define DEFAULT_IO_QUEUE_REQUESTS	(512)

#define DEFAULT_HOSTNQN			"nqn.2016-06.io.spdk:host"

enum nvme_payload_type {
@@ -254,6 +257,7 @@ struct nvme_async_event_request {
};

struct spdk_nvme_qpair {
	STAILQ_HEAD(, nvme_request)	free_req;
	STAILQ_HEAD(, nvme_request)	queued_req;

	enum spdk_nvme_transport_type	trtype;
@@ -277,6 +281,8 @@ struct spdk_nvme_qpair {

	/* List entry for spdk_nvme_ctrlr_process::allocated_io_qpairs */
	TAILQ_ENTRY(spdk_nvme_qpair)	per_process_tailq;

	void				*req_buf;
};

struct spdk_nvme_ns {
@@ -458,7 +464,6 @@ struct nvme_driver {
	pthread_mutex_t			lock;
	TAILQ_HEAD(, spdk_nvme_ctrlr)	init_ctrlrs;
	TAILQ_HEAD(, spdk_nvme_ctrlr)	attached_ctrlrs;
	struct spdk_mempool		*request_mempool;
	bool				initialized;
};

@@ -547,7 +552,8 @@ int nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_regist
void	nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap);
int	nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
			struct spdk_nvme_ctrlr *ctrlr,
			enum spdk_nvme_qprio qprio);
			enum spdk_nvme_qprio qprio,
			uint32_t num_requests);
void	nvme_qpair_enable(struct spdk_nvme_qpair *qpair);
void	nvme_qpair_disable(struct spdk_nvme_qpair *qpair);
int	nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair,
+3 −2
Original line number Diff line number Diff line
@@ -573,7 +573,8 @@ nvme_pcie_ctrlr_construct_admin_qpair(struct spdk_nvme_ctrlr *ctrlr)
	rc = nvme_qpair_init(ctrlr->adminq,
			     0, /* qpair ID */
			     ctrlr,
			     SPDK_NVME_QPRIO_URGENT);
			     SPDK_NVME_QPRIO_URGENT,
			     NVME_ADMIN_ENTRIES);
	if (rc != 0) {
		return rc;
	}
@@ -1418,7 +1419,7 @@ nvme_pcie_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,

	qpair = &pqpair->qpair;

	rc = nvme_qpair_init(qpair, qid, ctrlr, qprio);
	rc = nvme_qpair_init(qpair, qid, ctrlr, qprio, ctrlr->opts.io_queue_requests);
	if (rc != 0) {
		nvme_pcie_qpair_destroy(qpair);
		return NULL;
Loading