Commit 635d0cbe authored by Jim Harris

nvme: allocate extra request for fabrics connect

With async connect, we need to avoid the case where the
initiator is still sending the icreq while the application
submits enough I/O to exhaust the request objects, leaving
none for the FABRICS/CONNECT command that must be sent
once the icreq is done.

So allocate an extra request, and use it when sending the
FABRICS/CONNECT command, rather than trying to pull one
from the qpair's free_req STAILQ.

Fixes issue #2371.
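
The fix generalizes to any pool-backed submission path that must
guarantee forward progress for one control command. Below is a
minimal sketch of the pattern using hypothetical pool/request types
(not SPDK's actual structures): one extra object is allocated up
front, kept out of the free list, and never returned to it on free.

/* Illustrative sketch only - hypothetical types, not SPDK code. */
#include <stdlib.h>

struct request {
	struct request *next;		/* link for the singly-linked free list */
};

struct pool {
	struct request *free_head;	/* general-purpose requests */
	struct request *reserved;	/* analog of the qpair's reserved_req */
	struct request *buf;
};

static int pool_init(struct pool *p, size_t num_requests)
{
	size_t i;

	/* Allocate one extra object for the reserved request. */
	p->buf = calloc(num_requests + 1, sizeof(struct request));
	if (p->buf == NULL) {
		return -1;
	}
	p->free_head = NULL;
	p->reserved = &p->buf[0];	/* never enters the free list */
	for (i = 1; i <= num_requests; i++) {
		p->buf[i].next = p->free_head;
		p->free_head = &p->buf[i];
	}
	return 0;
}

/* Mirror of the nvme_free_request() change below: the reserved
 * object must not be returned to the free list when it completes. */
static void pool_free_req(struct pool *p, struct request *req)
{
	if (req != p->reserved) {
		req->next = p->free_head;
		p->free_head = req;
	}
}

Allocation would pop from free_head as usual; the connect-equivalent
path would take p->reserved directly, just as the patched
nvme_fabric_qpair_connect_async() takes qpair->reserved_req.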

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: If42a3fbb3fd9d863ee48cf5cae75a9ba1754c349
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11515
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@mellanox.com>
parent a97200ad
+9 −4
@@ -533,6 +533,7 @@ nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entr
 	struct spdk_nvmf_fabric_connect_cmd cmd;
 	struct spdk_nvmf_fabric_connect_data *nvmf_data;
 	struct spdk_nvme_ctrlr *ctrlr;
+	struct nvme_request *req;
 	int rc;
 
 	if (num_entries == 0 || num_entries > SPDK_NVME_IO_QUEUE_MAX_ENTRIES) {
@@ -567,6 +568,10 @@ nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entr
 	cmd.sqsize = num_entries - 1;
 	cmd.kato = ctrlr->opts.keep_alive_timeout_ms;
 
+	assert(qpair->reserved_req != NULL);
+	req = qpair->reserved_req;
+	memcpy(&req->cmd, &cmd, sizeof(cmd));
+
 	if (nvme_qpair_is_admin_queue(qpair)) {
 		nvmf_data->cntlid = 0xFFFF;
 	} else {
@@ -579,10 +584,10 @@ nvme_fabric_qpair_connect_async(struct spdk_nvme_qpair *qpair, uint32_t num_entr
 	snprintf(nvmf_data->hostnqn, sizeof(nvmf_data->hostnqn), "%s", ctrlr->opts.hostnqn);
 	snprintf(nvmf_data->subnqn, sizeof(nvmf_data->subnqn), "%s", ctrlr->trid.subnqn);
 
-	rc = spdk_nvme_ctrlr_cmd_io_raw(ctrlr, qpair,
-					(struct spdk_nvme_cmd *)&cmd,
-					nvmf_data, sizeof(*nvmf_data),
-					nvme_completion_poll_cb, status);
+	NVME_INIT_REQUEST(req, nvme_completion_poll_cb, status, NVME_PAYLOAD_CONTIG(nvmf_data, NULL),
+			  sizeof(*nvmf_data), 0);
+
+	rc = nvme_qpair_submit_request(qpair, req);
 	if (rc < 0) {
 		SPDK_ERRLOG("Failed to allocate/submit FABRIC_CONNECT command, rc %d\n", rc);
 		spdk_free(status->dma_data);
+9 −1
@@ -453,6 +453,9 @@ struct spdk_nvme_qpair {
 
 	enum spdk_nvme_transport_type		trtype;
 
+	/* request object used only for this qpair's FABRICS/CONNECT command (if needed) */
+	struct nvme_request			*reserved_req;
+
 	STAILQ_HEAD(, nvme_request)		free_req;
 	STAILQ_HEAD(, nvme_request)		queued_req;
 
@@ -1356,8 +1359,13 @@ nvme_free_request(struct nvme_request *req)
 	assert(req->num_children == 0);
 	assert(req->qpair != NULL);
 
-	STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
+	/* The reserved_req does not go in the free_req STAILQ - it is
+	 * saved only for use with a FABRICS/CONNECT command.
+	 */
+	if (spdk_likely(req->qpair->reserved_req != req)) {
+		STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
+	}
 }
 
 static inline void
 nvme_qpair_set_state(struct spdk_nvme_qpair *qpair, enum nvme_qpair_state state)
+8 −1
@@ -819,6 +819,9 @@ nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
 
 	req_size_padded = (sizeof(struct nvme_request) + 63) & ~(size_t)63;
 
+	/* Add one for the reserved_req */
+	num_requests++;
+
 	qpair->req_buf = spdk_zmalloc(req_size_padded * num_requests, 64, NULL,
 				      SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_SHARE);
 	if (qpair->req_buf == NULL) {
@@ -831,8 +834,12 @@ nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
 		struct nvme_request *req = qpair->req_buf + i * req_size_padded;
 
 		req->qpair = qpair;
-		STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
+		if (i == 0) {
+			qpair->reserved_req = req;
+		} else {
+			STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
+		}
 	}
 
 	return 0;
 }
+14 −20
@@ -83,23 +83,12 @@ static struct spdk_nvmf_fabric_connect_data g_nvmf_data;
 static struct nvme_request *g_request;
 
 int
-spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
-			   struct spdk_nvme_qpair *qpair,
-			   struct spdk_nvme_cmd *cmd,
-			   void *buf, uint32_t len,
-			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
 {
-	struct nvme_request	*req;
+	CU_ASSERT(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
 
-	req = nvme_allocate_request_contig(qpair, buf, len, cb_fn, cb_arg);
-
-	if (req == NULL) {
-		return -ENOMEM;
-	}
-
-	memcpy(&req->cmd, cmd, sizeof(req->cmd));
-	memcpy(&g_nvmf_data, buf, sizeof(g_nvmf_data));
 	g_request = req;
+	memcpy(&g_nvmf_data, req->payload.contig_or_cb_arg, sizeof(g_nvmf_data));
 
 	return 0;
 }
@@ -368,6 +357,7 @@ static void
 test_nvme_fabric_qpair_connect(void)
 {
 	struct spdk_nvme_qpair qpair = {};
+	struct nvme_request	reserved_req = {};
 	struct nvme_request	req = {};
 	struct spdk_nvme_ctrlr ctrlr = {};
 	struct spdk_nvmf_fabric_connect_cmd *cmd = NULL;
@@ -379,11 +369,13 @@ test_nvme_fabric_qpair_connect(void)
 		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
 	};
 
-	cmd = (void *)&req.cmd;
+	cmd = (void *)&reserved_req.cmd;
 	qpair.ctrlr = &ctrlr;
 	req.qpair = &qpair;
+	reserved_req.qpair = &qpair;
 	STAILQ_INIT(&qpair.free_req);
 	STAILQ_INSERT_HEAD(&qpair.free_req, &req, stailq);
+	qpair.reserved_req = &reserved_req;
 	memset(&g_nvmf_data, 0, sizeof(g_nvmf_data));
 
 	qpair.id = 1;
@@ -404,12 +396,13 @@ test_nvme_fabric_qpair_connect(void)
 	CU_ASSERT(!strncmp(g_nvmf_data.hostid, ctrlr.opts.extended_host_id, sizeof(g_nvmf_data.hostid)));
 	CU_ASSERT(!strncmp(g_nvmf_data.hostnqn, ctrlr.opts.hostnqn, sizeof(ctrlr.opts.hostnqn)));
 	CU_ASSERT(!strncmp(g_nvmf_data.subnqn, ctrlr.trid.subnqn, sizeof(ctrlr.trid.subnqn)));
-	CU_ASSERT(STAILQ_EMPTY(&qpair.free_req));
+	/* Make sure we used the qpair's reserved_req, and not one from the STAILQ */
+	CU_ASSERT(g_request == qpair.reserved_req);
+	CU_ASSERT(!STAILQ_EMPTY(&qpair.free_req));
 
 	/* qid is adminq */
 	memset(&g_nvmf_data, 0, sizeof(g_nvmf_data));
-	memset(&req, 0, sizeof(req));
-	STAILQ_INSERT_HEAD(&qpair.free_req, &req, stailq);
+	memset(&reserved_req, 0, sizeof(reserved_req));
 	qpair.id = 0;
 	ctrlr.cntlid = 0;
 
@@ -425,10 +418,11 @@ test_nvme_fabric_qpair_connect(void)
 	CU_ASSERT(!strncmp(g_nvmf_data.hostid, ctrlr.opts.extended_host_id, sizeof(g_nvmf_data.hostid)));
 	CU_ASSERT(!strncmp(g_nvmf_data.hostnqn, ctrlr.opts.hostnqn, sizeof(ctrlr.opts.hostnqn)));
 	CU_ASSERT(!strncmp(g_nvmf_data.subnqn, ctrlr.trid.subnqn, sizeof(ctrlr.trid.subnqn)));
-	CU_ASSERT(STAILQ_EMPTY(&qpair.free_req));
+	/* Make sure we used the qpair's reserved_req, and not one from the STAILQ */
+	CU_ASSERT(g_request == qpair.reserved_req);
+	CU_ASSERT(!STAILQ_EMPTY(&qpair.free_req));
 
 	/* Wait_for completion timeout */
-	STAILQ_INSERT_HEAD(&qpair.free_req, &req, stailq);
 	g_nvme_wait_for_completion_timeout = true;
 
 	rc = nvme_fabric_qpair_connect(&qpair, 1);