Commit babe0ba9 authored by Konrad Sztyber, committed by Tomasz Zawadzki
Browse files

nvmf: deprecate spdk_nvmf_request_exec_fabrics()



This function served two purposes:

 1) It allowed non-fabrics transports to execute fabrics commands, as
    spdk_nvmf_request_exec() did not allow it.
 2) It didn't do the checks performed by nvmf_check_subsystem_active(),
    which allowed transports to execute commands before qpair's state
    was set to ACTIVE.

1) is no longer true, as spdk_nvmf_request_exec() was recently changed
to allow non-fabrics transports to execute fabrics commands and 2) can
be worked around in other ways, e.g. by a spdk_thread_send_msg(), which
is what the vfio-user transport will now do to send the CONNECT command.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I712a006296e661a1c3de5dfd003cde9503209bf5
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/22130


Reviewed-by: Jacek Kalwas <jacek.kalwas@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <jim.harris@samsung.com>
Community-CI: Mellanox Build Bot
parent 289eef51
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -55,6 +55,12 @@ The function is deprecated and will be removed in 24.09 release. Please use
Parameters `cb_fn` and `ctx` of `spdk_nvmf_qpair_disconnect` API are deprecated. These parameters
will be removed in 24.05 release.

#### `spdk_nvmf_request_exec_fabrics`

This function is deprecated and will be removed in the 24.09 release.  Instead, users should use
`spdk_nvmf_request_exec()`, which now allows all transports (both fabrics and non-fabrics) to
execute fabrics commands.

#### `nvmf_get_subsystems`

`transport` field in `listen_addresses` of `nvmf_get_subsystems` RPC is deprecated.
+4 −21
Original line number Diff line number Diff line
@@ -4513,31 +4513,14 @@ spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
	return 0;
}

SPDK_LOG_DEPRECATION_REGISTER(nvmf_request_exec_fabrics, "spdk_nvmf_request_exec_fabrics()",
			      "v24.09", 1);
void
spdk_nvmf_request_exec_fabrics(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_subsystem_poll_group *sgroup = NULL;
	enum spdk_nvmf_request_exec_status status;

	if (qpair->ctrlr) {
		sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
	} else if (spdk_unlikely(nvmf_request_is_fabric_connect(req))) {
		sgroup = nvmf_subsystem_pg_from_connect_cmd(req);
	}
	SPDK_LOG_DEPRECATED(nvmf_request_exec_fabrics);

	assert(sgroup != NULL);
	sgroup->mgmt_io_outstanding++;

	/* Place the request on the outstanding list so we can keep track of it */
	TAILQ_INSERT_TAIL(&qpair->outstanding, req, link);

	assert(req->cmd->nvmf_cmd.opcode == SPDK_NVME_OPC_FABRIC);
	status = nvmf_ctrlr_process_fabrics_cmd(req);

	if (status == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
		_nvmf_request_complete(req);
	}
	return spdk_nvmf_request_exec(req);
}

static bool
+0 −2
Original line number Diff line number Diff line
@@ -1898,8 +1898,6 @@ nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
		TAILQ_REMOVE(&sgroup->queued, req, link);
		if (spdk_nvmf_request_using_zcopy(req)) {
			spdk_nvmf_request_zcopy_start(req);
		} else if (nvmf_request_is_fabric_connect(req)) {
			spdk_nvmf_request_exec_fabrics(req);
		} else {
			spdk_nvmf_request_exec(req);
		}
+16 −3
Original line number Diff line number Diff line
@@ -2107,7 +2107,7 @@ handle_create_io_sq(struct nvmf_vfio_user_ctrlr *ctrlr,
	/*
	 * Create our new I/O qpair. This asynchronously invokes, on a suitable
	 * poll group, the nvmf_vfio_user_poll_group_add() callback, which will
	 * call spdk_nvmf_request_exec_fabrics() with a generated fabrics
	 * call spdk_nvmf_request_exec() with a generated fabrics
	 * connect command. This command is then eventually completed via
	 * handle_queue_connect_rsp().
	 */
@@ -3017,7 +3017,7 @@ vfio_user_property_access(struct nvmf_vfio_user_ctrlr *vu_ctrlr,
	req->req.length = count;
	SPDK_IOV_ONE(req->req.iov, &req->req.iovcnt, buf, req->req.length);

	spdk_nvmf_request_exec_fabrics(&req->req);
	spdk_nvmf_request_exec(&req->req);

	return count;
}
@@ -5188,6 +5188,12 @@ handle_queue_connect_rsp(struct nvmf_vfio_user_req *req, void *cb_arg)
	return 0;
}

static void
_nvmf_vfio_user_poll_group_add(void *req)
{
	spdk_nvmf_request_exec(req);
}

/*
 * Add the given qpair to the given poll group. New qpairs are added via
 * spdk_nvmf_tgt_new_qpair(), which picks a poll group via
@@ -5249,7 +5255,14 @@ nvmf_vfio_user_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
		      "%s: sending connect fabrics command for qid:%#x cntlid=%#x\n",
		      ctrlr_id(ctrlr), qpair->qid, data->cntlid);

	spdk_nvmf_request_exec_fabrics(req);
	/*
	 * By the time transport's poll_group_add() callback is executed, the
	 * qpair isn't in the ACTIVE state yet, so spdk_nvmf_request_exec()
	 * would fail.  The state changes to ACTIVE immediately after the
	 * callback finishes, so delay spdk_nvmf_request_exec() by sending a
	 * message.
	 */
	spdk_thread_send_msg(spdk_get_thread(), _nvmf_vfio_user_poll_group_add, req);
	return 0;
}

+0 −1
Original line number Diff line number Diff line
@@ -95,7 +95,6 @@ DEFINE_STUB(nvmf_transport_qpair_get_listen_trid, int,
	    (struct spdk_nvmf_qpair *qpair,
	     struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvmf_request_exec_fabrics, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvmf_request_zcopy_start, (struct spdk_nvmf_request *req));
DEFINE_STUB(spdk_nvmf_get_transport_name, const char *,
	    (struct spdk_nvmf_transport *transport), NULL);
Loading