Commit 1551197d authored by Alexey Marchuk's avatar Alexey Marchuk Committed by Tomasz Zawadzki
Browse files

rpc: Deprecate max_qpairs_per_ctrlr parameter



This parameter describes the total number of admin and IO
qpairs, but the admin qpair always exists and should not
be configured explicitly.
Introduce a new parameter `max_io_qpairs_per_ctrlr`
which configures the number of IO qpairs.
Internal structure of NVMF transport is not changed,
both RPC parameters configure the same nvmf transport parameter.

Deprecate max_qpairs_per_ctrlr in spdkcli as well

Side change: update dif_insert_or_strip description -
it can be used by TCP and RDMA transports

Config files parsing is not changed since it is deprecated

Fixes #1378

Change-Id: I8403ee6fcf090bb5e86a32e4868fea5924daed23
Signed-off-by: default avatarAlexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2279


Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarShuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: default avatarAnil Veerabhadrappa <anil.veerabhadrappa@broadcom.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
Community-CI: Broadcom CI
parent 074cdb4d
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -26,6 +26,14 @@ Two providers are available - verbs (used by default when RDMA is enabled or ena
using --with-rdma=verbs) and mlx5 Direct Verbs aka DV (enabled by --with-rdma=mlx5_dv).
Using mlx5_dv requires libmlx5 installed on the system.

### rpc

Parameter `-p` or `--max-qpairs-per-ctrlr` of `nvmf_create_transport` RPC command accepted by the
rpc.py script is deprecated, new parameter `-m` or `--max-io-qpairs-per-ctrlr` is added.

Parameter `max_qpairs_per_ctrlr` of `nvmf_create_transport` RPC command accepted by the NVMF target
is deprecated, new parameter `max_io_qpairs_per_ctrlr` is added.

### sock

Added `spdk_sock_impl_get_opts` and `spdk_sock_impl_set_opts` functions to set/get socket layer configuration
+4 −3
Original line number Diff line number Diff line
@@ -4021,7 +4021,8 @@ Name | Optional | Type | Description
trtype                      | Required | string  | Transport type (ex. RDMA)
tgt_name                    | Optional | string  | Parent NVMe-oF target name.
max_queue_depth             | Optional | number  | Max number of outstanding I/O per queue
max_qpairs_per_ctrlr        | Optional | number  | Max number of SQ and CQ per controller
max_qpairs_per_ctrlr        | Optional | number  | Max number of SQ and CQ per controller (deprecated, use max_io_qpairs_per_ctrlr)
max_io_qpairs_per_ctrlr     | Optional | number  | Max number of IO qpairs per controller
in_capsule_data_size        | Optional | number  | Max size of in-capsule data (bytes)
max_io_size                 | Optional | number  | Max I/O size (bytes)
io_unit_size                | Optional | number  | I/O unit size (bytes)
@@ -4031,7 +4032,7 @@ buf_cache_size | Optional | number | The number of shared buffers
max_srq_depth               | Optional | number  | The number of elements in a per-thread shared receive queue (RDMA only)
no_srq                      | Optional | boolean | Disable shared receive queue even for devices that support it. (RDMA only)
c2h_success                 | Optional | boolean | Disable C2H success optimization (TCP only)
dif_insert_or_strip         | Optional | boolean | Enable DIF insert for write I/O and DIF strip for read I/O DIF (TCP only)
dif_insert_or_strip         | Optional | boolean | Enable DIF insert for write I/O and DIF strip for read I/O
sock_priority               | Optional | number  | The socket priority of the connection owned by this transport (TCP only)

### Example
@@ -4565,7 +4566,7 @@ Example response:
    {
      "type": "RDMA".
      "max_queue_depth": 128,
      "max_qpairs_per_ctrlr": 64,
      "max_io_qpairs_per_ctrlr": 64,
      "in_capsule_data_size": 4096,
      "max_io_size": 131072,
      "io_unit_size": 131072
+2 −2
Original line number Diff line number Diff line
@@ -1821,11 +1821,11 @@ nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)

	SPDK_INFOLOG(SPDK_LOG_NVMF_FC, "*** FC Transport Init ***\n"
		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
		     "  max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr,
		     opts->max_qpairs_per_ctrlr - 1,
		     opts->io_unit_size,
		     opts->max_aq_depth);

+2 −1
Original line number Diff line number Diff line
@@ -525,7 +525,8 @@ spdk_nvmf_tgt_write_config_json(struct spdk_json_write_ctx *w, struct spdk_nvmf_
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "trtype", spdk_nvme_transport_id_trtype_str(transport->ops->type));
		spdk_json_write_named_uint32(w, "max_queue_depth", transport->opts.max_queue_depth);
		spdk_json_write_named_uint32(w, "max_qpairs_per_ctrlr", transport->opts.max_qpairs_per_ctrlr);
		spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr",
					     transport->opts.max_qpairs_per_ctrlr - 1);
		spdk_json_write_named_uint32(w, "in_capsule_data_size", transport->opts.in_capsule_data_size);
		spdk_json_write_named_uint32(w, "max_io_size", transport->opts.max_io_size);
		spdk_json_write_named_uint32(w, "io_unit_size", transport->opts.io_unit_size);
+46 −2
Original line number Diff line number Diff line
@@ -1559,6 +1559,46 @@ struct nvmf_rpc_create_transport_ctx {
	struct spdk_jsonrpc_request	*request;
};

/**
 * `max_qpairs_per_ctrlr` represents both admin and IO qpairs, that confuses
 * users when they configure a transport using RPC. So it was decided to
 * deprecate `max_qpairs_per_ctrlr` RPC parameter and use `max_io_qpairs_per_ctrlr`
 * But internal logic remains unchanged and SPDK expects that
 * spdk_nvmf_transport_opts::max_qpairs_per_ctrlr includes an admin qpair.
 * This function parses the number of IO qpairs and adds +1 for admin qpair.
 *
 * \param val JSON value holding the user-supplied IO qpair count.
 * \param out Pointer to a uint16_t that receives the total qpair count
 *            (IO qpairs + 1 admin qpair).
 * \return 0 on success, negative errno on parse failure or overflow.
 */
static int
nvmf_rpc_decode_max_io_qpairs(const struct spdk_json_val *val, void *out)
{
	uint16_t *i = out;
	int rc;

	rc = spdk_json_number_to_uint16(val, i);
	if (rc == 0) {
		/* Guard against unsigned wraparound: UINT16_MAX IO qpairs
		 * plus the implicit admin qpair would wrap to 0. */
		if (*i == UINT16_MAX) {
			return -EINVAL;
		}
		(*i)++;
	}

	return rc;
}

/**
 * Decoder for the deprecated `max_qpairs_per_ctrlr` RPC parameter.
 *
 * Parses the value exactly like a plain uint16 decoder, but emits a
 * deprecation warning steering users toward `max_io_qpairs_per_ctrlr`.
 *
 * \param val JSON value to decode.
 * \param out Pointer to a uint16_t receiving the parsed value.
 * \return 0 on success, negative errno on parse failure.
 */
static int
nvmf_rpc_decode_max_qpairs(const struct spdk_json_val *val, void *out)
{
	uint16_t *num_qpairs = out;
	int rc = spdk_json_number_to_uint16(val, num_qpairs);

	if (rc != 0) {
		return rc;
	}

	SPDK_WARNLOG("Parameter max_qpairs_per_ctrlr is deprecated, use max_io_qpairs_per_ctrlr instead.\n");
	return 0;
}

static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[] = {
	{	"trtype", offsetof(struct nvmf_rpc_create_transport_ctx, trtype), spdk_json_decode_string},
	{
@@ -1567,7 +1607,11 @@ static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[]
	},
	{
		"max_qpairs_per_ctrlr", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_qpairs_per_ctrlr),
		spdk_json_decode_uint16, true
		nvmf_rpc_decode_max_qpairs, true
	},
	{
		"max_io_qpairs_per_ctrlr", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_qpairs_per_ctrlr),
		nvmf_rpc_decode_max_io_qpairs, true
	},
	{
		"in_capsule_data_size", offsetof(struct nvmf_rpc_create_transport_ctx, opts.in_capsule_data_size),
@@ -1749,7 +1793,7 @@ dump_nvmf_transport(struct spdk_json_write_ctx *w, struct spdk_nvmf_transport *t

	spdk_json_write_named_string(w, "trtype", spdk_nvmf_get_transport_name(transport));
	spdk_json_write_named_uint32(w, "max_queue_depth", opts->max_queue_depth);
	spdk_json_write_named_uint32(w, "max_qpairs_per_ctrlr", opts->max_qpairs_per_ctrlr);
	spdk_json_write_named_uint32(w, "max_io_qpairs_per_ctrlr", opts->max_qpairs_per_ctrlr - 1);
	spdk_json_write_named_uint32(w, "in_capsule_data_size", opts->in_capsule_data_size);
	spdk_json_write_named_uint32(w, "max_io_size", opts->max_io_size);
	spdk_json_write_named_uint32(w, "io_unit_size", opts->io_unit_size);
Loading