Commit 58f16244 authored by Ziye Yang's avatar Ziye Yang Committed by Jim Harris
Browse files

nvmf: add the transport shared buffer num configuration option.



Previously, we allocated the buffer size according
to the MaxQueueDepth info; however, this is not
a good way for customers to configure it. We should
provide a shared buffer number configuration option for the transport.

Change-Id: Ic6ff83076a65e77ec7376688ffb3737fd899057c
Signed-off-by: default avatarZiye Yang <optimistyzy@gmail.com>
Reviewed-on: https://review.gerrithub.io/437450


Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: default avatarBen Walker <benjamin.walker@intel.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
parent 3947bc24
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -42,6 +42,11 @@ prior to calling `spdk_nvmf_tgt_listen`.
Related to the previous change, the rpc `set_nvmf_target_options` has been renamed to
`set_nvmf_target_max_subsystems` to indicate that this is the only target option available for the user to edit.

Add a field `num_shared_buffers` to struct spdk_nvmf_transport_opts,
and also update the related RPC function nvmf_create_transport, to make this
parameter configurable by users. The `num_shared_buffers` option is used to
configure the number of shared buffers used by the RDMA or TCP transport.

### nvmf

Add a new TCP/IP transport (located in lib/nvmf/tcp.c). With this transport,
+4 −0
Original line number Diff line number Diff line
@@ -90,6 +90,10 @@
  # Set the maximum number of IO for admin queue
  #MaxAQDepth 32

  # Set the number of pooled data buffers available to the transport
  # It is used to provide the read/write data buffers for the qpairs on this transport.
  #NumSharedBuffers 512

[Nvme]
  # NVMe Device Whitelist
  # Users may specify which NVMe devices to claim by their transport id.
+1 −0
Original line number Diff line number Diff line
@@ -70,6 +70,7 @@ struct spdk_nvmf_transport_opts {
	uint32_t max_io_size;
	uint32_t io_unit_size;
	uint32_t max_aq_depth;
	uint32_t num_shared_buffers;
};

/**
+4 −0
Original line number Diff line number Diff line
@@ -520,6 +520,10 @@ spdk_nvmf_parse_transport(struct spdk_nvmf_parse_transport_ctx *ctx)
	if (val >= 0) {
		opts.max_aq_depth = val;
	}
	val = spdk_conf_section_get_intval(ctx->sp, "NumSharedBuffers");
	if (val >= 0) {
		opts.num_shared_buffers = val;
	}

	transport = spdk_nvmf_transport_create(trtype, &opts);
	if (transport) {
+5 −0
Original line number Diff line number Diff line
@@ -1450,6 +1450,10 @@ static const struct spdk_json_object_decoder nvmf_rpc_create_transport_decoder[]
		"max_aq_depth", offsetof(struct nvmf_rpc_create_transport_ctx, opts.max_aq_depth),
		spdk_json_decode_uint32, true
	},
	{
		"num_shared_buffers", offsetof(struct nvmf_rpc_create_transport_ctx, opts.num_shared_buffers),
		spdk_json_decode_uint32, true
	},
};

static void
@@ -1581,6 +1585,7 @@ dump_nvmf_transport(struct spdk_json_write_ctx *w, struct spdk_nvmf_transport *t
	spdk_json_write_named_uint32(w, "max_io_size", opts->max_io_size);
	spdk_json_write_named_uint32(w, "io_unit_size", opts->io_unit_size);
	spdk_json_write_named_uint32(w, "max_aq_depth", opts->max_aq_depth);
	spdk_json_write_named_uint32(w, "num_shared_buffers", opts->num_shared_buffers);

	spdk_json_write_object_end(w);
}
Loading