Commit cf73fb2f authored by Seth Howell, committed by Jim Harris

nvmf/rdma: add a pool of request_data structs

This change is related to enabling multi-SGL element support in
the NVMe-oF target.

For single SGL use cases, there is a 1:1 relationship between
rdma_requests and the ibv_wrs used to transfer the data associated
with the request. In the single SGL case, that ibv_wr is embedded
inside of the spdk_nvmf_rdma_request structure as part of an
rdma_request_data structure.
However, with multi-SGL element support, we require multiple
ibv_wrs per rdma_request. Instead of embedding these structures
inside of the rdma_request and bloating up that object, I opted to
leave the first one embedded in the object and create a pool that
requests can pull from in the multi-SGL path.
By leaving the first request_data object embedded in the
rdma_request structure, we avoid adding the latency of requesting
a mempool object in the basic cases.
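
As an illustrative sketch (not code from this patch; rtransport and
rdma_req stand in for the real transport and request objects), the
intended allocation pattern in the multi-SGL path uses the existing
spdk_mempool get/put API:

	struct spdk_nvmf_rdma_request_data *data;

	/* The first WR stays embedded in the rdma_request; only the
	 * second and subsequent SGL descriptors touch the pool. */
	data = spdk_mempool_get(rtransport->data_wr_pool);
	if (data == NULL) {
		return -ENOMEM; /* pool temporarily empty; retry later */
	}

	/* ... fill out data->wr and data->sgl for the extra elements ... */

	/* Once the transfer completes, return the object to the pool. */
	spdk_mempool_put(rtransport->data_wr_pool, data);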

Change-Id: I7282242f1e34a32eb59b55f326a6c331d455625e
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/428561
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
parent 3c88819b

@@ -211,6 +211,13 @@ struct spdk_nvmf_rdma_recv {
 	TAILQ_ENTRY(spdk_nvmf_rdma_recv) link;
 };
 
+struct spdk_nvmf_rdma_request_data {
+	struct spdk_nvmf_rdma_wr	rdma_wr;
+	struct ibv_send_wr		wr;
+	struct ibv_sge			sgl[SPDK_NVMF_MAX_SGL_ENTRIES];
+	void				*buffers[SPDK_NVMF_MAX_SGL_ENTRIES];
+};
+
 struct spdk_nvmf_rdma_request {
 	struct spdk_nvmf_request		req;
 	bool					data_from_pool;
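
The new struct packs everything one WR needs into a single mempool
element: the WR itself, its scatter-gather list, and the matching
buffer pointers. A hypothetical usage note (not part of this diff)
on how the pieces tie together once an element is in hand:

	/* The WR's sg_list points back into the same element's sgl
	 * array, so a pulled element is fully self-contained: */
	data->wr.sg_list = data->sgl;
	data->wr.num_sge = num_sge;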
@@ -225,12 +232,7 @@ struct spdk_nvmf_rdma_request {
 		struct	ibv_sge			sgl[NVMF_DEFAULT_RSP_SGE];
 	} rsp;
 
-	struct {
-		struct spdk_nvmf_rdma_wr	rdma_wr;
-		struct ibv_send_wr		wr;
-		struct ibv_sge			sgl[NVMF_DEFAULT_TX_SGE];
-		void				*buffers[NVMF_DEFAULT_TX_SGE];
-	} data;
+	struct spdk_nvmf_rdma_request_data	data;
 
 	struct spdk_nvmf_rdma_wr		rdma_wr;

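Since each spdk_nvmf_rdma_request_data now carries its own
ibv_send_wr, descriptors pulled from the pool can be chained behind
the embedded one through the standard ibv_send_wr next pointer. A
minimal sketch with hypothetical local names (the actual chaining
logic arrives with the multi-SGL patches that build on this one):

	struct spdk_nvmf_rdma_request_data *current = &rdma_req->data;
	struct spdk_nvmf_rdma_request_data *extra;

	extra = spdk_mempool_get(rtransport->data_wr_pool);
	if (extra != NULL) {
		current->wr.next = &extra->wr;	/* link the new WR */
		extra->wr.next = NULL;		/* terminate the chain */
	}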
@@ -364,6 +366,8 @@ struct spdk_nvmf_rdma_transport {
 
 	struct rdma_event_channel	*event_channel;
 
+	struct spdk_mempool		*data_wr_pool;
+
 	pthread_mutex_t			lock;
 
 	/* fields used to poll RDMA/IB events */
@@ -1712,6 +1716,17 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 		return NULL;
 	}
 
+	rtransport->data_wr_pool = spdk_mempool_create("spdk_nvmf_rdma_wr_data",
+				   opts->max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES,
+				   sizeof(struct spdk_nvmf_rdma_request_data),
+				   SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+				   SPDK_ENV_SOCKET_ID_ANY);
+	if (!rtransport->data_wr_pool) {
+		SPDK_ERRLOG("Unable to allocate work request pool for poll group\n");
+		spdk_nvmf_rdma_destroy(&rtransport->transport);
+		return NULL;
+	}
+
 	contexts = rdma_get_devices(NULL);
 	if (contexts == NULL) {
 		SPDK_ERRLOG("rdma_get_devices() failed: %s (%d)\n", spdk_strerror(errno), errno);
@@ -1850,6 +1865,16 @@ spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
 		free(device);
 	}
 
+	if (rtransport->data_wr_pool != NULL) {
+		if (spdk_mempool_count(rtransport->data_wr_pool) !=
+		    (transport->opts.max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES)) {
+			SPDK_ERRLOG("transport wr pool count is %zu but should be %u\n",
+				    spdk_mempool_count(rtransport->data_wr_pool),
+				    transport->opts.max_queue_depth * SPDK_NVMF_MAX_SGL_ENTRIES);
+		}
+	}
+
+	spdk_mempool_free(rtransport->data_wr_pool);
 	spdk_io_device_unregister(rtransport, NULL);
 	pthread_mutex_destroy(&rtransport->lock);
 	free(rtransport);
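
The count check above acts as a shutdown-time leak detector: every
element taken with spdk_mempool_get() should have been returned with
spdk_mempool_put() before the transport is destroyed, so any
shortfall indicates leaked WRs. With the illustrative sizing above,
a single leaked element would log (hypothetical numbers):

	transport wr pool count is 2047 but should be 2048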