Commit 04ae83ec authored by Shuhei Matsumoto, committed by Ben Walker

nvmf: Move allocated buffer pointers to common struct spdk_nvmf_request



This is a preparation to unify buffer management among transports.
struct spdk_nvmf_request already has SPDK_NVMF_MAX_SGL_ENTRIES (16) * 2
iovecs. Hence doubling the number of buffers is no problem.
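
FC, RDMA, and TCP each carry a near-verbatim copy of the free/get loops touched
below; once the pointers live in struct spdk_nvmf_request, those copies can
collapse into one transport-agnostic helper. Below is a minimal sketch of where
this is headed, assuming the private poll-group fields (buf_cache,
buf_cache_count, buf_cache_size) seen in the hunks; the helper itself is
hypothetical and not part of this commit.

/* Hypothetical follow-up, not in this commit: one free path shared by all
 * transports. Assumes the private definitions from lib/nvmf/nvmf_internal.h. */
#include "nvmf_internal.h"

static void
nvmf_request_free_buffers_sketch(struct spdk_nvmf_request *req,
				 struct spdk_nvmf_transport_poll_group *group,
				 struct spdk_nvmf_transport *transport,
				 uint32_t num_buffers)
{
	uint32_t i;

	for (i = 0; i < num_buffers; i++) {
		/* Prefer refilling the per-poll-group cache, as each transport does today. */
		if (group->buf_cache_count < group->buf_cache_size) {
			STAILQ_INSERT_HEAD(&group->buf_cache,
					   (struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
					   link);
			group->buf_cache_count++;
		} else {
			spdk_mempool_put(transport->data_buf_pool, req->buffers[i]);
		}
		req->iov[i].iov_base = NULL;
		req->iov[i].iov_len = 0;
		req->buffers[i] = NULL;
	}
}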

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: Idb525abbf35dc9f4b8547b785b5dfa77d106d8c9
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465873


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
parent a3b7ae8a
+7 −7
@@ -426,14 +426,14 @@ nvmf_fc_request_free_buffers(struct spdk_nvmf_fc_request *fc_req,
 	for (i = 0; i < num_buffers; i++) {
 		if (group->buf_cache_count < group->buf_cache_size) {
 			STAILQ_INSERT_HEAD(&group->buf_cache,
-					   (struct spdk_nvmf_transport_pg_cache_buf *)fc_req->buffers[i],
+					   (struct spdk_nvmf_transport_pg_cache_buf *)fc_req->req.buffers[i],
 					   link);
 			group->buf_cache_count++;
 		} else {
-			spdk_mempool_put(transport->data_buf_pool, fc_req->buffers[i]);
+			spdk_mempool_put(transport->data_buf_pool, fc_req->req.buffers[i]);
 		}
 		fc_req->req.iov[i].iov_base = NULL;
-		fc_req->buffers[i] = NULL;
+		fc_req->req.buffers[i] = NULL;
 	}
 	fc_req->data_from_pool = false;
 }
@@ -1307,12 +1307,12 @@ nvmf_fc_request_get_buffers(struct spdk_nvmf_fc_request *fc_req,
 	while (i < num_buffers) {
 		if (!(STAILQ_EMPTY(&group->buf_cache))) {
 			group->buf_cache_count--;
-			fc_req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
+			fc_req->req.buffers[i] = STAILQ_FIRST(&group->buf_cache);
 			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
-			assert(fc_req->buffers[i] != NULL);
+			assert(fc_req->req.buffers[i] != NULL);
 			i++;
 		} else {
-			if (spdk_mempool_get_bulk(transport->data_buf_pool, &fc_req->buffers[i],
+			if (spdk_mempool_get_bulk(transport->data_buf_pool, &fc_req->req.buffers[i],
 						  num_buffers - i)) {
 				goto err_exit;
 			}
@@ -1336,7 +1336,7 @@ nvmf_fc_request_fill_buffers(struct spdk_nvmf_fc_request *fc_req,

 	while (length) {
 		i = fc_req->req.iovcnt;
-		fc_req->req.iov[i].iov_base = (void *)((uintptr_t)((char *)fc_req->buffers[i] +
+		fc_req->req.iov[i].iov_base = (void *)((uintptr_t)((char *)fc_req->req.buffers[i] +
 						       NVMF_DATA_BUFFER_MASK) &
 						       ~NVMF_DATA_BUFFER_MASK);
 		fc_req->req.iov[i].iov_len  = spdk_min(length, transport->opts.io_unit_size);
+0 −1
@@ -350,7 +350,6 @@ struct spdk_nvmf_fc_request {
 	uint32_t magic;
 	uint32_t s_id;
 	uint32_t d_id;
-	void *buffers[SPDK_NVMF_MAX_SGL_ENTRIES];
 	bool data_from_pool;
 	TAILQ_ENTRY(spdk_nvmf_fc_request) link;
 	TAILQ_ENTRY(spdk_nvmf_fc_request) pending_link;
+5 −1
@@ -47,6 +47,9 @@

 #define SPDK_NVMF_MAX_SGL_ENTRIES	16
 
+/* The maximum number of buffers per request */
+#define NVMF_REQ_MAX_BUFFERS	(SPDK_NVMF_MAX_SGL_ENTRIES * 2)
+
 /* AIO backend requires block size aligned data buffers,
  * extra 4KiB aligned data buffer should work for most devices.
  */
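
The fill loops in each transport round the raw pool buffer pointer up to this
4KiB boundary before handing it to an iovec. Below is a small self-contained
illustration of that mask arithmetic, assuming NVMF_DATA_BUFFER_MASK is the
alignment minus one, as the surrounding header defines it; the macro names
here are local stand-ins.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror the SPDK header: 4KiB alignment, mask = alignment - 1. */
#define DATA_BUFFER_ALIGNMENT	(1u << 12)
#define DATA_BUFFER_MASK	(DATA_BUFFER_ALIGNMENT - 1)

int
main(void)
{
	/* A pool buffer whose start is not 4KiB-aligned. */
	uintptr_t raw = 0x1234;

	/* The same expression the fill loops use: round up to the next boundary. */
	uintptr_t aligned = (raw + DATA_BUFFER_MASK) & ~(uintptr_t)DATA_BUFFER_MASK;

	/* 0x1234 + 0xfff = 0x2233; clearing the low 12 bits yields 0x2000. */
	printf("raw 0x%" PRIxPTR " -> aligned 0x%" PRIxPTR "\n", raw, aligned);
	return 0;
}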
@@ -209,7 +212,8 @@ struct spdk_nvmf_request {
 	void				*data;
 	union nvmf_h2c_msg		*cmd;
 	union nvmf_c2h_msg		*rsp;
-	struct iovec			iov[SPDK_NVMF_MAX_SGL_ENTRIES * 2];
+	void				*buffers[NVMF_REQ_MAX_BUFFERS];
+	struct iovec			iov[NVMF_REQ_MAX_BUFFERS];
 	uint32_t			iovcnt;
 	struct spdk_bdev_io_wait_entry	bdev_io_wait;

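With buffers[] and iov[] side by side in the common struct, code that never
sees a transport-specific request type can still walk the scatter list. Below
is a hedged sketch of such a consumer; the view struct and function are
illustrative only, mirroring the fields added above, and are not part of this
commit.

#include <stdint.h>
#include <sys/uio.h>

/* Illustrative mirror of the relevant spdk_nvmf_request fields. */
struct nvmf_request_view {
	void		*buffers[32];	/* NVMF_REQ_MAX_BUFFERS */
	struct iovec	iov[32];
	uint32_t	iovcnt;
};

/* Total payload length, computed the transport-agnostic way. */
static size_t
request_payload_length(const struct nvmf_request_view *req)
{
	size_t len = 0;
	uint32_t i;

	for (i = 0; i < req->iovcnt; i++) {
		len += req->iov[i].iov_len;
	}
	return len;
}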
+7 −8
@@ -264,7 +264,6 @@ struct spdk_nvmf_rdma_request {
 	} rsp;
 
 	struct spdk_nvmf_rdma_request_data	data;
-	void					*buffers[NVMF_REQ_MAX_BUFFERS];
 
 	uint32_t				num_outstanding_data_wr;
 	uint64_t				receive_tsc;
@@ -1366,13 +1365,13 @@ spdk_nvmf_rdma_request_free_buffers(struct spdk_nvmf_rdma_request *rdma_req,
 	for (i = 0; i < num_buffers; i++) {
 		if (group->buf_cache_count < group->buf_cache_size) {
 			STAILQ_INSERT_HEAD(&group->buf_cache,
-					   (struct spdk_nvmf_transport_pg_cache_buf *)rdma_req->buffers[i], link);
+					   (struct spdk_nvmf_transport_pg_cache_buf *)rdma_req->req.buffers[i], link);
 			group->buf_cache_count++;
 		} else {
-			spdk_mempool_put(transport->data_buf_pool, rdma_req->buffers[i]);
+			spdk_mempool_put(transport->data_buf_pool, rdma_req->req.buffers[i]);
 		}
 		rdma_req->req.iov[i].iov_base = NULL;
-		rdma_req->buffers[i] = NULL;
+		rdma_req->req.buffers[i] = NULL;
 		rdma_req->req.iov[i].iov_len = 0;
 
 	}
@@ -1389,12 +1388,12 @@ nvmf_rdma_request_get_buffers(struct spdk_nvmf_rdma_request *rdma_req,
 	while (i < num_buffers) {
 		if (!(STAILQ_EMPTY(&group->buf_cache))) {
 			group->buf_cache_count--;
-			rdma_req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
+			rdma_req->req.buffers[i] = STAILQ_FIRST(&group->buf_cache);
 			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
-			assert(rdma_req->buffers[i] != NULL);
+			assert(rdma_req->req.buffers[i] != NULL);
 			i++;
 		} else {
-			if (spdk_mempool_get_bulk(transport->data_buf_pool, &rdma_req->buffers[i], num_buffers - i)) {
+			if (spdk_mempool_get_bulk(transport->data_buf_pool, &rdma_req->req.buffers[i], num_buffers - i)) {
 				goto err_exit;
 			}
 			i += num_buffers - i;
@@ -1529,7 +1528,7 @@ nvmf_rdma_fill_buffers(struct spdk_nvmf_rdma_transport *rtransport,

 	while (remaining_length) {
 		iovcnt = rdma_req->req.iovcnt;
-		rdma_req->req.iov[iovcnt].iov_base = (void *)((uintptr_t)(rdma_req->buffers[iovcnt] +
+		rdma_req->req.iov[iovcnt].iov_base = (void *)((uintptr_t)(rdma_req->req.buffers[iovcnt] +
 						     NVMF_DATA_BUFFER_MASK) &
 						     ~NVMF_DATA_BUFFER_MASK);
 		rdma_req->req.iov[iovcnt].iov_len  = spdk_min(remaining_length,
+8 −10
@@ -176,8 +176,6 @@ struct spdk_nvmf_tcp_req {

 	enum spdk_nvmf_tcp_req_state		state;
 
-	void					*buffers[SPDK_NVMF_MAX_SGL_ENTRIES];
-
 	/*
 	 * next_expected_r2t_offset is used when we receive the h2c_data PDU.
 	 */
@@ -2167,16 +2165,16 @@ spdk_nvmf_tcp_request_free_buffers(struct spdk_nvmf_tcp_req *tcp_req,
 				   uint32_t num_buffers)
 {
 	for (uint32_t i = 0; i < num_buffers; i++) {
-		assert(tcp_req->buffers[i] != NULL);
+		assert(tcp_req->req.buffers[i] != NULL);
 		if (group->buf_cache_count < group->buf_cache_size) {
 			STAILQ_INSERT_HEAD(&group->buf_cache,
-					   (struct spdk_nvmf_transport_pg_cache_buf *)tcp_req->buffers[i], link);
+					   (struct spdk_nvmf_transport_pg_cache_buf *)tcp_req->req.buffers[i], link);
 			group->buf_cache_count++;
 		} else {
-			spdk_mempool_put(transport->data_buf_pool, tcp_req->buffers[i]);
+			spdk_mempool_put(transport->data_buf_pool, tcp_req->req.buffers[i]);
 		}
 		tcp_req->req.iov[i].iov_base = NULL;
-		tcp_req->buffers[i] = NULL;
+		tcp_req->req.buffers[i] = NULL;
 		tcp_req->req.iov[i].iov_len = 0;
 	}
 	tcp_req->data_from_pool = false;
@@ -2193,13 +2191,13 @@ spdk_nvmf_tcp_req_get_buffers(struct spdk_nvmf_tcp_req *tcp_req,
 	while (i < num_buffers) {
 		if (!(STAILQ_EMPTY(&group->buf_cache))) {
 			group->buf_cache_count--;
-			tcp_req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
+			tcp_req->req.buffers[i] = STAILQ_FIRST(&group->buf_cache);
 			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
-			assert(tcp_req->buffers[i] != NULL);
+			assert(tcp_req->req.buffers[i] != NULL);
 			i++;
 		} else {
 			if (spdk_mempool_get_bulk(transport->data_buf_pool,
-						  &tcp_req->buffers[i], num_buffers - i)) {
+						  &tcp_req->req.buffers[i], num_buffers - i)) {
 				goto nomem;
 			}
 			i += num_buffers - i;
@@ -2224,7 +2222,7 @@ spdk_nvmf_tcp_req_fill_buffers(struct spdk_nvmf_tcp_req *tcp_req,
 	tcp_req->req.iovcnt = 0;
 	while (length) {
 		i = tcp_req->req.iovcnt;
-		tcp_req->req.iov[i].iov_base = (void *)((uintptr_t)(tcp_req->buffers[i] +
+		tcp_req->req.iov[i].iov_base = (void *)((uintptr_t)(tcp_req->req.buffers[i] +
 							NVMF_DATA_BUFFER_MASK) &
 							~NVMF_DATA_BUFFER_MASK);
 		tcp_req->req.iov[i].iov_len  = spdk_min(length, transport->opts.io_unit_size);
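
Each fill loop above caps one iovec at transport->opts.io_unit_size and
iterates until the request length is consumed, which is why a request can need
more buffers than SGL entries. Below is a worked illustration of that split,
assuming io_unit_size = 4096 and a hypothetical 10000-byte transfer; spdk_min()
is replaced by a local stand-in so the example compiles on its own.

#include <stdint.h>
#include <stdio.h>

#define IO_UNIT_SIZE 4096u	/* assumed transport->opts.io_unit_size */

static uint32_t
min_u32(uint32_t a, uint32_t b)	/* stand-in for spdk_min() */
{
	return a < b ? a : b;
}

int
main(void)
{
	uint32_t length = 10000;	/* hypothetical transfer size */
	uint32_t iovcnt = 0;

	/* Same shape as the transport fill loops: one buffer per iteration. */
	while (length) {
		uint32_t chunk = min_u32(length, IO_UNIT_SIZE);

		printf("iov[%u].iov_len = %u\n", iovcnt, chunk);
		length -= chunk;
		iovcnt++;
	}
	/* Prints 4096, 4096, 1808: three buffers for a 10000-byte request. */
	return 0;
}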