Commit 5437470c authored by Shuhei Matsumoto, committed by Jim Harris
Browse files

nvmf/fc: Factor out getting and filling buffers from nvmf_fc_request_alloc_buffers



This follows the practice of the RDMA transport and is a preparation to
unify buffer allocation among transports.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I3cd4377ae31e47bbde697837be2d9bc1b1b582f1
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465869


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Anil Veerabhadrappa <anil.veerabhadrappa@broadcom.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent 71ae3959
Loading
Loading
Loading
Loading
+34 −12
Original line number Diff line number Diff line
@@ -1297,16 +1297,12 @@ complete:
}

static int
nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
nvmf_fc_request_get_buffers(struct spdk_nvmf_fc_request *fc_req,
			    struct spdk_nvmf_transport_poll_group *group,
			    struct spdk_nvmf_transport *transport,
			    uint32_t num_buffers)
{
	uint32_t length = fc_req->req.length;
	uint32_t num_buffers;
	uint32_t i = 0;
	struct spdk_nvmf_fc_poll_group *fc_poll_group = fc_req->hwqp->fc_poll_group;
	struct spdk_nvmf_transport_poll_group *group = &fc_poll_group->tp_poll_group;
	struct spdk_nvmf_transport *transport = &fc_poll_group->fc_transport->transport;

	num_buffers = SPDK_CEIL_DIV(length, transport->opts.io_unit_size);

	while (i < num_buffers) {
		if (!(STAILQ_EMPTY(&group->buf_cache))) {
@@ -1323,6 +1319,18 @@ nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
			i += num_buffers - i;
		}
	}
	return 0;

err_exit:
	nvmf_fc_request_free_buffers(fc_req, group, transport, i);
	return -ENOMEM;
}

static void
nvmf_fc_request_fill_buffers(struct spdk_nvmf_fc_request *fc_req,
			     struct spdk_nvmf_transport *transport, uint32_t length)
{
	uint32_t i;

	fc_req->req.iovcnt = 0;

@@ -1336,14 +1344,28 @@ nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
		length -= fc_req->req.iov[i].iov_len;
	}
	fc_req->data_from_pool = true;
}

	return 0;
/*
 * Allocate data buffers for an FC request from its poll group's
 * transport buffer cache/pool and populate the request's iovec.
 *
 * Returns 0 on success, -ENOMEM if the buffers could not be obtained.
 *
 * NOTE(review): the original span contained stray diff-residue lines
 * (an `err_exit:` label and a free call from the pre-refactor version)
 * interleaved into this function; they are removed here — the error
 * cleanup now lives inside nvmf_fc_request_get_buffers().
 */
static int
nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
{
	uint32_t length = fc_req->req.length;
	uint32_t num_buffers;
	struct spdk_nvmf_fc_poll_group *fc_poll_group = fc_req->hwqp->fc_poll_group;
	struct spdk_nvmf_transport_poll_group *group = &fc_poll_group->tp_poll_group;
	struct spdk_nvmf_transport *transport = &fc_poll_group->fc_transport->transport;

	/* One buffer per io_unit_size chunk of the request payload. */
	num_buffers = SPDK_CEIL_DIV(length, transport->opts.io_unit_size);

	if (nvmf_fc_request_get_buffers(fc_req, group, transport, num_buffers)) {
		return -ENOMEM;
	}

	nvmf_fc_request_fill_buffers(fc_req, transport, length);

	return 0;
}

static int
nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
{