Commit 71ae3959 authored by Shuhei Matsumoto, committed by Jim Harris

nvmf/fc: Use buffer cache in nvmf_fc_request_alloc/free_buffers



The FC transport can now use the buffer cache in the same way as the
RDMA and TCP transports. The next patch will factor out getting buffers
and filling them into iovs in nvmf_fc_request_alloc_buffers().
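The pattern this patch adopts can be read in isolation as follows. This is a
minimal sketch, not SPDK code: the names (struct cache_buf, struct poll_group,
pg_get_buffer(), pg_put_buffer()) are hypothetical stand-ins, and malloc()/free()
stand in for spdk_mempool_get_bulk()/spdk_mempool_put(). Each poll group keeps a
small singly-linked cache of buffers; allocation prefers the cache and falls back
to the shared pool, and freeing refills the cache up to buf_cache_size before
returning buffers to the pool.

```c
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

#define BUF_SIZE 4096

/* A cached buffer is linked through its own first bytes, the same trick the
 * patch uses when it casts fc_req->buffers[i] to
 * struct spdk_nvmf_transport_pg_cache_buf. */
struct cache_buf {
	STAILQ_ENTRY(cache_buf) link;
};

struct poll_group {
	STAILQ_HEAD(, cache_buf) buf_cache;
	unsigned int buf_cache_count;
	unsigned int buf_cache_size;	/* upper bound on cached buffers */
};

/* Allocation path: take a buffer from the poll group's cache when one is
 * available, otherwise fall back to the shared pool (malloc here). */
static void *
pg_get_buffer(struct poll_group *group)
{
	struct cache_buf *buf;

	if (!STAILQ_EMPTY(&group->buf_cache)) {
		buf = STAILQ_FIRST(&group->buf_cache);
		STAILQ_REMOVE_HEAD(&group->buf_cache, link);
		group->buf_cache_count--;
		return buf;
	}
	return malloc(BUF_SIZE);
}

/* Free path: refill the cache up to buf_cache_size, then return the rest to
 * the shared pool. */
static void
pg_put_buffer(struct poll_group *group, void *buffer)
{
	if (group->buf_cache_count < group->buf_cache_size) {
		STAILQ_INSERT_HEAD(&group->buf_cache, (struct cache_buf *)buffer, link);
		group->buf_cache_count++;
	} else {
		free(buffer);
	}
}

int
main(void)
{
	struct poll_group group = { .buf_cache_size = 2 };
	void *bufs[3];
	int i;

	STAILQ_INIT(&group.buf_cache);

	for (i = 0; i < 3; i++) {
		bufs[i] = pg_get_buffer(&group);	/* cache empty: all come from the pool */
		assert(bufs[i] != NULL);
	}
	for (i = 0; i < 3; i++) {
		pg_put_buffer(&group, bufs[i]);		/* two go to the cache, one back to the pool */
	}
	printf("cached buffers: %u\n", group.buf_cache_count);
	return 0;
}
```

The point of the per-poll-group cache is that a cache hit avoids touching the
shared mempool at all, which the RDMA and TCP transports already rely on; the
buf_cache_size bound keeps any one poll group from hoarding buffers.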

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I0d7b4552f6ba053ba8fb5b3ca8fe7657b86f9984
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/465868


Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Anil Veerabhadrappa <anil.veerabhadrappa@broadcom.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent fbb0f0fa
+34 −8
@@ -417,13 +417,21 @@ nvmf_fc_req_in_get_buff(struct spdk_nvmf_fc_request *fc_req)

 static void
 nvmf_fc_request_free_buffers(struct spdk_nvmf_fc_request *fc_req,
+			     struct spdk_nvmf_transport_poll_group *group,
 			     struct spdk_nvmf_transport *transport,
 			     uint32_t num_buffers)
 {
 	uint32_t i;

 	for (i = 0; i < num_buffers; i++) {
-		spdk_mempool_put(transport->data_buf_pool, fc_req->buffers[i]);
+		if (group->buf_cache_count < group->buf_cache_size) {
+			STAILQ_INSERT_HEAD(&group->buf_cache,
+					   (struct spdk_nvmf_transport_pg_cache_buf *)fc_req->buffers[i],
+					   link);
+			group->buf_cache_count++;
+		} else {
+			spdk_mempool_put(transport->data_buf_pool, fc_req->buffers[i]);
+		}
 		fc_req->req.iov[i].iov_base = NULL;
 		fc_req->buffers[i] = NULL;
 	}
@@ -1294,13 +1302,26 @@ nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
 	uint32_t length = fc_req->req.length;
 	uint32_t num_buffers;
 	uint32_t i = 0;
-	struct spdk_nvmf_fc_transport *fc_transport = fc_req->hwqp->fc_poll_group->fc_transport;
-	struct spdk_nvmf_transport *transport = &fc_transport->transport;
+	struct spdk_nvmf_fc_poll_group *fc_poll_group = fc_req->hwqp->fc_poll_group;
+	struct spdk_nvmf_transport_poll_group *group = &fc_poll_group->tp_poll_group;
+	struct spdk_nvmf_transport *transport = &fc_poll_group->fc_transport->transport;

 	num_buffers = SPDK_CEIL_DIV(length, transport->opts.io_unit_size);

-	if (spdk_mempool_get_bulk(transport->data_buf_pool, fc_req->buffers, num_buffers)) {
-		return -ENOMEM;
+	while (i < num_buffers) {
+		if (!(STAILQ_EMPTY(&group->buf_cache))) {
+			group->buf_cache_count--;
+			fc_req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
+			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
+			assert(fc_req->buffers[i] != NULL);
+			i++;
+		} else {
+			if (spdk_mempool_get_bulk(transport->data_buf_pool, &fc_req->buffers[i],
+						  num_buffers - i)) {
+				goto err_exit;
+			}
+			i += num_buffers - i;
+		}
 	}

 	fc_req->req.iovcnt = 0;
@@ -1317,6 +1338,10 @@ nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
 	fc_req->data_from_pool = true;

 	return 0;
+
+err_exit:
+	nvmf_fc_request_free_buffers(fc_req, group, transport, i);
+	return -ENOMEM;
 }

 static int
@@ -1473,8 +1498,9 @@ void
 spdk_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
 {
 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
-	struct spdk_nvmf_fc_transport *fc_transport = hwqp->fc_poll_group->fc_transport;
-	struct spdk_nvmf_transport *transport = &fc_transport->transport;
+	struct spdk_nvmf_fc_poll_group *fc_poll_group = hwqp->fc_poll_group;
+	struct spdk_nvmf_transport_poll_group *group = &fc_poll_group->tp_poll_group;
+	struct spdk_nvmf_transport *transport = &fc_poll_group->fc_transport->transport;

 	if (!fc_req) {
 		return;
@@ -1487,7 +1513,7 @@ spdk_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)

 	/* Release IO buffers */
 	if (fc_req->data_from_pool) {
-		nvmf_fc_request_free_buffers(fc_req, transport, fc_req->req.iovcnt);
+		nvmf_fc_request_free_buffers(fc_req, group, transport, fc_req->req.iovcnt);
 	}
 	fc_req->req.data = NULL;
 	fc_req->req.iovcnt  = 0;