Commit 79945ef0 authored by Shuhei Matsumoto's avatar Shuhei Matsumoto Committed by Jim Harris
Browse files

nvmf: Hold number of allocated buffers in struct spdk_nvmf_request



This patch makes it possible for the multi SGL case to call
spdk_nvmf_request_get_buffers() per WR.

This patch has an unrelated fix to clear req->iovcnt in
reset_nvmf_rdma_request() in UT. We can do the fix in a separate patch
but include it in this patch because it is very small.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: If6e5af0505fb199c95ef5d0522b579242a7cef29
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/468942


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-by: Seth Howell <seth.howell@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 410455e4
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1483,7 +1483,7 @@ spdk_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)

	/* Release IO buffers */
	if (fc_req->req.data_from_pool) {
		spdk_nvmf_request_free_buffers(&fc_req->req, group, transport, fc_req->req.iovcnt);
		spdk_nvmf_request_free_buffers(&fc_req->req, group, transport);
	}
	fc_req->req.data = NULL;
	fc_req->req.iovcnt  = 0;
+2 −2
Original line number Diff line number Diff line
@@ -213,6 +213,7 @@ struct spdk_nvmf_request {
	union nvmf_h2c_msg		*cmd;
	union nvmf_c2h_msg		*rsp;
	void				*buffers[NVMF_REQ_MAX_BUFFERS];
	uint32_t			num_buffers;
	struct iovec			iov[NVMF_REQ_MAX_BUFFERS];
	uint32_t			iovcnt;
	bool				data_from_pool;
@@ -385,8 +386,7 @@ int spdk_nvmf_request_complete(struct spdk_nvmf_request *req);

void spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
				    struct spdk_nvmf_transport_poll_group *group,
				    struct spdk_nvmf_transport *transport,
				    uint32_t num_buffers);
				    struct spdk_nvmf_transport *transport);
int spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
				  struct spdk_nvmf_transport_poll_group *group,
				  struct spdk_nvmf_transport *transport,
+3 −4
Original line number Diff line number Diff line
@@ -1712,7 +1712,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,
	return rc;

err_exit:
	spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport, num_buffers);
	spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport);
	memset(wr->sg_list, 0, sizeof(wr->sg_list[0]) * wr->num_sge);
	wr->num_sge = 0;
	req->iovcnt = 0;
@@ -1813,7 +1813,7 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
	return 0;

err_exit:
	spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport, num_buffers);
	spdk_nvmf_request_free_buffers(req, &rgroup->group, &rtransport->transport);
	nvmf_rdma_request_free_data(rdma_req, rtransport);
	return rc;
}
@@ -1962,8 +1962,7 @@ nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
	if (rdma_req->req.data_from_pool) {
		rgroup = rqpair->poller->group;

		spdk_nvmf_request_free_buffers(&rdma_req->req, &rgroup->group, &rtransport->transport,
					       rdma_req->req.iovcnt);
		spdk_nvmf_request_free_buffers(&rdma_req->req, &rgroup->group, &rtransport->transport);
	}
	nvmf_rdma_request_free_data(rdma_req, rtransport);
	rdma_req->req.length = 0;
+1 −2
Original line number Diff line number Diff line
@@ -2677,8 +2677,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
		case TCP_REQUEST_STATE_COMPLETED:
			spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)tcp_req, 0);
			if (tcp_req->req.data_from_pool) {
				spdk_nvmf_request_free_buffers(&tcp_req->req, group, &ttransport->transport,
							       tcp_req->req.iovcnt);
				spdk_nvmf_request_free_buffers(&tcp_req->req, group, &ttransport->transport);
			}
			tcp_req->req.length = 0;
			tcp_req->req.iovcnt = 0;
+10 −7
Original line number Diff line number Diff line
@@ -368,12 +368,11 @@ spdk_nvmf_transport_poll_group_free_stat(struct spdk_nvmf_transport *transport,
void
spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
			       struct spdk_nvmf_transport_poll_group *group,
			       struct spdk_nvmf_transport *transport,
			       uint32_t num_buffers)
			       struct spdk_nvmf_transport *transport)
{
	uint32_t i;

	for (i = 0; i < num_buffers; i++) {
	for (i = 0; i < req->num_buffers; i++) {
		if (group->buf_cache_count < group->buf_cache_size) {
			STAILQ_INSERT_HEAD(&group->buf_cache,
					   (struct spdk_nvmf_transport_pg_cache_buf *)req->buffers[i],
@@ -386,6 +385,7 @@ spdk_nvmf_request_free_buffers(struct spdk_nvmf_request *req,
		req->buffers[i] = NULL;
		req->iov[i].iov_len = 0;
	}
	req->num_buffers = 0;
	req->data_from_pool = false;
}

@@ -400,15 +400,18 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
	while (i < num_buffers) {
		if (!(STAILQ_EMPTY(&group->buf_cache))) {
			group->buf_cache_count--;
			req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
			req->buffers[req->num_buffers] = STAILQ_FIRST(&group->buf_cache);
			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
			assert(req->buffers[i] != NULL);
			assert(req->buffers[req->num_buffers] != NULL);
			req->num_buffers++;
			i++;
		} else {
			if (spdk_mempool_get_bulk(transport->data_buf_pool, &req->buffers[i],
			if (spdk_mempool_get_bulk(transport->data_buf_pool,
						  &req->buffers[req->num_buffers],
						  num_buffers - i)) {
				goto err_exit;
			}
			req->num_buffers += num_buffers - i;
			i += num_buffers - i;
		}
	}
@@ -416,6 +419,6 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
	return 0;

err_exit:
	spdk_nvmf_request_free_buffers(req, group, transport, i);
	spdk_nvmf_request_free_buffers(req, group, transport);
	return -ENOMEM;
}
Loading