Commit 97967681 authored by Shuhei Matsumoto, committed by Jim Harris

nvmf: Move pending_data_buf_queue to common struct spdk_nvmf_transport_poll_group



This further unifies buffer management among the transports and prepares
for making buffer allocation asynchronous.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I8c588eeac4081f50fe32605feb7352f72c628d95
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/466847


Reviewed-by: Broadcom SPDK FC-NVMe CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Anil Veerabhadrappa <anil.veerabhadrappa@broadcom.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent cb5c6612
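
The shape of the change, as a minimal compilable sketch (only the fields visible in the diffs below; the real SPDK structs carry many more members): the pending queue becomes a STAILQ of generic struct spdk_nvmf_request, owned by the common struct spdk_nvmf_transport_poll_group that each transport's poll group already embeds.

/* Sketch only: field names match the diffs below, everything else is elided. */
#include <sys/queue.h>

struct spdk_nvmf_request {
	/* ... transport-agnostic request state ... */
	STAILQ_ENTRY(spdk_nvmf_request) buf_link;	/* added by this commit */
};

struct spdk_nvmf_transport_poll_group {
	/* Requests that are waiting to obtain a data buffer */
	STAILQ_HEAD(, spdk_nvmf_request) pending_buf_queue;	/* added by this commit */
};

/* Each transport poll group embeds the common one, so a reference such as
 * fgroup->pending_data_buf_queue becomes fgroup->group.pending_buf_queue. */
struct spdk_nvmf_fc_poll_group {
	struct spdk_nvmf_transport_poll_group group;
	/* ... FC-specific members ... */
};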
+11 −10
@@ -1135,10 +1135,10 @@ nvmf_fc_req_in_bdev(struct spdk_nvmf_fc_request *fc_req)
 static inline bool
 nvmf_fc_req_in_pending(struct spdk_nvmf_fc_request *fc_req)
 {
-	struct spdk_nvmf_fc_request *tmp = NULL;
+	struct spdk_nvmf_request *tmp = NULL;
 
-	STAILQ_FOREACH(tmp, &fc_req->hwqp->fgroup->pending_data_buf_queue, pending_link) {
-		if (tmp == fc_req) {
+	STAILQ_FOREACH(tmp, &fc_req->hwqp->fgroup->group.pending_buf_queue, buf_link) {
+		if (tmp == &fc_req->req) {
 			return true;
 		}
 	}
@@ -1258,8 +1258,8 @@ spdk_nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Abort req when getting buffers.\n");
 	} else if (nvmf_fc_req_in_pending(fc_req)) {
 		/* Remove from pending */
-		STAILQ_REMOVE(&fc_req->hwqp->fgroup->pending_data_buf_queue, fc_req,
-			      spdk_nvmf_fc_request, pending_link);
+		STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
+			      spdk_nvmf_request, buf_link);
 		goto complete;
 	} else {
 		/* Should never happen */
@@ -1454,7 +1454,7 @@ nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_
 
 	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
 	if (nvmf_fc_request_execute(fc_req)) {
-		STAILQ_INSERT_TAIL(&hwqp->fgroup->pending_data_buf_queue, fc_req, pending_link);
+		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
 	}
 
 	return 0;
@@ -1626,13 +1626,15 @@ spdk_nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
 void
 spdk_nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
 {
-	struct spdk_nvmf_fc_request *fc_req = NULL, *tmp;
+	struct spdk_nvmf_request *req = NULL, *tmp;
+	struct spdk_nvmf_fc_request *fc_req;
 	int budget = 64;
 
-	STAILQ_FOREACH_SAFE(fc_req, &hwqp->fgroup->pending_data_buf_queue, pending_link, tmp) {
+	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
+		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
 		if (!nvmf_fc_request_execute(fc_req)) {
 			/* Succesfuly posted, Delete from pending. */
-			STAILQ_REMOVE_HEAD(&hwqp->fgroup->pending_data_buf_queue, pending_link);
+			STAILQ_REMOVE_HEAD(&hwqp->fgroup->group.pending_buf_queue, buf_link);
 		}
 
 		if (budget) {
@@ -1999,7 +2001,6 @@ nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
 	}
 
 	TAILQ_INIT(&fgroup->hwqp_list);
-	STAILQ_INIT(&fgroup->pending_data_buf_queue);
 
 	pthread_mutex_lock(&ftransport->lock);
 	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
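
Since the shared queue links generic requests, spdk_nvmf_fc_hwqp_process_pending_reqs above recovers the FC request with SPDK_CONTAINEROF. A self-contained sketch of the same idiom, using a local CONTAINEROF macro and illustrative struct names rather than SPDK's:

#include <stddef.h>
#include <stdio.h>
#include <sys/queue.h>

/* Same idiom as SPDK_CONTAINEROF: recover the enclosing struct from a
 * pointer to one of its members. */
#define CONTAINEROF(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct req {
	STAILQ_ENTRY(req) buf_link;
};

struct fc_req {
	int tag;		/* stand-in for transport-specific state */
	struct req req;		/* embedded generic request */
};

int main(void)
{
	STAILQ_HEAD(, req) pending = STAILQ_HEAD_INITIALIZER(pending);
	struct fc_req fr = { .tag = 42 };
	struct req *r;

	/* The queue links generic requests... */
	STAILQ_INSERT_TAIL(&pending, &fr.req, buf_link);

	/* ...and the transport steps back out to its own type. */
	STAILQ_FOREACH(r, &pending, buf_link) {
		struct fc_req *f = CONTAINEROF(r, struct fc_req, req);
		printf("fc_req tag = %d\n", f->tag);
	}
	return 0;
}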
+0 −2
@@ -264,8 +264,6 @@ struct spdk_nvmf_fc_poll_group {
 	struct spdk_nvmf_tgt *nvmf_tgt;
 	uint32_t hwqp_count; /* number of hwqp's assigned to this pg */
 	TAILQ_HEAD(, spdk_nvmf_fc_hwqp) hwqp_list;
-	/* requests that are waiting to obtain xchg/buffer */
-	STAILQ_HEAD(, spdk_nvmf_fc_request) pending_data_buf_queue;
 
 	TAILQ_ENTRY(spdk_nvmf_fc_poll_group) link;
 };
+3 −0
@@ -111,6 +111,8 @@ struct spdk_nvmf_transport_pg_cache_buf {
 
 struct spdk_nvmf_transport_poll_group {
 	struct spdk_nvmf_transport					*transport;
+	/* Requests that are waiting to obtain a data buffer */
+	STAILQ_HEAD(, spdk_nvmf_request)				pending_buf_queue;
 	STAILQ_HEAD(, spdk_nvmf_transport_pg_cache_buf)			buf_cache;
 	uint32_t							buf_cache_count;
 	uint32_t							buf_cache_size;
@@ -216,6 +218,7 @@ struct spdk_nvmf_request {
 	bool				data_from_pool;
 	struct spdk_bdev_io_wait_entry	bdev_io_wait;
 
+	STAILQ_ENTRY(spdk_nvmf_request)	buf_link;
 	TAILQ_ENTRY(spdk_nvmf_request)	link;
 };
 
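With buf_link and pending_buf_queue in place, all three transports run the same discipline: a request that fails to get a buffer is appended with STAILQ_INSERT_TAIL, only the head of the queue may retry (requests are served in order), and a successful retry is dequeued with STAILQ_REMOVE_HEAD. A standalone sketch of that loop as the RDMA and TCP poll paths use it, with a stubbed try_execute() in place of the transports' request-processing functions:

#include <stdbool.h>
#include <stdio.h>
#include <sys/queue.h>

struct req {
	int id;
	STAILQ_ENTRY(req) buf_link;
};

STAILQ_HEAD(req_queue, req);

/* Stub: pretend exactly two buffers are free. */
static bool try_execute(struct req *r)
{
	static int free_buffers = 2;

	if (free_buffers > 0) {
		free_buffers--;
		return true;	/* got a buffer */
	}
	return false;		/* still waiting */
}

static void process_pending(struct req_queue *q)
{
	struct req *r, *tmp;

	STAILQ_FOREACH_SAFE(r, q, buf_link, tmp) {
		if (!try_execute(r)) {
			break;	/* head still blocked; later requests wait in line */
		}
		/* r is always the current head here, so REMOVE_HEAD is safe. */
		STAILQ_REMOVE_HEAD(q, buf_link);
		printf("req %d got its buffer\n", r->id);
	}
}

int main(void)
{
	struct req_queue q = STAILQ_HEAD_INITIALIZER(q);
	struct req reqs[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	for (int i = 0; i < 3; i++) {
		STAILQ_INSERT_TAIL(&q, &reqs[i], buf_link);
	}
	process_pending(&q);	/* reqs 0 and 1 proceed; req 2 stays queued */
	return 0;
}

The FC variant above walks the same queue but bounds the walk with a budget of 64 instead of stopping at the first blocked request.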
+8 −15
@@ -452,9 +452,6 @@ struct spdk_nvmf_rdma_poll_group_stat {
 struct spdk_nvmf_rdma_poll_group {
 	struct spdk_nvmf_transport_poll_group	group;
 
-	/* Requests that are waiting to obtain a data buffer */
-	STAILQ_HEAD(, spdk_nvmf_rdma_request)	pending_data_buf_queue;
-
 	TAILQ_HEAD(, spdk_nvmf_rdma_poller)	pollers;
 
 	struct spdk_nvmf_rdma_poll_group_stat	stat;
@@ -1826,7 +1823,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 	 * to release resources. */
 	if (rqpair->ibv_state == IBV_QPS_ERR || rqpair->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
 		if (rdma_req->state == RDMA_REQUEST_STATE_NEED_BUFFER) {
-			STAILQ_REMOVE(&rgroup->pending_data_buf_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
+			STAILQ_REMOVE(&rgroup->group.pending_buf_queue, &rdma_req->req, spdk_nvmf_request, buf_link);
 		} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING) {
 			STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
 		} else if (rdma_req->state == RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING) {
@@ -1870,7 +1867,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 			}
 
 			rdma_req->state = RDMA_REQUEST_STATE_NEED_BUFFER;
-			STAILQ_INSERT_TAIL(&rgroup->pending_data_buf_queue, rdma_req, state_link);
+			STAILQ_INSERT_TAIL(&rgroup->group.pending_buf_queue, &rdma_req->req, buf_link);
 			break;
 		case RDMA_REQUEST_STATE_NEED_BUFFER:
 			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_NEED_BUFFER, 0, 0,
@@ -1878,7 +1875,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 
 			assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE);
 
-			if (rdma_req != STAILQ_FIRST(&rgroup->pending_data_buf_queue)) {
+			if (&rdma_req->req != STAILQ_FIRST(&rgroup->group.pending_buf_queue)) {
 				/* This request needs to wait in line to obtain a buffer */
 				break;
 			}
@@ -1886,7 +1883,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 			/* Try to get a data buffer */
 			rc = spdk_nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
 			if (rc < 0) {
-				STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);
+				STAILQ_REMOVE_HEAD(&rgroup->group.pending_buf_queue, buf_link);
 				rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
 				rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
 				break;
@@ -1898,7 +1895,7 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
 				break;
 			}
 
-			STAILQ_REMOVE_HEAD(&rgroup->pending_data_buf_queue, state_link);
+			STAILQ_REMOVE_HEAD(&rgroup->group.pending_buf_queue, buf_link);
 
 			/* If data is transferring from host to controller and the data didn't
 			 * arrive using in capsule data, we need to do a transfer from the host.
@@ -2521,6 +2518,7 @@ static void
 spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport,
 				     struct spdk_nvmf_rdma_qpair *rqpair, bool drain)
 {
+	struct spdk_nvmf_request *req, *tmp;
 	struct spdk_nvmf_rdma_request	*rdma_req, *req_tmp;
 	struct spdk_nvmf_rdma_resources *resources;
 
@@ -2539,8 +2537,8 @@ spdk_nvmf_rdma_qpair_process_pending(struct spdk_nvmf_rdma_transport *rtransport
 	}
 
 	/* The second highest priority is I/O waiting on memory buffers. */
-	STAILQ_FOREACH_SAFE(rdma_req, &rqpair->poller->group->pending_data_buf_queue, state_link,
-			    req_tmp) {
+	STAILQ_FOREACH_SAFE(req, &rqpair->poller->group->group.pending_buf_queue, buf_link, tmp) {
+		rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
 		if (spdk_nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
 			break;
 		}
@@ -2937,7 +2935,6 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)
 	}
 
 	TAILQ_INIT(&rgroup->pollers);
-	STAILQ_INIT(&rgroup->pending_data_buf_queue);
 
 	pthread_mutex_lock(&rtransport->lock);
 	TAILQ_FOREACH(device, &rtransport->devices, link) {
@@ -3047,10 +3044,6 @@ spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 		free(poller);
 	}
 
-	if (!STAILQ_EMPTY(&rgroup->pending_data_buf_queue)) {
-		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
-	}
-
 	free(rgroup);
 }
 
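A cost of the shared queue shows in the abort and qpair-error paths above: a dying request can sit anywhere in the queue, so those paths use the four-argument STAILQ_REMOVE, which walks the list to find the element's predecessor (O(n)), while the normal path only ever dequeues the head (O(1)). The two removal forms, sketched with illustrative names:

#include <sys/queue.h>

struct req {
	STAILQ_ENTRY(req) buf_link;
};

STAILQ_HEAD(req_queue, req);

/* Normal path: the request being served is by definition the head. O(1). */
static void dequeue_served(struct req_queue *q)
{
	STAILQ_REMOVE_HEAD(q, buf_link);
}

/* Abort/error path: the victim may be anywhere, so sys/queue.h must walk
 * the singly-linked list to find its predecessor. O(n). */
static void dequeue_aborted(struct req_queue *q, struct req *victim)
{
	STAILQ_REMOVE(q, victim, req, buf_link);
}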
+10 −15
@@ -273,9 +273,6 @@ struct spdk_nvmf_tcp_poll_group {
 	struct spdk_nvmf_transport_poll_group	group;
 	struct spdk_sock_group			*sock_group;
 
-	/* Requests that are waiting to obtain a data buffer */
-	STAILQ_HEAD(, spdk_nvmf_tcp_req)	pending_data_buf_queue;
-
 	TAILQ_HEAD(, spdk_nvmf_tcp_qpair)	qpairs;
 };
 
@@ -442,7 +439,8 @@ spdk_nvmf_tcp_cleanup_all_states(struct spdk_nvmf_tcp_qpair *tqpair)
 	/* Wipe the requests waiting for buffer from the global list */
 	TAILQ_FOREACH_SAFE(tcp_req, &tqpair->state_queue[TCP_REQUEST_STATE_NEED_BUFFER], state_link,
 			   req_tmp) {
-		STAILQ_REMOVE(&tqpair->group->pending_data_buf_queue, tcp_req, spdk_nvmf_tcp_req, link);
+		STAILQ_REMOVE(&tqpair->group->group.pending_buf_queue, &tcp_req->req,
+			      spdk_nvmf_request, buf_link);
 	}
 
 	spdk_nvmf_tcp_drain_state_queue(tqpair, TCP_REQUEST_STATE_NEED_BUFFER);
@@ -1211,7 +1209,6 @@ spdk_nvmf_tcp_poll_group_create(struct spdk_nvmf_transport *transport)
 	}
 
 	TAILQ_INIT(&tgroup->qpairs);
-	STAILQ_INIT(&tgroup->pending_data_buf_queue);
 
 	return &tgroup->group;
 
@@ -1244,10 +1241,6 @@ spdk_nvmf_tcp_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 	tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
 	spdk_sock_group_close(&tgroup->sock_group);
 
-	if (!STAILQ_EMPTY(&tgroup->pending_data_buf_queue)) {
-		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
-	}
-
 	free(tgroup);
 }
 
@@ -2590,14 +2583,14 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 			}
 
 			spdk_nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_NEED_BUFFER);
-			STAILQ_INSERT_TAIL(&tqpair->group->pending_data_buf_queue, tcp_req, link);
+			STAILQ_INSERT_TAIL(&group->pending_buf_queue, &tcp_req->req, buf_link);
 			break;
 		case TCP_REQUEST_STATE_NEED_BUFFER:
 			spdk_trace_record(TRACE_TCP_REQUEST_STATE_NEED_BUFFER, 0, 0, (uintptr_t)tcp_req, 0);
 
 			assert(tcp_req->req.xfer != SPDK_NVME_DATA_NONE);
 
-			if (tcp_req != STAILQ_FIRST(&tqpair->group->pending_data_buf_queue)) {
+			if (&tcp_req->req != STAILQ_FIRST(&group->pending_buf_queue)) {
 				SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP,
 					      "Not the first element to wait for the buf for tcp_req(%p) on tqpair=%p\n",
 					      tcp_req, tqpair);
@@ -2608,7 +2601,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 			/* Try to get a data buffer */
 			rc = spdk_nvmf_tcp_req_parse_sgl(ttransport, tcp_req);
 			if (rc < 0) {
-				STAILQ_REMOVE_HEAD(&tqpair->group->pending_data_buf_queue, link);
+				STAILQ_REMOVE_HEAD(&group->pending_buf_queue, buf_link);
 				rsp->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
 				/* Reset the tqpair receving pdu state */
 				spdk_nvmf_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
@@ -2623,7 +2616,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
 				break;
 			}
 
-			STAILQ_REMOVE_HEAD(&tqpair->group->pending_data_buf_queue, link);
+			STAILQ_REMOVE_HEAD(&group->pending_buf_queue, buf_link);
 
 			/* If data is transferring from host to controller, we need to do a transfer from the host. */
 			if (tcp_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
@@ -2825,7 +2818,8 @@ spdk_nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
 {
 	struct spdk_nvmf_tcp_poll_group *tgroup;
 	int rc;
-	struct spdk_nvmf_tcp_req *tcp_req, *req_tmp;
+	struct spdk_nvmf_request *req, *req_tmp;
+	struct spdk_nvmf_tcp_req *tcp_req;
 	struct spdk_nvmf_tcp_transport *ttransport = SPDK_CONTAINEROF(group->transport,
 			struct spdk_nvmf_tcp_transport, transport);
 
@@ -2835,7 +2829,8 @@ spdk_nvmf_tcp_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
 		return 0;
 	}
 
-	STAILQ_FOREACH_SAFE(tcp_req, &tgroup->pending_data_buf_queue, link, req_tmp) {
+	STAILQ_FOREACH_SAFE(req, &group->pending_buf_queue, buf_link, req_tmp) {
+		tcp_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_tcp_req, req);
 		if (spdk_nvmf_tcp_req_process(ttransport, tcp_req) == false) {
 			break;
 		}
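
The diffs also drop each transport's STAILQ_INIT of the old queue and the emptiness check on poll group destruction; with the queue now in the common struct, that bookkeeping presumably moves into the generic transport poll-group code, which is not expanded on this page. A sketch of the resulting split, with hypothetical function names:

#include <stdio.h>
#include <sys/queue.h>

struct spdk_nvmf_request;	/* pointers only, so a forward declaration suffices */

struct transport_poll_group {
	STAILQ_HEAD(, spdk_nvmf_request) pending_buf_queue;
};

/* Hypothetical common-layer create/destroy: one place owns the queue's
 * lifecycle instead of fc.c, rdma.c, and tcp.c each repeating it. */
static void common_poll_group_init(struct transport_poll_group *group)
{
	STAILQ_INIT(&group->pending_buf_queue);
}

static void common_poll_group_fini(struct transport_poll_group *group)
{
	if (!STAILQ_EMPTY(&group->pending_buf_queue)) {
		fprintf(stderr, "Pending I/O list wasn't empty on poll group destruction\n");
	}
}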