Commit 005b053a authored by Shuhei Matsumoto, committed by Ben Walker
Browse files

nvmf: Move data_from_pool flag to common struct spdk_nvmf_request



This is a preparation to unify buffer management among transports.

Signed-off-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I6b1c208207ae3679619239db4e6e9a77b33291d0
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/466002


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent 04ae83ec
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -435,7 +435,7 @@ nvmf_fc_request_free_buffers(struct spdk_nvmf_fc_request *fc_req,
		fc_req->req.iov[i].iov_base = NULL;
		fc_req->req.buffers[i] = NULL;
	}
	fc_req->data_from_pool = false;
	fc_req->req.data_from_pool = false;
}

void
@@ -1343,7 +1343,7 @@ nvmf_fc_request_fill_buffers(struct spdk_nvmf_fc_request *fc_req,
		fc_req->req.iovcnt++;
		length -= fc_req->req.iov[i].iov_len;
	}
	fc_req->data_from_pool = true;
	fc_req->req.data_from_pool = true;
}

static int
@@ -1534,7 +1534,7 @@ spdk_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
	}

	/* Release IO buffers */
	if (fc_req->data_from_pool) {
	if (fc_req->req.data_from_pool) {
		nvmf_fc_request_free_buffers(fc_req, group, transport, fc_req->req.iovcnt);
	}
	fc_req->req.data = NULL;
+0 −1
Original line number Diff line number Diff line
@@ -350,7 +350,6 @@ struct spdk_nvmf_fc_request {
	uint32_t magic;
	uint32_t s_id;
	uint32_t d_id;
	bool data_from_pool;
	TAILQ_ENTRY(spdk_nvmf_fc_request) link;
	TAILQ_ENTRY(spdk_nvmf_fc_request) pending_link;
	TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;
+1 −0
Original line number Diff line number Diff line
@@ -215,6 +215,7 @@ struct spdk_nvmf_request {
	void				*buffers[NVMF_REQ_MAX_BUFFERS];
	struct iovec			iov[NVMF_REQ_MAX_BUFFERS];
	uint32_t			iovcnt;
	bool				data_from_pool;
	struct spdk_bdev_io_wait_entry	bdev_io_wait;

	TAILQ_ENTRY(spdk_nvmf_request)	link;
+8 −8
Original line number Diff line number Diff line
@@ -251,7 +251,6 @@ struct spdk_nvmf_rdma_request_data {

struct spdk_nvmf_rdma_request {
	struct spdk_nvmf_request		req;
	bool					data_from_pool;

	enum spdk_nvmf_rdma_request_state	state;

@@ -665,7 +664,7 @@ nvmf_rdma_request_free_data(struct spdk_nvmf_rdma_request *rdma_req,
static void
nvmf_rdma_dump_request(struct spdk_nvmf_rdma_request *req)
{
	SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", req->data_from_pool);
	SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", req->req.data_from_pool);
	if (req->req.cmd) {
		SPDK_ERRLOG("\t\tRequest opcode: %d\n", req->req.cmd->nvmf_cmd.opcode);
	}
@@ -1375,7 +1374,7 @@ spdk_nvmf_rdma_request_free_buffers(struct spdk_nvmf_rdma_request *rdma_req,
		rdma_req->req.iov[i].iov_len = 0;

	}
	rdma_req->data_from_pool = false;
	rdma_req->req.data_from_pool = false;
}

static int
@@ -1590,7 +1589,7 @@ spdk_nvmf_rdma_request_fill_iovs(struct spdk_nvmf_rdma_transport *rtransport,

	assert(rdma_req->req.iovcnt <= rqpair->max_send_sge);

	rdma_req->data_from_pool = true;
	rdma_req->req.data_from_pool = true;

	return rc;

@@ -1692,7 +1691,7 @@ nvmf_rdma_request_fill_iovs_multi_sgl(struct spdk_nvmf_rdma_transport *rtranspor
#endif

	rdma_req->num_outstanding_data_wr = num_sgl_descriptors;
	rdma_req->data_from_pool = true;
	req->data_from_pool = true;

	return 0;

@@ -1792,7 +1791,7 @@ spdk_nvmf_rdma_request_parse_sgl(struct spdk_nvmf_rdma_transport *rtransport,

		rdma_req->num_outstanding_data_wr = 0;
		rdma_req->req.data = rdma_req->recv->buf + offset;
		rdma_req->data_from_pool = false;
		rdma_req->req.data_from_pool = false;
		rdma_req->req.length = sgl->unkeyed.length;

		rdma_req->req.iov[0].iov_base = rdma_req->req.data;
@@ -1835,7 +1834,7 @@ nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
	struct spdk_nvmf_rdma_poll_group	*rgroup;

	rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
	if (rdma_req->data_from_pool) {
	if (rdma_req->req.data_from_pool) {
		rgroup = rqpair->poller->group;

		spdk_nvmf_rdma_request_free_buffers(rdma_req, &rgroup->group, &rtransport->transport,
@@ -1954,7 +1953,8 @@ spdk_nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
			/* If data is transferring from host to controller and the data didn't
			 * arrive using in capsule data, we need to do a transfer from the host.
			 */
			if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER && rdma_req->data_from_pool) {
			if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER &&
			    rdma_req->req.data_from_pool) {
				STAILQ_INSERT_TAIL(&rqpair->pending_rdma_read_queue, rdma_req, state_link);
				rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING;
				break;
+6 −7
Original line number Diff line number Diff line
@@ -168,7 +168,6 @@ struct spdk_nvmf_tcp_req {
	/* In-capsule data buffer */
	uint8_t					*buf;

	bool					data_from_pool;
	bool					has_incapsule_data;

	/* transfer_tag */
@@ -461,7 +460,7 @@ nvmf_tcp_dump_qpair_req_contents(struct spdk_nvmf_tcp_qpair *tqpair)
	for (i = 1; i < TCP_REQUEST_NUM_STATES; i++) {
		SPDK_ERRLOG("\tNum of requests in state[%d] = %d\n", i, tqpair->state_cntr[i]);
		TAILQ_FOREACH(tcp_req, &tqpair->state_queue[i], state_link) {
			SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", tcp_req->data_from_pool);
			SPDK_ERRLOG("\t\tRequest Data From Pool: %d\n", tcp_req->req.data_from_pool);
			SPDK_ERRLOG("\t\tRequest opcode: %d\n", tcp_req->req.cmd->nvmf_cmd.opcode);
		}
	}
@@ -2177,7 +2176,7 @@ spdk_nvmf_tcp_request_free_buffers(struct spdk_nvmf_tcp_req *tcp_req,
		tcp_req->req.buffers[i] = NULL;
		tcp_req->req.iov[i].iov_len = 0;
	}
	tcp_req->data_from_pool = false;
	tcp_req->req.data_from_pool = false;
}

static int
@@ -2231,7 +2230,7 @@ spdk_nvmf_tcp_req_fill_buffers(struct spdk_nvmf_tcp_req *tcp_req,
	}

	assert(tcp_req->req.iovcnt <= SPDK_NVMF_MAX_SGL_ENTRIES);
	tcp_req->data_from_pool = true;
	tcp_req->req.data_from_pool = true;
}

static int
@@ -2330,7 +2329,7 @@ spdk_nvmf_tcp_req_parse_sgl(struct spdk_nvmf_tcp_transport *ttransport,
		}

		tcp_req->req.data = tcp_req->buf + offset;
		tcp_req->data_from_pool = false;
		tcp_req->req.data_from_pool = false;
		tcp_req->req.length = length;

		if (spdk_unlikely(tcp_req->dif_insert_or_strip)) {
@@ -2524,7 +2523,7 @@ spdk_nvmf_tcp_pdu_set_buf_from_req(struct spdk_nvmf_tcp_qpair *tqpair,
{
	struct nvme_tcp_pdu *pdu;

	if (tcp_req->data_from_pool) {
	if (tcp_req->req.data_from_pool) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "Will send r2t for tcp_req(%p) on tqpair=%p\n", tcp_req, tqpair);
		tcp_req->next_expected_r2t_offset = 0;
		spdk_nvmf_tcp_send_r2t_pdu(tqpair, tcp_req);
@@ -2705,7 +2704,7 @@ spdk_nvmf_tcp_req_process(struct spdk_nvmf_tcp_transport *ttransport,
			break;
		case TCP_REQUEST_STATE_COMPLETED:
			spdk_trace_record(TRACE_TCP_REQUEST_STATE_COMPLETED, 0, 0, (uintptr_t)tcp_req, 0);
			if (tcp_req->data_from_pool) {
			if (tcp_req->req.data_from_pool) {
				spdk_nvmf_tcp_request_free_buffers(tcp_req, group, &ttransport->transport,
								   tcp_req->req.iovcnt);
			}
Loading