Commit 58739014 authored by Ziye Yang's avatar Ziye Yang Committed by Jim Harris
Browse files

nvmf/tcp: use the nvme_tcp_readv_data



The purpose is to use a single readv to read both
the payload and the digest (if one is present).

This patch also prepares for supporting multiple
SGLs in the NVMe TCP transport later.

Change-Id: Ia30a5e0080b041a65461d2be13db4e0592a70305
Signed-off-by: Ziye Yang <ziye.yang@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/447670


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 78a24106
Loading
Loading
Loading
Loading
+61 −18
Original line number Diff line number Diff line
@@ -68,6 +68,14 @@

typedef void (*nvme_tcp_qpair_xfer_complete_cb)(void *cb_arg);

/* State for mapping a PDU into a caller-provided iovec array.
 * Embedded in struct nvme_tcp_pdu so partially-mapped reads/writes
 * can resume; filled by _iov_ctx_init()/_iov_ctx_set_iov().
 */
struct _iov_ctx {
	struct iovec    *iov;		/* next iovec slot to fill in the caller's array */
	int             num_iovs;	/* capacity of the caller's iovec array */
	uint32_t        iov_offset;	/* bytes already transferred; set from pdu->writev_offset
					 * or pdu->readv_offset — presumably skipped before mapping;
					 * confirm against _iov_ctx_init() */
	int             iovcnt;		/* number of iovec entries populated so far */
	uint32_t        mapped_len;	/* total byte length mapped into the iovecs */
};

struct nvme_tcp_pdu {
	union {
		/* to hold error pdu data */
@@ -85,13 +93,12 @@ struct nvme_tcp_pdu {
	} hdr;

	bool						has_hdgst;
	bool						ddgst_enable;
	uint8_t						data_digest[SPDK_NVME_TCP_DIGEST_LEN];
	int32_t						padding_valid_bytes;
	uint32_t					ddigest_valid_bytes;

	uint32_t					ch_valid_bytes;
	uint32_t					psh_valid_bytes;
	uint32_t					data_valid_bytes;

	nvme_tcp_qpair_xfer_complete_cb			cb_fn;
	void						*cb_arg;
@@ -99,10 +106,12 @@ struct nvme_tcp_pdu {
	void						*data;
	uint32_t					data_len;

	uint32_t					readv_offset;
	uint32_t					writev_offset;
	TAILQ_ENTRY(nvme_tcp_pdu)			tailq;
	uint32_t					remaining;
	uint32_t					padding_len;
	struct _iov_ctx					iov_ctx;

	void						*ctx; /* data tied to a tcp request */
};
@@ -137,14 +146,6 @@ enum nvme_tcp_qpair_state {
	NVME_TCP_QPAIR_STATE_EXITED = 3,
};

struct _iov_ctx {
	struct iovec    *iov;
	int             num_iovs;
	uint32_t        iov_offset;
	int             iovcnt;
	uint32_t        mapped_len;
};

static uint32_t
nvme_tcp_pdu_calc_header_digest(struct nvme_tcp_pdu *pdu)
{
@@ -216,15 +217,16 @@ static int
nvme_tcp_build_iovecs(struct iovec *iovec, int num_iovs, struct nvme_tcp_pdu *pdu,
		      bool hdgst_enable, bool ddgst_enable, uint32_t *_mapped_length)
{
	struct _iov_ctx ctx;
	int enable_digest;
	uint32_t hlen, plen;
	struct _iov_ctx *ctx;

	if (num_iovs == 0) {
		return 0;
	}

	_iov_ctx_init(&ctx, iovec, num_iovs, pdu->writev_offset);
	ctx = &pdu->iov_ctx;
	_iov_ctx_init(ctx, iovec, num_iovs, pdu->writev_offset);
	hlen = pdu->hdr.common.hlen;
	enable_digest = 1;
	if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ ||
@@ -243,7 +245,7 @@ nvme_tcp_build_iovecs(struct iovec *iovec, int num_iovs, struct nvme_tcp_pdu *pd
	plen = hlen;
	if (!pdu->data_len || !pdu->data) {
		/* PDU header + possible header digest */
		_iov_ctx_set_iov(&ctx, (uint8_t *)&pdu->hdr.raw, hlen);
		_iov_ctx_set_iov(ctx, (uint8_t *)&pdu->hdr.raw, hlen);
		goto end;
	}

@@ -253,25 +255,25 @@ nvme_tcp_build_iovecs(struct iovec *iovec, int num_iovs, struct nvme_tcp_pdu *pd
		plen = hlen;
	}

	if (!_iov_ctx_set_iov(&ctx, (uint8_t *)&pdu->hdr.raw, hlen)) {
	if (!_iov_ctx_set_iov(ctx, (uint8_t *)&pdu->hdr.raw, hlen)) {
		goto end;
	}

	/* Data Segment */
	plen += pdu->data_len;
	if (!_iov_ctx_set_iov(&ctx, pdu->data, pdu->data_len)) {
	if (!_iov_ctx_set_iov(ctx, pdu->data, pdu->data_len)) {
		goto end;
	}

	/* Data Digest */
	if (enable_digest && ddgst_enable) {
		plen += SPDK_NVME_TCP_DIGEST_LEN;
		_iov_ctx_set_iov(&ctx, pdu->data_digest, SPDK_NVME_TCP_DIGEST_LEN);
		_iov_ctx_set_iov(ctx, pdu->data_digest, SPDK_NVME_TCP_DIGEST_LEN);
	}

end:
	if (_mapped_length != NULL) {
		*_mapped_length = ctx.mapped_len;
		*_mapped_length = ctx->mapped_len;
	}

	/* check the plen for the first time constructing iov */
@@ -279,7 +281,35 @@ end:
		assert(plen == pdu->hdr.common.plen);
	}

	return ctx.iovcnt;
	return ctx->iovcnt;
}

/*
 * Map a PDU's payload (and, when ddgst_enable is set, its trailing data
 * digest) into the caller-provided iovec array for a readv.
 *
 * Mapping resumes at pdu->readv_offset, so a partially-read payload is
 * continued rather than restarted.
 *
 * Returns the number of iovec entries populated (0 if num_iovs is 0).
 * If _mapped_length is non-NULL, it receives the total bytes mapped.
 */
static int
nvme_tcp_build_payload_iovecs(struct iovec *iovec, int num_iovs, struct nvme_tcp_pdu *pdu,
			      bool ddgst_enable, uint32_t *_mapped_length)
{
	struct _iov_ctx *ctx = &pdu->iov_ctx;

	if (num_iovs == 0) {
		return 0;
	}

	_iov_ctx_init(ctx, iovec, num_iovs, pdu->readv_offset);

	/* Data segment first; only if it fits entirely do we append the
	 * data digest entry. */
	if (_iov_ctx_set_iov(ctx, pdu->data, pdu->data_len) && ddgst_enable) {
		_iov_ctx_set_iov(ctx, pdu->data_digest, SPDK_NVME_TCP_DIGEST_LEN);
	}

	if (_mapped_length != NULL) {
		*_mapped_length = ctx->mapped_len;
	}

	return ctx->iovcnt;
}

static int
@@ -329,4 +359,17 @@ nvme_tcp_read_data(struct spdk_sock *sock, int bytes,
	return nvme_tcp_readv_data(sock, &iov, 1);
}

/*
 * Read a PDU's payload — and its data digest when pdu->ddgst_enable is
 * set — from the socket with a single readv.
 *
 * Returns the byte count from nvme_tcp_readv_data() (negative on error).
 */
static int
nvme_tcp_read_payload_data(struct spdk_sock *sock, struct nvme_tcp_pdu *pdu)
{
	/* At most two entries: payload plus optional data digest. */
	struct iovec iovs[2];
	int cnt;

	cnt = nvme_tcp_build_payload_iovecs(iovs, 2, pdu, pdu->ddgst_enable, NULL);
	assert(cnt >= 0);

	return nvme_tcp_readv_data(sock, iovs, cnt);
}

#endif /* SPDK_INTERNAL_NVME_TCP_H */
+15 −28
Original line number Diff line number Diff line
@@ -1018,7 +1018,7 @@ nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair,
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "enter\n");

	/* check data digest if need */
	if (pdu->ddigest_valid_bytes) {
	if (pdu->ddgst_enable) {
		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
		rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c);
		if (rc == 0) {
@@ -1499,39 +1499,26 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
			}

			data_len = pdu->data_len;
			/* data len */
			if (pdu->data_valid_bytes < data_len) {
				rc = nvme_tcp_read_data(tqpair->sock,
							data_len - pdu->data_valid_bytes,
							(uint8_t *)pdu->data + pdu->data_valid_bytes);
				if (rc < 0) {
					nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
					break;
				}
			/* data digest */
			if (spdk_unlikely((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) &&
					  tqpair->host_ddgst_enable)) {
				data_len += SPDK_NVME_TCP_DIGEST_LEN;
				pdu->ddgst_enable = true;

				pdu->data_valid_bytes += rc;
				if (pdu->data_valid_bytes < data_len) {
					return NVME_TCP_PDU_IN_PROGRESS;
				}
			}

			/* data digest */
			if ((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) &&
			    tqpair->host_ddgst_enable && (pdu->ddigest_valid_bytes < SPDK_NVME_TCP_DIGEST_LEN)) {
				rc = nvme_tcp_read_data(tqpair->sock,
							SPDK_NVME_TCP_DIGEST_LEN - pdu->ddigest_valid_bytes,
							pdu->data_digest + pdu->ddigest_valid_bytes);
			rc = nvme_tcp_read_payload_data(tqpair->sock, pdu);
			if (rc < 0) {
				nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
				break;
			}

				pdu->ddigest_valid_bytes += rc;
				if (pdu->ddigest_valid_bytes < SPDK_NVME_TCP_DIGEST_LEN) {
			pdu->readv_offset += rc;
			if (pdu->readv_offset < data_len) {
				return NVME_TCP_PDU_IN_PROGRESS;
			}
			}

			assert(pdu->readv_offset == data_len);
			/* All of this PDU has now been read from the socket. */
			nvme_tcp_pdu_payload_handle(tqpair, reaped);
			break;
+13 −26
Original line number Diff line number Diff line
@@ -1631,7 +1631,7 @@ spdk_nvmf_tcp_pdu_payload_handle(struct spdk_nvmf_tcp_qpair *tqpair)

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_TCP, "enter\n");
	/* check data digest if need */
	if (pdu->ddigest_valid_bytes) {
	if (pdu->ddgst_enable) {
		crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
		rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c);
		if (rc == 0) {
@@ -1965,35 +1965,22 @@ spdk_nvmf_tcp_sock_process(struct spdk_nvmf_tcp_qpair *tqpair)
			}

			data_len = pdu->data_len;
			/* data len */
			if (pdu->data_valid_bytes < data_len) {
				rc = nvme_tcp_read_data(tqpair->sock, data_len - pdu->data_valid_bytes,
							(void *)pdu->data + pdu->data_valid_bytes);
				if (rc < 0) {
					return NVME_TCP_PDU_FATAL;
				}

				pdu->data_valid_bytes += rc;
				if (pdu->data_valid_bytes < data_len) {
					return NVME_TCP_PDU_IN_PROGRESS;
				}
			/* data digest */
			if (spdk_unlikely((pdu->hdr.common.pdu_type != SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) &&
					  tqpair->host_ddgst_enable)) {
				data_len += SPDK_NVME_TCP_DIGEST_LEN;
				pdu->ddgst_enable = true;
			}

			/* data digest */
			if ((pdu->hdr.common.pdu_type != SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ) &&
			    tqpair->host_ddgst_enable && (pdu->ddigest_valid_bytes < SPDK_NVME_TCP_DIGEST_LEN)) {
				rc = nvme_tcp_read_data(tqpair->sock,
							SPDK_NVME_TCP_DIGEST_LEN - pdu->ddigest_valid_bytes,
							pdu->data_digest + pdu->ddigest_valid_bytes);
			rc = nvme_tcp_read_payload_data(tqpair->sock, pdu);
			if (rc < 0) {
					return NVME_TCP_PDU_FATAL;
				return NVME_TCP_PDU_IN_PROGRESS;
			}

				pdu->ddigest_valid_bytes += rc;
				if (pdu->ddigest_valid_bytes < SPDK_NVME_TCP_DIGEST_LEN) {
			pdu->readv_offset += rc;
			if (pdu->readv_offset < data_len) {
				return NVME_TCP_PDU_IN_PROGRESS;
			}
			}

			/* All of this PDU has now been read from the socket. */
			spdk_nvmf_tcp_pdu_payload_handle(tqpair);