Commit 48f38636 authored by Alexey Marchuk's avatar Alexey Marchuk Committed by Jim Harris
Browse files

tcp: Cleanup nvme_tcp_pdu structure



With the recent changes which added usage of writev_async to
both the TCP target and initiator, nvme_tcp_pdu::writev_offset
becomes useless since it is not updated in the data path. This
field is only used in unit tests. Remove this field from the
nvme_tcp_pdu structure; now nvme_tcp_build_iovs builds an iov
which fully describes the PDU. Update the unit tests accordingly.

The field padding_valid_bytes is not used at all, so delete it too.

Change-Id: I2d6040ae64d6847cb455f59f65ec5677de8e5192
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/483374


Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: SPDK CI Jenkins <sys_sgci@intel.com>
parent 6146c678
Loading
Loading
Loading
Loading
+4 −9
Original line number Diff line number Diff line
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) Intel Corporation. All rights reserved.
 *   Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
@@ -102,7 +102,6 @@ struct nvme_tcp_pdu {
	bool						has_hdgst;
	bool						ddgst_enable;
	uint8_t						data_digest[SPDK_NVME_TCP_DIGEST_LEN];
	int32_t						padding_valid_bytes;

	uint8_t						ch_valid_bytes;
	uint8_t						psh_valid_bytes;
@@ -122,7 +121,6 @@ struct nvme_tcp_pdu {
	uint32_t					data_len;

	uint32_t					readv_offset;
	uint32_t					writev_offset;
	TAILQ_ENTRY(nvme_tcp_pdu)			tailq;
	uint32_t					remaining;
	uint32_t					padding_len;
@@ -353,7 +351,7 @@ nvme_tcp_build_iovs(struct iovec *iov, int iovcnt, struct nvme_tcp_pdu *pdu,
	}

	sgl = &pdu->sgl;
	_nvme_tcp_sgl_init(sgl, iov, iovcnt, pdu->writev_offset);
	_nvme_tcp_sgl_init(sgl, iov, iovcnt, 0);
	hlen = pdu->hdr->common.hlen;
	enable_digest = 1;
	if (pdu->hdr->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ ||
@@ -405,10 +403,7 @@ nvme_tcp_build_iovs(struct iovec *iov, int iovcnt, struct nvme_tcp_pdu *pdu,
		_nvme_tcp_sgl_append(sgl, pdu->data_digest, SPDK_NVME_TCP_DIGEST_LEN);
	}

	/* check the plen for the first time constructing iov */
	if (!pdu->writev_offset) {
	assert(plen == pdu->hdr->common.plen);
	}

end:
	if (_mapped_length != NULL) {
+33 −53
Original line number Diff line number Diff line
@@ -116,65 +116,60 @@ test_nvme_tcp_pdu_set_data_buf(void)
static void
test_nvme_tcp_build_iovs(void)
{
	const uintptr_t pdu_iov_len = 4096;
	struct nvme_tcp_pdu pdu = {};
	struct iovec iovs[4] = {};
	struct iovec iovs[5] = {};
	uint32_t mapped_length = 0;
	int rc;

	pdu.hdr = &pdu.hdr_mem;
	pdu.hdr->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	pdu.hdr->common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	pdu.hdr->common.plen = pdu.hdr->common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 4096 * 2 +
	pdu.hdr->common.plen = pdu.hdr->common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 +
			       SPDK_NVME_TCP_DIGEST_LEN;
	pdu.data_len = 4096 * 2;
	pdu.data_len = pdu_iov_len * 2;
	pdu.padding_len = 0;

	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
	pdu.data_iov[0].iov_len = 4096 * 2;
	pdu.data_iovcnt = 1;
	pdu.data_iov[0].iov_len = pdu_iov_len;
	pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len);
	pdu.data_iov[1].iov_len = pdu_iov_len;
	pdu.data_iovcnt = 2;

	rc = nvme_tcp_build_iovs(iovs, 4, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 3);
	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 4);
	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr->raw);
	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(iovs[1].iov_len == 4096 * 2);
	CU_ASSERT(iovs[2].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[2].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
		  4096 * 2 + SPDK_NVME_TCP_DIGEST_LEN);

	pdu.writev_offset += sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN;

	rc = nvme_tcp_build_iovs(iovs, 6, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 2);
	CU_ASSERT(iovs[0].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(iovs[0].iov_len == 4096 * 2);
	CU_ASSERT(iovs[1].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[1].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == 4096 * 2 + SPDK_NVME_TCP_DIGEST_LEN);

	pdu.writev_offset += 4096 * 2;

	rc = nvme_tcp_build_iovs(iovs, 6, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 1);
	CU_ASSERT(iovs[0].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[0].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == SPDK_NVME_TCP_DIGEST_LEN);

	pdu.writev_offset += SPDK_NVME_TCP_DIGEST_LEN;

	rc = nvme_tcp_build_iovs(iovs, 6, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 0);
		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN);

	pdu.writev_offset = 0;
	/* Add a new data_iov entry, update pdu iov count and data length */
	pdu.data_iov[2].iov_base = (void *)(0xBAADF00D);
	pdu.data_iov[2].iov_len = 123;
	pdu.data_iovcnt = 3;
	pdu.data_len += 123;
	pdu.hdr->common.plen += 123;

	rc = nvme_tcp_build_iovs(iovs, 2, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 2);
	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 5);
	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr->raw);
	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(iovs[1].iov_len == 4096 * 2);
	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D));
	CU_ASSERT(iovs[3].iov_len == 123);
	CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123);
}

struct nvme_tcp_ut_bdev_io {
@@ -410,8 +405,6 @@ test_nvme_tcp_build_iovs_with_md(void)
	pdu.data_iov[0].iov_len = (512 + 8) * 8;
	pdu.data_iovcnt = 1;

	pdu.writev_offset = 0;

	rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 10);
	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr->raw);
@@ -436,19 +429,6 @@ test_nvme_tcp_build_iovs_with_md(void)
	CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
		  512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);

	pdu.writev_offset += sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
			     512 * 6 + 256;

	rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 3);
	CU_ASSERT(iovs[0].iov_base == (void *)(0xDEADBEEF + (520 * 6) + 256));
	CU_ASSERT(iovs[0].iov_len == 256);
	CU_ASSERT(iovs[1].iov_base == (void *)(0xDEADBEEF + 520 * 7));
	CU_ASSERT(iovs[1].iov_len == 512);
	CU_ASSERT(iovs[2].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[2].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == 256 + 512 + SPDK_NVME_TCP_DIGEST_LEN);
}

int main(int argc, char **argv)