Commit d5ef62eb authored by Ben Walker, committed by Jim Harris
Browse files

nvme/tcp: delay building the CONTIG payload until absolutely necessary



On the read path (controller-to-host transfers), we can defer building the contiguous-payload iovec until the C2H data PDU header arrives, since the buffer is not needed before the data is actually received.

Change-Id: I9acd04c8e10f44ca525f9056ee1c638939ec0233
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Signed-off-by: Jacek Kalwas <jacek.kalwas@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/21915


Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Jim Harris <jim.harris@samsung.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent 1bf3c2fe
Loading
Loading
Loading
Loading
+24 −8
Original line number Diff line number Diff line
@@ -804,8 +804,20 @@ nvme_tcp_req_init(struct nvme_tcp_qpair *tqpair, struct nvme_request *req,
	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	req->cmd.dptr.sgl1.unkeyed.length = req->payload_size;

	if (spdk_unlikely(req->cmd.opc == SPDK_NVME_OPC_FABRIC)) {
		struct spdk_nvmf_capsule_cmd *nvmf_cmd = (struct spdk_nvmf_capsule_cmd *)&req->cmd;

		xfer = spdk_nvme_opc_get_data_transfer(nvmf_cmd->fctype);
	} else {
		xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
	}

	if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG) {
		/* For c2h delay filling in the iov until the data arrives.
		 * For h2c some delay is also possible if data doesn't fit into cmd capsule (not implemented). */
		if (xfer != SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			rc = nvme_tcp_build_contig_request(tqpair, tcp_req);
		}
	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL) {
		rc = nvme_tcp_build_sgl_request(tqpair, tcp_req);
	} else {
@@ -816,13 +828,6 @@ nvme_tcp_req_init(struct nvme_tcp_qpair *tqpair, struct nvme_request *req,
		return rc;
	}

	if (spdk_unlikely(req->cmd.opc == SPDK_NVME_OPC_FABRIC)) {
		struct spdk_nvmf_capsule_cmd *nvmf_cmd = (struct spdk_nvmf_capsule_cmd *)&req->cmd;

		xfer = spdk_nvme_opc_get_data_transfer(nvmf_cmd->fctype);
	} else {
		xfer = spdk_nvme_opc_get_data_transfer(req->cmd.opc);
	}
	if (xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		max_in_capsule_data_size = ctrlr->ioccsz_bytes;
		if (spdk_unlikely((req->cmd.opc == SPDK_NVME_OPC_FABRIC) ||
@@ -1692,6 +1697,17 @@ nvme_tcp_c2h_data_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu

	}

	if (nvme_payload_type(&tcp_req->req->payload) == NVME_PAYLOAD_TYPE_CONTIG) {
		int rc;

		rc = nvme_tcp_build_contig_request(tqpair, tcp_req);
		if (rc) {
			/* Not the right error message but at least it handles the failure. */
			fes = SPDK_NVME_TCP_TERM_REQ_FES_DATA_TRANSFER_LIMIT_EXCEEDED;
			goto end;
		}
	}

	nvme_tcp_pdu_set_data_buf(pdu, tcp_req->iov, tcp_req->iovcnt,
				  c2h_data->datao, c2h_data->datal);
	pdu->req = tcp_req;