Commit a910bc64 authored by Alexey Marchuk's avatar Alexey Marchuk Committed by Tomasz Zawadzki
Browse files

nvme/tcp: Calculate requests completed asynchronously



A preparation step for enabling zero copy in NVMEoF TCP initiator.
With zero copy enabled, some requests might be completed out
of "process_completions" call and we should take them into
account to return the correct number of completions.

Change-Id: Iba7973f6da815645bbfad0334619d46b66379226
Signed-off-by: Alexey Marchuk <alexeymar@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/4209


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 2ceff364
Loading
Loading
Loading
Loading
+15 −3
Original line number Diff line number Diff line
@@ -88,6 +88,7 @@ struct nvme_tcp_qpair {
	struct nvme_tcp_req			*tcp_reqs;

	uint16_t				num_entries;
	uint16_t				async_complete;

	struct {
		uint16_t host_hdgst_enable: 1;
@@ -550,6 +551,10 @@ nvme_tcp_req_complete_safe(struct nvme_tcp_req *tcp_req)

	SPDK_DEBUGLOG(SPDK_LOG_NVME, "complete tcp_req(%p) on tqpair=%p\n", tcp_req, tcp_req->tqpair);

	if (!tcp_req->tqpair->qpair.in_completion_context) {
		tcp_req->tqpair->async_complete++;
	}

	/* Cache arguments to be passed to nvme_complete_request since tcp_req can be zeroed when released */
	memcpy(&cpl, &tcp_req->rsp, sizeof(cpl));
	user_cb		= tcp_req->req->cb_fn;
@@ -1442,7 +1447,8 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
				}
				pdu->ch_valid_bytes += rc;
				if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
					return NVME_TCP_PDU_IN_PROGRESS;
					rc =  NVME_TCP_PDU_IN_PROGRESS;
					goto out;
				}
			}

@@ -1462,7 +1468,8 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)

			pdu->psh_valid_bytes += rc;
			if (pdu->psh_valid_bytes < pdu->psh_len) {
				return NVME_TCP_PDU_IN_PROGRESS;
				rc =  NVME_TCP_PDU_IN_PROGRESS;
				goto out;
			}

			/* All header(ch, psh, head digist) of this PDU has now been read from the socket. */
@@ -1491,7 +1498,8 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)

			pdu->readv_offset += rc;
			if (pdu->readv_offset < data_len) {
				return NVME_TCP_PDU_IN_PROGRESS;
				rc =  NVME_TCP_PDU_IN_PROGRESS;
				goto out;
			}

			assert(pdu->readv_offset == data_len);
@@ -1507,6 +1515,10 @@ nvme_tcp_read_pdu(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
		}
	} while (prev_state != tqpair->recv_state);

out:
	*reaped += tqpair->async_complete;
	tqpair->async_complete = 0;

	return rc;
}