Commit bf63daa5 authored by Konrad Sztyber
Browse files

nvme/tcp: don't use submit_accel_crc32c for ddgst calculations



This API is deprecated and using it to calculate data digest only makes
the code more complicated.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I3557fc65592a9bbdcee96af6f94647bbc5e55b95
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/25158


Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeliu@tencent.com>
Community-CI: Community CI Samsung <spdk.community.ci.samsung@gmail.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
parent 1042d663
Loading
Loading
Loading
Loading
+0 −109
Original line number Diff line number Diff line
@@ -234,17 +234,6 @@ nvme_tcp_req_put(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_req *tcp_req)
	TAILQ_INSERT_HEAD(&tqpair->free_reqs, tcp_req, link);
}

static inline void
nvme_tcp_accel_submit_crc32c(struct nvme_tcp_poll_group *tgroup, struct nvme_tcp_req *treq,
			     uint32_t *dst, struct iovec *iovs, uint32_t iovcnt, uint32_t seed,
			     spdk_nvme_accel_completion_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_poll_group *pg = tgroup->group.group;

	treq->ordering.bits.in_progress_accel = 1;
	pg->accel_fn_table.submit_accel_crc32c(pg->ctx, dst, iovs, iovcnt, seed, cb_fn, cb_arg);
}

static inline void
nvme_tcp_accel_finish_sequence(struct nvme_tcp_poll_group *tgroup, struct nvme_tcp_req *treq,
			       void *seq, spdk_nvme_accel_completion_cb cb_fn, void *cb_arg)
@@ -570,50 +559,6 @@ tcp_write_pdu(struct nvme_tcp_pdu *pdu)
	_tcp_write_pdu(pdu);
}

static void
pdu_accel_compute_crc32_done(void *cb_arg, int status)
{
	struct nvme_tcp_pdu *pdu = cb_arg;
	struct nvme_tcp_req *req = pdu->req;

	assert(req->ordering.bits.in_progress_accel);
	req->ordering.bits.in_progress_accel = 0;

	if (spdk_unlikely(status)) {
		SPDK_ERRLOG("Failed to compute the data digest for pdu =%p\n", pdu);
		pdu_write_fail(pdu, status);
		return;
	}

	pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
	MAKE_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);

	_tcp_write_pdu(pdu);
}

static void
pdu_accel_compute_crc32_seq_cb(void *cb_arg, int status)
{
	struct nvme_tcp_pdu *pdu = cb_arg;
	struct nvme_tcp_qpair *tqpair = pdu->qpair;
	struct nvme_tcp_poll_group *tgroup = nvme_tcp_poll_group(tqpair->qpair.poll_group);
	struct nvme_tcp_req *treq = pdu->req;
	struct nvme_request *req = treq->req;

	assert(treq->ordering.bits.in_progress_accel);
	treq->ordering.bits.in_progress_accel = 0;

	req->accel_sequence = NULL;
	if (spdk_unlikely(status != 0)) {
		pdu_seq_fail(pdu, status);
		return;
	}

	nvme_tcp_accel_submit_crc32c(tgroup, pdu->req, &pdu->data_digest_crc32,
				     pdu->data_iov, pdu->data_iovcnt, 0,
				     pdu_accel_compute_crc32_done, pdu);
}

static void
pdu_accel_seq_compute_crc32_done(void *cb_arg)
{
@@ -659,16 +604,6 @@ pdu_accel_compute_crc32(struct nvme_tcp_pdu *pdu)
		}

		tcp_write_pdu(pdu);
		return true;
	} else if (tgroup->group.group->accel_fn_table.submit_accel_crc32c != NULL) {
		if (req->accel_sequence != NULL) {
			nvme_tcp_accel_finish_sequence(tgroup, pdu->req, req->accel_sequence,
						       pdu_accel_compute_crc32_seq_cb, pdu);
		} else {
			nvme_tcp_accel_submit_crc32c(tgroup, pdu->req, &pdu->data_digest_crc32,
						     pdu->data_iov, pdu->data_iovcnt, 0,
						     pdu_accel_compute_crc32_done, pdu);
		}

		return true;
	}
@@ -1408,43 +1343,6 @@ _nvme_tcp_pdu_payload_handle(struct nvme_tcp_qpair *tqpair, uint32_t *reaped)
	}
}

static void
nvme_tcp_accel_recv_compute_crc32_done(void *cb_arg, int status)
{
	struct nvme_tcp_req *tcp_req = cb_arg;
	struct nvme_tcp_pdu *pdu;
	struct nvme_tcp_qpair *tqpair;
	int rc;
	int dummy_reaped = 0;

	pdu = tcp_req->pdu;
	assert(pdu != NULL);

	tqpair = tcp_req->tqpair;
	assert(tqpair != NULL);

	assert(tcp_req->ordering.bits.in_progress_accel);
	tcp_req->ordering.bits.in_progress_accel = 0;

	nvme_tcp_cond_schedule_qpair_polling(tqpair);

	if (spdk_unlikely(status)) {
		SPDK_ERRLOG("Failed to compute the data digest for pdu =%p\n", pdu);
		tcp_req->rsp.status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
		goto end;
	}

	pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
	rc = MATCH_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);
	if (rc == 0) {
		SPDK_ERRLOG("data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
		tcp_req->rsp.status.sc = SPDK_NVME_SC_COMMAND_TRANSIENT_TRANSPORT_ERROR;
	}

end:
	nvme_tcp_c2h_data_payload_handle(tqpair, tcp_req->pdu, &dummy_reaped);
}

static void
nvme_tcp_req_copy_pdu(struct nvme_tcp_req *treq, struct nvme_tcp_pdu *pdu)
{
@@ -1507,13 +1405,6 @@ nvme_tcp_accel_recv_compute_crc32(struct nvme_tcp_req *treq, struct nvme_tcp_pdu
		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
		nvme_tcp_c2h_data_payload_handle(tqpair, treq->pdu, &dummy);
		return true;
	} else if (tgroup->group.group->accel_fn_table.submit_accel_crc32c != NULL) {
		nvme_tcp_req_copy_pdu(treq, pdu);
		nvme_tcp_qpair_set_recv_state(tqpair, NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
		nvme_tcp_accel_submit_crc32c(tgroup, treq, &treq->pdu->data_digest_crc32,
					     treq->pdu->data_iov, treq->pdu->data_iovcnt, 0,
					     nvme_tcp_accel_recv_compute_crc32_done, treq);
		return true;
	}

	return false;