Commit 39cd694a authored by Abhineet Pandey's avatar Abhineet Pandey Committed by Tomasz Zawadzki
Browse files

lib/nvmf: Fix error checking in read path



We only check the sc in the nvmf read path, not the sct.
This can cause problems in cases where we use a custom sct but the sc is
the same as SPDK_NVME_SC_SUCCESS, i.e. 0. For example, if the sct is
SPDK_NVME_SCT_PATH and the sc is SPDK_NVME_SC_INTERNAL_PATH_ERROR (0),
the current code interprets the completion as a success and sends garbage data.

Change-Id: Idc4ba870cb74f78e30cc111ed358b903c20615b9
Signed-off-by: default avatarAbhineet Pandey <abhineet.pandey@nutanix.com>
Reviewed-on: https://review.spdk.io/c/spdk/spdk/+/26520


Reviewed-by: default avatarJacek Kalwas <jacek.kalwas@nutanix.com>
Tested-by: default avatarSPDK Automated Test System <spdkbot@gmail.com>
Reviewed-by: default avatarBen Walker <ben@nvidia.com>
Reviewed-by: default avatarChangpeng Liu <changpeliu@tencent.com>
Community-CI: Mellanox Build Bot
parent 605da2e9
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1876,7 +1876,7 @@ nvmf_fc_request_complete(struct spdk_nvmf_request *req)
		/* Defer this to make sure we dont call io cleanup in same context. */
		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					(void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
	} else if (spdk_nvme_cpl_is_success(rsp) &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);
+2 −2
Original line number Diff line number Diff line
@@ -1200,7 +1200,7 @@ request_transfer_out(struct spdk_nvmf_request *req, int *data_posted)
	 */
	first = &rdma_req->rsp.wr;

	if (spdk_unlikely(rsp->status.sc != SPDK_NVME_SC_SUCCESS)) {
	if (spdk_unlikely(spdk_nvme_cpl_is_error(rsp))) {
		/* On failure, data was not read from the controller. So clear the
		 * number of outstanding data WRs to zero.
		 */
@@ -2400,7 +2400,7 @@ nvmf_rdma_request_process(struct spdk_nvmf_rdma_transport *rtransport,
		case RDMA_REQUEST_STATE_EXECUTED:
			spdk_trace_record(TRACE_RDMA_REQUEST_STATE_EXECUTED, 0, 0,
					  (uintptr_t)rdma_req, (uintptr_t)rqpair);
			if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
			if (spdk_nvme_cpl_is_success(rsp) &&
			    rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
				STAILQ_INSERT_TAIL(&rqpair->pending_rdma_write_queue, rdma_req, state_link);
				rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING;
+1 −1
Original line number Diff line number Diff line
@@ -2992,7 +2992,7 @@ request_transfer_out(struct spdk_nvmf_request *req)

	tqpair = SPDK_CONTAINEROF(tcp_req->req.qpair, struct spdk_nvmf_tcp_qpair, qpair);
	nvmf_tcp_req_set_state(tcp_req, TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	if (rsp->status.sc == SPDK_NVME_SC_SUCCESS && req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
	if (spdk_nvme_cpl_is_success(rsp) && req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
		nvmf_tcp_send_c2h_data(tqpair, tcp_req);
	} else {
		nvmf_tcp_send_capsule_resp_pdu(tcp_req, tqpair);