Commit fbf06aca authored by Piotr Pelplinski's avatar Piotr Pelplinski Committed by Jim Harris
Browse files

nvme: fail when request is too long



Fail when the number of child requests a command would split into is >= the queue's io_queue_requests limit.

Signed-off-by: default avatarPiotr Pelplinski <piotr.pelplinski@intel.com>
Change-Id: I8c370053847c9f623b861137da8d2387a66fc030

Reviewed-on: https://review.gerrithub.io/408850


Tested-by: default avatarSPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: default avatarMaciej Szwed <maciej.szwed@intel.com>
Reviewed-by: default avatarPawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-by: default avatarDaniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: default avatarShuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
parent 00a51646
Loading
Loading
Loading
Loading
+63 −0
Original line number Diff line number Diff line
@@ -39,6 +39,24 @@ static struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk
		void *cb_arg, uint32_t opc, uint32_t io_flags,
		uint16_t apptag_mask, uint16_t apptag, bool check_sgl);


/*
 * Return true if an I/O of lba_count logical blocks would have to be split
 * into at least qdepth child requests, i.e. more children than the queue
 * can hold, so the request must be failed rather than retried.
 *
 * lba_count             - length of the I/O in logical blocks.
 * sectors_per_max_io    - max blocks per single request for this namespace.
 * sectors_per_stripe    - stripe size in blocks; 0 if the namespace is not
 *                         striped. When non-zero it takes precedence, since
 *                         striped namespaces split on stripe boundaries.
 * qdepth                - number of requests the queue pair can hold
 *                         (opts.io_queue_requests at the call sites shown).
 *
 * NOTE(review): assumes the chosen divisor is non-zero; a namespace with
 * sectors_per_stripe == 0 and sectors_per_max_io == 0 would divide by zero.
 * Callers presumably guarantee this — confirm against ns init code.
 */
static bool
spdk_nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
				  uint32_t sectors_per_stripe, uint32_t qdepth)
{
	uint32_t child_per_io;

	/* Ceiling division: how many child requests this I/O splits into. */
	if (sectors_per_stripe > 0) {
		child_per_io = (lba_count + sectors_per_stripe - 1) / sectors_per_stripe;
	} else {
		child_per_io = (lba_count + sectors_per_max_io - 1) / sectors_per_max_io;
	}

	/* %u, not %d: child_per_io is uint32_t (signed specifier for an
	 * unsigned argument is undefined behavior per C11 7.21.6.1). */
	SPDK_DEBUGLOG(SPDK_LOG_NVME, "checking maximum i/o length %u\n", child_per_io);

	return child_per_io >= qdepth;
}

static void
nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
{
@@ -499,6 +517,11 @@ spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      0, true);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else if (spdk_nvme_ns_check_request_length(lba_count,
			ns->sectors_per_max_io,
			ns->sectors_per_stripe,
			qpair->ctrlr->opts.io_queue_requests)) {
		return -EINVAL;
	} else {
		return -ENOMEM;
	}
@@ -525,6 +548,11 @@ spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
			      apptag_mask, apptag, true);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else if (spdk_nvme_ns_check_request_length(lba_count,
			ns->sectors_per_max_io,
			ns->sectors_per_stripe,
			qpair->ctrlr->opts.io_queue_requests)) {
		return -EINVAL;
	} else {
		return -ENOMEM;
	}
@@ -555,6 +583,11 @@ spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair
			      io_flags, 0, 0, true);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else if (spdk_nvme_ns_check_request_length(lba_count,
			ns->sectors_per_max_io,
			ns->sectors_per_stripe,
			qpair->ctrlr->opts.io_queue_requests)) {
		return -EINVAL;
	} else {
		return -ENOMEM;
	}
@@ -578,6 +611,11 @@ spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, vo
			      0, true);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else if (spdk_nvme_ns_check_request_length(lba_count,
			ns->sectors_per_max_io,
			ns->sectors_per_stripe,
			qpair->ctrlr->opts.io_queue_requests)) {
		return -EINVAL;
	} else {
		return -ENOMEM;
	}
@@ -602,6 +640,11 @@ spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *q
			      apptag_mask, apptag, true);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else if (spdk_nvme_ns_check_request_length(lba_count,
			ns->sectors_per_max_io,
			ns->sectors_per_stripe,
			qpair->ctrlr->opts.io_queue_requests)) {
		return -EINVAL;
	} else {
		return -ENOMEM;
	}
@@ -631,6 +674,11 @@ spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      io_flags, 0, 0, true);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else if (spdk_nvme_ns_check_request_length(lba_count,
			ns->sectors_per_max_io,
			ns->sectors_per_stripe,
			qpair->ctrlr->opts.io_queue_requests)) {
		return -EINVAL;
	} else {
		return -ENOMEM;
	}
@@ -653,6 +701,11 @@ spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      io_flags, 0, 0, true);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else if (spdk_nvme_ns_check_request_length(lba_count,
			ns->sectors_per_max_io,
			ns->sectors_per_stripe,
			qpair->ctrlr->opts.io_queue_requests)) {
		return -EINVAL;
	} else {
		return -ENOMEM;
	}
@@ -675,6 +728,11 @@ spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
			      io_flags, apptag_mask, apptag, true);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else if (spdk_nvme_ns_check_request_length(lba_count,
			ns->sectors_per_max_io,
			ns->sectors_per_stripe,
			qpair->ctrlr->opts.io_queue_requests)) {
		return -EINVAL;
	} else {
		return -ENOMEM;
	}
@@ -704,6 +762,11 @@ spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      io_flags, 0, 0, true);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else if (spdk_nvme_ns_check_request_length(lba_count,
			ns->sectors_per_max_io,
			ns->sectors_per_stripe,
			qpair->ctrlr->opts.io_queue_requests)) {
		return -EINVAL;
	} else {
		return -ENOMEM;
	}
+5 −0
Original line number Diff line number Diff line
@@ -512,6 +512,11 @@ test_cmd_child_request(void)
	SPDK_CU_ASSERT_FATAL(g_request != NULL);
	CU_ASSERT(g_request->num_children == 4);

	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, (DEFAULT_IO_QUEUE_REQUESTS + 1) * sector_size,
				   NULL,
				   NULL, 0);
	SPDK_CU_ASSERT_FATAL(rc == -EINVAL);

	TAILQ_FOREACH_SAFE(child, &g_request->children, child_tailq, tmp) {
		nvme_request_remove_child(g_request, child);
		CU_ASSERT(child->payload_offset == offset);