Commit eb37d519 authored by Ankit Kumar, committed by Jim Harris
Browse files

lib/nvme: include dword 13 field to extendable command structure



Add command dword 13 field to the extendable structure
spdk_nvme_ns_cmd_ext_io_opts. This now enables us to pass the
DSPEC and DSM fields.

Signed-off-by: Ankit Kumar <ankit.kumar@samsung.com>
Change-Id: Id4d3dac14fdbf0e2a57e0bf287551dfd827dd503
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/16945


Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Community-CI: Mellanox Build Bot
parent b42295db
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -593,8 +593,8 @@ struct spdk_nvme_ns_cmd_ext_io_opts {
	uint16_t apptag_mask;
	/** Application tag to use end-to-end protection information. */
	uint16_t apptag;
	/* Hole at bytes 44-47. */
	uint8_t reserved44[4];
	/** Command dword 13 specific field. */
	uint32_t cdw13;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_ns_cmd_ext_io_opts) == 48, "Incorrect size");

+2 −0
Original line number Diff line number Diff line
@@ -4298,6 +4298,8 @@ SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_ns_streams_status) == 131072, "Incorr
#define SPDK_NVME_IO_FLAGS_FUSE_MASK (SPDK_NVME_CMD_FUSE_MASK << 0)
/** Enable Directive type as streams */
#define SPDK_NVME_IO_FLAGS_STREAMS_DIRECTIVE (1U << 20)
/** Enable Directive type as data placement */
#define SPDK_NVME_IO_FLAGS_DATA_PLACEMENT_DIRECTIVE (2U << 20)
/** Zone append specific, determines the contents of the reference tag written to the media */
#define SPDK_NVME_IO_FLAGS_ZONE_APPEND_PIREMAP (1U << 25)
/** Enable protection information checking of the Logical Block Reference Tag field */
+39 −36
Original line number Diff line number Diff line
@@ -13,7 +13,7 @@ static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
		void *cb_arg, uint32_t opc, uint32_t io_flags,
		uint16_t apptag_mask, uint16_t apptag, bool check_sgl, int *rc);
		uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl, int *rc);

static bool
nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
@@ -76,13 +76,13 @@ _nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			const struct nvme_payload *payload,
			uint32_t payload_offset, uint32_t md_offset,
			uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
			uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
			struct nvme_request *parent, bool check_sgl, int *rc)
{
	struct nvme_request	*child;

	child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
				cb_arg, opc, io_flags, apptag_mask, apptag, check_sgl, rc);
				cb_arg, opc, io_flags, apptag_mask, apptag, cdw13, check_sgl, rc);
	if (child == NULL) {
		nvme_request_free_children(parent);
		nvme_free_request(parent);
@@ -102,7 +102,7 @@ _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			   uint32_t io_flags, struct nvme_request *req,
			   uint32_t sectors_per_max_io, uint32_t sector_mask,
			   uint16_t apptag_mask, uint16_t apptag, int *rc)
			   uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, int *rc)
{
	uint32_t		sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
	uint32_t		remaining_lba_count = lba_count;
@@ -114,7 +114,7 @@ _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,

		child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
						lba, lba_count, cb_fn, cb_arg, opc,
						io_flags, apptag_mask, apptag, req, true, rc);
						io_flags, apptag_mask, apptag, cdw13, req, true, rc);
		if (child == NULL) {
			return NULL;
		}
@@ -143,7 +143,8 @@ _is_io_flags_valid(uint32_t io_flags)
static void
_nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
			   uint32_t opc, uint64_t lba, uint32_t lba_count,
			   uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
			   uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag,
			   uint32_t cdw13)
{
	struct spdk_nvme_cmd	*cmd;

@@ -169,6 +170,8 @@ _nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
	cmd->cdw12 = lba_count - 1;
	cmd->cdw12 |= (io_flags & SPDK_NVME_IO_FLAGS_CDW12_MASK);

	cmd->cdw13 = cdw13;

	cmd->cdw15 = apptag_mask;
	cmd->cdw15 = (cmd->cdw15 << 16 | apptag);
}
@@ -181,7 +184,7 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			       uint32_t io_flags, struct nvme_request *req,
			       uint16_t apptag_mask, uint16_t apptag, int *rc)
			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, int *rc)
{
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
@@ -277,7 +280,7 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
							child_lba, child_lba_count,
							cb_fn, cb_arg, opc, io_flags,
							apptag_mask, apptag, req, false, rc);
							apptag_mask, apptag, cdw13, req, false, rc);
			if (child == NULL) {
				return NULL;
			}
@@ -290,7 +293,7 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,

	if (child_length == req->payload_size) {
		/* No splitting was required, so setup the whole payload as one request. */
		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag);
		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
	}

	return req;
@@ -304,7 +307,7 @@ _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			       uint32_t io_flags, struct nvme_request *req,
			       uint16_t apptag_mask, uint16_t apptag, int *rc)
			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, int *rc)
{
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
@@ -361,7 +364,7 @@ _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
			child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
							child_lba, child_lba_count,
							cb_fn, cb_arg, opc, io_flags,
							apptag_mask, apptag, req, false, rc);
							apptag_mask, apptag, cdw13, req, false, rc);
			if (child == NULL) {
				return NULL;
			}
@@ -375,7 +378,7 @@ _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,

	if (child_length == req->payload_size) {
		/* No splitting was required, so setup the whole payload as one request. */
		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag);
		_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
	}

	return req;
@@ -385,7 +388,7 @@ static inline struct nvme_request *
_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, bool check_sgl, int *rc)
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl, int *rc)
{
	struct nvme_request	*req;
	uint32_t		sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
@@ -428,25 +431,25 @@ _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
						  cb_fn,
						  cb_arg, opc,
						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1, apptag_mask, apptag, rc);
						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1, apptag_mask, apptag, cdw13, rc);
	} else if (lba_count > sectors_per_max_io) {
		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
						  cb_fn,
						  cb_arg, opc,
						  io_flags, req, sectors_per_max_io, 0, apptag_mask, apptag, rc);
						  io_flags, req, sectors_per_max_io, 0, apptag_mask, apptag, cdw13, rc);
	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
		if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
			return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
							      req, apptag_mask, apptag, rc);
							      req, apptag_mask, apptag, cdw13, rc);
		} else {
			return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
							      req, apptag_mask, apptag, rc);
							      req, apptag_mask, apptag, cdw13, rc);
		}
	}

	_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag);
	_nvme_ns_cmd_setup_request(ns, req, opc, lba, lba_count, io_flags, apptag_mask, apptag, cdw13);
	return req;
}

@@ -469,7 +472,7 @@ spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags, 0,
			      0, false, &rc);
			      0, 0, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -502,7 +505,7 @@ spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags,
			      apptag_mask, apptag, false, &rc);
			      apptag_mask, apptag, 0, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -537,7 +540,7 @@ spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags, 0, 0, true, &rc);
			      io_flags, 0, 0, 0, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -572,7 +575,7 @@ spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpai
	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, true, &rc);
			      SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, 0, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -602,7 +605,7 @@ spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, vo

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, 0,
			      0, false, &rc);
			      0, 0, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -633,7 +636,7 @@ spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *q

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags,
			      apptag_mask, apptag, false, &rc);
			      apptag_mask, apptag, 0, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -667,7 +670,7 @@ spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, 0, 0, true, &rc);
			      io_flags, 0, 0, 0, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -702,7 +705,7 @@ spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, apptag_mask, apptag, true, &rc);
			      io_flags, apptag_mask, apptag, 0, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -739,11 +742,11 @@ spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpai
		payload.opts = opts;
		payload.md = opts->metadata;
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
				      opts->io_flags, opts->apptag_mask, opts->apptag, true, &rc);
				      opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, &rc);

	} else {
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
				      0, 0, 0, true, &rc);
				      0, 0, 0, 0, true, &rc);
	}

	if (req != NULL) {
@@ -774,7 +777,7 @@ spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, 0, 0, false, &rc);
			      io_flags, 0, 0, 0, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -829,7 +832,7 @@ nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_ZONE_APPEND,
			      io_flags, apptag_mask, apptag, false, &rc);
			      io_flags, apptag_mask, apptag, 0, false, &rc);
	if (req != NULL) {
		/*
		 * Zone append commands cannot be split (num_children has to be 0).
@@ -882,7 +885,7 @@ nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_ZONE_APPEND,
			      io_flags, apptag_mask, apptag, true, &rc);
			      io_flags, apptag_mask, apptag, 0, true, &rc);
	if (req != NULL) {
		/*
		 * Zone append commands cannot be split (num_children has to be 0).
@@ -928,7 +931,7 @@ spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, apptag_mask, apptag, false, &rc);
			      io_flags, apptag_mask, apptag, 0, false, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -962,7 +965,7 @@ spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, 0, 0, true, &rc);
			      io_flags, 0, 0, 0, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -997,7 +1000,7 @@ spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, apptag_mask, apptag, true, &rc);
			      io_flags, apptag_mask, apptag, 0, true, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -1034,11 +1037,11 @@ spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpa
		payload.opts = opts;
		payload.md = opts->metadata;
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
				      opts->io_flags, opts->apptag_mask, opts->apptag, true, &rc);
				      opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, &rc);

	} else {
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
				      0, 0, 0, true, &rc);
				      0, 0, 0, 0, true, &rc);
	}

	if (req != NULL) {
+4 −2
Original line number Diff line number Diff line
@@ -2011,7 +2011,7 @@ test_nvme_ns_cmd_setup_request(void)
	ns.flags = SPDK_NVME_NS_DPS_PI_SUPPORTED;

	_nvme_ns_cmd_setup_request(&ns, &req, SPDK_NVME_OPC_READ,
				   1024, 256, SPDK_NVME_IO_FLAGS_PRACT, 1, 1);
				   1024, 256, SPDK_NVME_IO_FLAGS_PRACT, 1, 1, 0);
	CU_ASSERT(req.cmd.cdw10 == 1024);
	CU_ASSERT(req.cmd.opc == SPDK_NVME_OPC_READ);
	CU_ASSERT(req.cmd.nsid == 1);
@@ -2093,7 +2093,8 @@ test_spdk_nvme_ns_cmd_writev_ext(void)
					 NULL, &sge_length, nvme_request_reset_sgl,
					 nvme_request_next_sge, &ext_opts);
	CU_ASSERT(rc != 0);
	ext_opts.io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG;
	ext_opts.io_flags = SPDK_NVME_IO_FLAGS_PRCHK_REFTAG | SPDK_NVME_IO_FLAGS_DATA_PLACEMENT_DIRECTIVE;
	ext_opts.cdw13 = (1 << 16);

	/* Empty reset_sgl cb. Expect fail */
	rc = spdk_nvme_ns_cmd_writev_ext(&ns, &qpair, 0x1000, lba_count,
@@ -2122,6 +2123,7 @@ test_spdk_nvme_ns_cmd_writev_ext(void)
	CU_ASSERT(g_request->payload.opts == &ext_opts);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_CDW12_MASK) == ext_opts.io_flags);
	CU_ASSERT(g_request->cmd.cdw13 == ext_opts.cdw13);
	CU_ASSERT(g_request->cmd.cdw15 >> 16 == ext_opts.apptag_mask);
	CU_ASSERT((g_request->cmd.cdw15 & 0xff) == ext_opts.apptag);