Commit dc8d4d8d authored by Konrad Sztyber's avatar Konrad Sztyber Committed by Tomasz Zawadzki
Browse files

nvme: allow users to send IOs with an accel sequence



It'll only be allowed if the controller advertises support for accel
using the SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED flag.  The transports
are expected to set this flag if they do support it.

Signed-off-by: default avatarKonrad Sztyber <konrad.sztyber@intel.com>
Change-Id: Ib640dc99d1177acbf266975d585fa838494814ad
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/18762


Reviewed-by: default avatarAleksey Marchuk <alexeymar@nvidia.com>
Community-CI: Mellanox Build Bot
Reviewed-by: default avatarShuhei Matsumoto <smatsumoto@nvidia.com>
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
parent 9933475b
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -593,6 +593,7 @@ enum spdk_nvme_ctrlr_flags {
	SPDK_NVME_CTRLR_ZONE_APPEND_SUPPORTED		= 1 << 5, /**< Zone Append is supported (within Zoned Namespaces) */
	SPDK_NVME_CTRLR_DIRECTIVES_SUPPORTED		= 1 << 6, /**< The Directives is supported */
	SPDK_NVME_CTRLR_MPTR_SGL_SUPPORTED		= 1 << 7, /**< MPTR containing SGL descriptor is supported */
	SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED	= 1 << 8, /**< Support for sending I/O requests with accel sequence */
};

/**
@@ -618,8 +619,12 @@ struct spdk_nvme_ns_cmd_ext_io_opts {
	uint16_t apptag;
	/** Command dword 13 specific field. */
	uint32_t cdw13;
	/** Accel sequence (only valid if SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED is set and the
	 *  qpair is part of a poll group).
	 */
	void *accel_sequence;
};
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_ns_cmd_ext_io_opts) == 48, "Incorrect size");
SPDK_STATIC_ASSERT(sizeof(struct spdk_nvme_ns_cmd_ext_io_opts) == 56, "Incorrect size");

/**
 * Parse the string representation of a transport ID.
+18 −0
Original line number Diff line number Diff line
@@ -375,6 +375,9 @@ struct nvme_request {
	spdk_nvme_cmd_cb		user_cb_fn;
	void				*user_cb_arg;
	void				*user_buffer;

	/** Sequence of accel operations associated with this request */
	void				*accel_sequence;
};

struct nvme_completion_poll_status {
@@ -1067,6 +1070,10 @@ struct nvme_driver {
	int				hotplug_fd;
};

#define nvme_ns_cmd_get_ext_io_opt(opts, field, defval) \
       ((opts) != NULL && offsetof(struct spdk_nvme_ns_cmd_ext_io_opts, field) + \
        sizeof((opts)->field) <= (opts)->size ? (opts)->field : (defval))

extern struct nvme_driver *g_spdk_nvme_driver;

int nvme_driver_init(void);
@@ -1267,6 +1274,7 @@ int nvme_ctrlr_parse_ana_log_page(struct spdk_nvme_ctrlr *ctrlr,
		req->md_size = _md_size;		\
		req->pid = g_spdk_nvme_pid;		\
		req->submit_tick = 0;			\
		req->accel_sequence = NULL;		\
	} while (0);

static inline struct nvme_request *
@@ -1331,6 +1339,16 @@ nvme_complete_request(spdk_nvme_cmd_cb cb_fn, void *cb_arg, struct spdk_nvme_qpa
	struct spdk_nvme_cpl            err_cpl;
	struct nvme_error_cmd           *cmd;

	if (spdk_unlikely(req->accel_sequence != NULL)) {
		struct spdk_nvme_poll_group *pg = qpair->poll_group->group;

		/* Transports are required to execute the sequence and clear req->accel_sequence.
		 * If it's left non-NULL, it must mean the request failed. */
		assert(spdk_nvme_cpl_is_error(cpl));
		pg->accel_fn_table.abort_sequence(req->accel_sequence);
		req->accel_sequence = NULL;
	}

	/* error injection at completion path,
	 * only inject for successful completed commands
	 */
+78 −29
Original line number Diff line number Diff line
@@ -13,7 +13,8 @@ static inline struct nvme_request *_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns,
		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
		void *cb_arg, uint32_t opc, uint32_t io_flags,
		uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl, int *rc);
		uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
		void *accel_sequence, int *rc);

static bool
nvme_ns_check_request_length(uint32_t lba_count, uint32_t sectors_per_max_io,
@@ -82,7 +83,7 @@ _nvme_add_child_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	struct nvme_request	*child;

	child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
				cb_arg, opc, io_flags, apptag_mask, apptag, cdw13, check_sgl, rc);
				cb_arg, opc, io_flags, apptag_mask, apptag, cdw13, check_sgl, NULL, rc);
	if (child == NULL) {
		nvme_request_free_children(parent);
		nvme_free_request(parent);
@@ -102,12 +103,19 @@ _nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			   uint32_t io_flags, struct nvme_request *req,
			   uint32_t sectors_per_max_io, uint32_t sector_mask,
			   uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, int *rc)
			   uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
			   void *accel_sequence, int *rc)
{
	uint32_t		sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
	uint32_t		remaining_lba_count = lba_count;
	struct nvme_request	*child;

	if (spdk_unlikely(accel_sequence != NULL)) {
		SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
		*rc = -EINVAL;
		return NULL;
	}

	while (remaining_lba_count > 0) {
		lba_count = sectors_per_max_io - (lba & sector_mask);
		lba_count = spdk_min(remaining_lba_count, lba_count);
@@ -140,6 +148,15 @@ _is_io_flags_valid(uint32_t io_flags)
	return true;
}

static inline bool
_is_accel_sequence_valid(struct spdk_nvme_qpair *qpair, void *seq)
{
	/* An accel sequence can only be executed if the controller supports accel and the qpair is
	 * part of a poll group */
	return seq == NULL || ((qpair->ctrlr->flags & SPDK_NVME_CTRLR_ACCEL_SEQUENCE_SUPPORTED) &&
			       qpair->poll_group != NULL);
}

static void
_nvme_ns_cmd_setup_request(struct spdk_nvme_ns *ns, struct nvme_request *req,
			   uint32_t opc, uint64_t lba, uint32_t lba_count,
@@ -184,7 +201,8 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			       uint32_t io_flags, struct nvme_request *req,
			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, int *rc)
			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
			       void *accel_sequence, int *rc)
{
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
@@ -271,6 +289,12 @@ _nvme_ns_cmd_split_request_prp(struct spdk_nvme_ns *ns,
				*rc = -EINVAL;
				return NULL;
			}
			if (spdk_unlikely(accel_sequence != NULL)) {
				SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
				*rc = -EINVAL;
				return NULL;
			}

			child_lba_count = child_length / ns->extended_lba_size;
			/*
			 * Note the last parameter is set to "false" - this tells the recursive
@@ -307,7 +331,8 @@ _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			       uint32_t io_flags, struct nvme_request *req,
			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, int *rc)
			       uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13,
			       void *accel_sequence, int *rc)
{
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
@@ -355,6 +380,12 @@ _nvme_ns_cmd_split_request_sgl(struct spdk_nvme_ns *ns,
				*rc = -EINVAL;
				return NULL;
			}
			if (spdk_unlikely(accel_sequence != NULL)) {
				SPDK_ERRLOG("Splitting requests with accel sequence is unsupported\n");
				*rc = -EINVAL;
				return NULL;
			}

			child_lba_count = child_length / ns->extended_lba_size;
			/*
			 * Note the last parameter is set to "false" - this tells the recursive
@@ -388,7 +419,8 @@ static inline struct nvme_request *
_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl, int *rc)
		uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag, uint32_t cdw13, bool check_sgl,
		void *accel_sequence, int *rc)
{
	struct nvme_request	*req;
	uint32_t		sector_size = _nvme_get_host_buffer_sector_size(ns, io_flags);
@@ -407,6 +439,7 @@ _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,

	req->payload_offset = payload_offset;
	req->md_offset = md_offset;
	req->accel_sequence = accel_sequence;

	/* Zone append commands cannot be split. */
	if (opc == SPDK_NVME_OPC_ZONE_APPEND) {
@@ -427,25 +460,28 @@ _nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	 */
	if (sectors_per_stripe > 0 &&
	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {

		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
						  cb_fn,
						  cb_arg, opc,
						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1, apptag_mask, apptag, cdw13, rc);
						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1,
						  apptag_mask, apptag, cdw13,  accel_sequence, rc);
	} else if (lba_count > sectors_per_max_io) {
		return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
						  cb_fn,
						  cb_arg, opc,
						  io_flags, req, sectors_per_max_io, 0, apptag_mask, apptag, cdw13, rc);
						  io_flags, req, sectors_per_max_io, 0, apptag_mask,
						  apptag, cdw13, accel_sequence, rc);
	} else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
		if (ns->ctrlr->flags & SPDK_NVME_CTRLR_SGL_SUPPORTED) {
			return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
							      req, apptag_mask, apptag, cdw13, rc);
							      req, apptag_mask, apptag, cdw13,
							      accel_sequence, rc);
		} else {
			return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
							      lba, lba_count, cb_fn, cb_arg, opc, io_flags,
							      req, apptag_mask, apptag, cdw13, rc);
							      req, apptag_mask, apptag, cdw13,
							      accel_sequence, rc);
		}
	}

@@ -472,7 +508,7 @@ spdk_nvme_ns_cmd_compare(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags, 0,
			      0, 0, false, &rc);
			      0, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -505,7 +541,7 @@ spdk_nvme_ns_cmd_compare_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags,
			      apptag_mask, apptag, 0, false, &rc);
			      apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -540,7 +576,7 @@ spdk_nvme_ns_cmd_comparev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE,
			      io_flags, 0, 0, 0, true, &rc);
			      io_flags, 0, 0, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -575,7 +611,8 @@ spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpai
	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, 0, true, &rc);
			      SPDK_NVME_OPC_COMPARE, io_flags, apptag_mask, apptag, 0, true,
			      NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -605,7 +642,7 @@ spdk_nvme_ns_cmd_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, vo

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, 0,
			      0, 0, false, &rc);
			      0, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -636,7 +673,7 @@ spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *q

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags,
			      apptag_mask, apptag, 0, false, &rc);
			      apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -670,7 +707,7 @@ spdk_nvme_ns_cmd_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, 0, 0, 0, true, &rc);
			      io_flags, 0, 0, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -705,7 +742,7 @@ spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, apptag_mask, apptag, 0, true, &rc);
			      io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -726,6 +763,7 @@ spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpai
{
	struct nvme_request *req;
	struct nvme_payload payload;
	void *seq;
	int rc = 0;

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
@@ -739,14 +777,19 @@ spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpai
			return -EINVAL;
		}

		seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
		if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
			return -EINVAL;
		}

		payload.opts = opts;
		payload.md = opts->metadata;
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
				      opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, &rc);
				      opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, seq, &rc);

	} else {
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
				      0, 0, 0, 0, true, &rc);
				      0, 0, 0, 0, true, NULL, &rc);
	}

	if (req != NULL) {
@@ -777,7 +820,7 @@ spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	payload = NVME_PAYLOAD_CONTIG(buffer, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, 0, 0, 0, false, &rc);
			      io_flags, 0, 0, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -832,7 +875,7 @@ nvme_ns_cmd_zone_append_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_ZONE_APPEND,
			      io_flags, apptag_mask, apptag, 0, false, &rc);
			      io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		/*
		 * Zone append commands cannot be split (num_children has to be 0).
@@ -885,7 +928,7 @@ nvme_ns_cmd_zone_appendv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
			      SPDK_NVME_OPC_ZONE_APPEND,
			      io_flags, apptag_mask, apptag, 0, true, &rc);
			      io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
	if (req != NULL) {
		/*
		 * Zone append commands cannot be split (num_children has to be 0).
@@ -931,7 +974,7 @@ spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
	payload = NVME_PAYLOAD_CONTIG(buffer, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, apptag_mask, apptag, 0, false, &rc);
			      io_flags, apptag_mask, apptag, 0, false, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -965,7 +1008,7 @@ spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, 0, 0, 0, true, &rc);
			      io_flags, 0, 0, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -1000,7 +1043,7 @@ spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, apptag_mask, apptag, 0, true, &rc);
			      io_flags, apptag_mask, apptag, 0, true, NULL, &rc);
	if (req != NULL) {
		return nvme_qpair_submit_request(qpair, req);
	} else {
@@ -1021,6 +1064,7 @@ spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpa
{
	struct nvme_request *req;
	struct nvme_payload payload;
	void *seq;
	int rc = 0;

	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
@@ -1034,14 +1078,19 @@ spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpa
			return -EINVAL;
		}

		seq = nvme_ns_cmd_get_ext_io_opt(opts, accel_sequence, NULL);
		if (spdk_unlikely(!_is_accel_sequence_valid(qpair, seq))) {
			return -EINVAL;
		}

		payload.opts = opts;
		payload.md = opts->metadata;
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
				      opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, &rc);
				      opts->io_flags, opts->apptag_mask, opts->apptag, opts->cdw13, true, seq, &rc);

	} else {
		req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
				      0, 0, 0, 0, true, &rc);
				      0, 0, 0, 0, true, NULL, &rc);
	}

	if (req != NULL) {