Commit 110335f1 authored by Alexey Marchuk's avatar Alexey Marchuk Committed by Tomasz Zawadzki
Browse files

nvme: Add functions spdk_nvme_ns_cmd_readv/writev_ext



These functions accept an extendable structure with IO request options.
The options structure contains a memory domain that can be used to
translate or fetch data, a metadata pointer, and end-to-end data
protection parameters.

Change-Id: I65bfba279904e77539348520c3dfac7aadbe80d9
Signed-off-by: default avatarAlexey Marchuk <alexeymar@mellanox.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/6270


Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Reviewed-by: default avatarBen Walker <benjamin.walker@intel.com>
Reviewed-by: default avatarShuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: default avatarZiye Yang <ziye.yang@intel.com>
parent a422d8b0
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -19,6 +19,11 @@ log level.

New API `spdk_nvme_ctrlr_get_memory_domain` has been added, it allows to get SPDK memory domain used by nvme controller.

New API functions `spdk_nvme_ns_cmd_readv_ext` and `spdk_nvme_ns_cmd_writev_ext`
have been added. These functions accept `spdk_nvme_ns_cmd_ext_io_opts` structure with extended IO request
options, e.g. DMA memory domain which describes data that may belong to another memory domain and
can't be accessed directly.

## v21.07:

### accel_fw
+85 −0
Original line number Diff line number Diff line
@@ -521,6 +521,27 @@ enum spdk_nvme_ctrlr_flags {
	SPDK_NVME_CTRLR_DIRECTIVES_SUPPORTED		= 1 << 6, /**< The Directives is supported */
};

/**
 * Structure with optional IO request parameters.
 *
 * Passed to \ref spdk_nvme_ns_cmd_readv_ext and \ref spdk_nvme_ns_cmd_writev_ext.
 * The structure is extendable; \ref size identifies which members the caller's
 * binary was built with.
 */
struct spdk_nvme_ns_cmd_ext_io_opts {
	/** size of this structure in bytes, set to sizeof(struct spdk_nvme_ns_cmd_ext_io_opts) */
	size_t size;
	/** Memory domain which describes data payload in IO request. The controller must support
	 * the corresponding memory domain type, refer to \ref spdk_nvme_ctrlr_get_memory_domain */
	struct spdk_memory_domain *memory_domain;
	/** User context to be passed to memory domain operations */
	void *memory_domain_ctx;
	/** Flags for this IO, defined in nvme_spec.h; validated at submission, invalid flags fail with -EINVAL */
	uint32_t io_flags;
	/** Virtual address pointer to the metadata payload, the length of metadata is specified by \ref spdk_nvme_ns_get_md_size */
	void *metadata;
	/** Application tag mask to use end-to-end protection information. */
	uint16_t apptag_mask;
	/** Application tag to use end-to-end protection information. */
	uint16_t apptag;
};

/**
 * Parse the string representation of a transport ID.
 *
@@ -2897,6 +2918,39 @@ int spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qp
				    spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				    uint16_t apptag_mask, uint16_t apptag);

/**
 * Submit a write I/O to the specified NVMe namespace, with extended options.
 *
 * The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
 * The user must ensure that only one thread submits I/O on a given qpair at any
 * given time.
 *
 * \param ns NVMe namespace to submit the write I/O
 * \param qpair I/O queue pair to submit the request
 * \param lba starting LBA to write the data
 * \param lba_count length (in sectors) for the write operation
 * \param cb_fn callback function to invoke when the I/O is completed
 * \param cb_arg argument to pass to the callback function
 * \param reset_sgl_fn callback function to reset scattered payload; must not be NULL
 * \param next_sge_fn callback function to iterate each scattered
 * payload memory segment; must not be NULL
 * \param opts Optional structure with extended IO request options (memory domain,
 * metadata pointer, IO flags, application tag); may be NULL. If provided, the caller must
 * guarantee that this structure is accessible until IO completes
 *
 * \return 0 if successfully submitted, negated errnos on the following error conditions:
 * -EINVAL: The request is malformed, e.g. reset_sgl_fn or next_sge_fn is NULL, or
 *          opts->io_flags contains invalid flags.
 * -ENOMEM: The request cannot be allocated.
 * -ENXIO: The qpair is failed at the transport level.
 * -EFAULT: Invalid address was specified as part of payload.  cb_fn is also called
 *          with error status including dnr=1 in this case.
 */
int spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn,
				struct spdk_nvme_ns_cmd_ext_io_opts *opts);

/**
 * Submit a write I/O to the specified NVMe namespace.
 *
@@ -3077,6 +3131,37 @@ int spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpa
				   spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				   uint16_t apptag_mask, uint16_t apptag);

/**
 * Submit a read I/O to the specified NVMe namespace, with extended options.
 *
 * The command is submitted to a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair().
 * The user must ensure that only one thread submits I/O on a given qpair at any given time.
 *
 * \param ns NVMe namespace to submit the read I/O
 * \param qpair I/O queue pair to submit the request
 * \param lba starting LBA to read the data
 * \param lba_count length (in sectors) for the read operation
 * \param cb_fn callback function to invoke when the I/O is completed
 * \param cb_arg argument to pass to the callback function
 * \param reset_sgl_fn callback function to reset scattered payload; must not be NULL
 * \param next_sge_fn callback function to iterate each scattered
 * payload memory segment; must not be NULL
 * \param opts Optional structure with extended IO request options (memory domain,
 * metadata pointer, IO flags, application tag); may be NULL. If provided, the caller must
 * guarantee that this structure is accessible until IO completes
 *
 * \return 0 if successfully submitted, negated errnos on the following error conditions:
 * -EINVAL: The request is malformed, e.g. reset_sgl_fn or next_sge_fn is NULL, or
 *          opts->io_flags contains invalid flags.
 * -ENOMEM: The request cannot be allocated.
 * -ENXIO: The qpair is failed at the transport level.
 * -EFAULT: Invalid address was specified as part of payload.  cb_fn is also called
 *          with error status including dnr=1 in this case.
 */
int spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
			       void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn,
			       struct spdk_nvme_ns_cmd_ext_io_opts *opts);

/**
 * Submits a read I/O to the specified NVMe namespace.
 *
+4 −0
Original line number Diff line number Diff line
@@ -226,6 +226,10 @@ struct nvme_payload {
	spdk_nvme_req_reset_sgl_cb reset_sgl_fn;
	spdk_nvme_req_next_sge_cb next_sge_fn;

	/**
	 * Extended IO options passed by the user
	 */
	struct spdk_nvme_ns_cmd_ext_io_opts *opts;
	/**
	 * If reset_sgl_fn == NULL, this is a contig payload, and contig_or_cb_arg contains the
	 * virtual memory address of a single virtually contiguous buffer.
+87 −0
Original line number Diff line number Diff line
@@ -4,6 +4,7 @@
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 Mellanox Technologies LTD. All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
@@ -740,6 +741,49 @@ spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *
	}
}

/* Submit a read I/O with optional extended request options (memory domain,
 * metadata pointer, IO flags, application tag). Behaves like
 * spdk_nvme_ns_cmd_readv_with_md() when opts carries metadata/apptag values.
 * Returns 0 on success or a negated errno (see header documentation).
 */
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn,
			   void *cb_arg, spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	struct nvme_payload payload;
	struct nvme_request *req;
	uint32_t io_flags = 0;
	uint16_t apptag_mask = 0;
	uint16_t apptag = 0;
	int rc = 0;

	/* An SGL payload requires both iteration callbacks. */
	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	if (opts != NULL) {
		if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
			return -EINVAL;
		}

		/* Attach the options to the payload so the transport can see
		 * the memory domain; lift the per-IO parameters into locals. */
		payload.opts = opts;
		payload.md = opts->metadata;
		io_flags = opts->io_flags;
		apptag_mask = opts->apptag_mask;
		apptag = opts->apptag;
	}

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
			      io_flags, apptag_mask, apptag, true, &rc);
	if (req == NULL) {
		/* Request allocation failed; translate rc into a caller-visible errno. */
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       void *buffer, uint64_t lba,
@@ -992,6 +1036,49 @@ spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair
	}
}

/* Submit a write I/O with optional extended request options (memory domain,
 * metadata pointer, IO flags, application tag). Behaves like
 * spdk_nvme_ns_cmd_writev_with_md() when opts carries metadata/apptag values.
 * Returns 0 on success or a negated errno (see header documentation).
 */
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t lba,
			    uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	struct nvme_payload payload;
	struct nvme_request *req;
	uint32_t io_flags = 0;
	uint16_t apptag_mask = 0;
	uint16_t apptag = 0;
	int rc = 0;

	/* An SGL payload requires both iteration callbacks. */
	if (reset_sgl_fn == NULL || next_sge_fn == NULL) {
		return -EINVAL;
	}

	payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);

	if (opts != NULL) {
		if (spdk_unlikely(!_is_io_flags_valid(opts->io_flags))) {
			return -EINVAL;
		}

		/* Attach the options to the payload so the transport can see
		 * the memory domain; lift the per-IO parameters into locals. */
		payload.opts = opts;
		payload.md = opts->metadata;
		io_flags = opts->io_flags;
		apptag_mask = opts->apptag_mask;
		apptag = opts->apptag;
	}

	req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
			      io_flags, apptag_mask, apptag, true, &rc);
	if (req == NULL) {
		/* Request allocation failed; translate rc into a caller-visible errno. */
		return nvme_ns_map_failure_rc(lba_count,
					      ns->sectors_per_max_io,
					      ns->sectors_per_stripe,
					      qpair->ctrlr->opts.io_queue_requests,
					      rc);
	}

	return nvme_qpair_submit_request(qpair, req);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
+90 −35
Original line number Diff line number Diff line
@@ -278,6 +278,13 @@ struct spdk_nvme_rdma_rsp {
	struct nvme_rdma_wr	rdma_wr;
};

/* In/out context for nvme_rdma_get_memory_translation().
 * On input, addr/length describe the buffer to translate; on output, addr and
 * length may have been rewritten by a memory domain translation, and lkey/rkey
 * hold the local and remote RDMA keys for the (possibly translated) buffer.
 */
struct nvme_rdma_memory_translation_ctx {
	void *addr;
	size_t length;
	uint32_t lkey;
	uint32_t rkey;
};

static const char *rdma_cm_event_str[] = {
	"RDMA_CM_EVENT_ADDR_RESOLVED",
	"RDMA_CM_EVENT_ADDR_ERROR",
@@ -1340,6 +1347,55 @@ nvme_rdma_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qp
	return rc;
}

/* Resolve the RDMA translation (lkey/rkey and possibly a rewritten addr/length)
 * for the buffer described by _ctx. When the request carries a user memory
 * domain, translation is delegated to that domain for this qpair's ibv_qp;
 * otherwise the qpair's registered memory map is consulted.
 * Returns 0 on success, a non-zero error code on translation failure.
 */
static inline int
nvme_rdma_get_memory_translation(struct nvme_request *req, struct nvme_rdma_qpair *rqpair,
				 struct nvme_rdma_memory_translation_ctx *_ctx)
{
	struct spdk_memory_domain_translation_ctx ctx;
	struct spdk_memory_domain_translation_result dma_translation;
	struct spdk_rdma_memory_translation rdma_translation;
	bool has_domain;
	int rc;

	assert(req != NULL);
	assert(rqpair != NULL);
	assert(_ctx != NULL);

	has_domain = req->payload.opts != NULL && req->payload.opts->memory_domain != NULL;
	if (!has_domain) {
		/* Common path: look the buffer up in the qpair's registered memory map. */
		rc = spdk_rdma_get_translation(rqpair->mr_map, _ctx->addr, _ctx->length, &rdma_translation);
		if (spdk_unlikely(rc != 0)) {
			SPDK_ERRLOG("RDMA memory translation failed, rc %d\n", rc);
			return rc;
		}

		if (rdma_translation.translation_type == SPDK_RDMA_TRANSLATION_MR) {
			_ctx->lkey = rdma_translation.mr_or_key.mr->lkey;
			_ctx->rkey = rdma_translation.mr_or_key.mr->rkey;
		} else {
			_ctx->lkey = _ctx->rkey = (uint32_t)rdma_translation.mr_or_key.key;
		}

		return 0;
	}

	/* The payload lives in a user memory domain: ask that domain for a
	 * translation usable on this qpair's ibv_qp. */
	ctx.size = sizeof(struct spdk_memory_domain_translation_ctx);
	ctx.rdma.ibv_qp = rqpair->rdma_qp->qp;
	dma_translation.size = sizeof(struct spdk_memory_domain_translation_result);

	rc = spdk_memory_domain_translate_data(req->payload.opts->memory_domain,
					       req->payload.opts->memory_domain_ctx,
					       rqpair->memory_domain->domain, &ctx, _ctx->addr,
					       _ctx->length, &dma_translation);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("DMA memory translation failed, rc %d\n", rc);
		return rc;
	}

	/* Adopt the translated buffer description and its keys. */
	_ctx->addr = dma_translation.addr;
	_ctx->length = dma_translation.len;
	_ctx->lkey = dma_translation.rdma.lkey;
	_ctx->rkey = dma_translation.rdma.rkey;

	return 0;
}


/*
 * Build SGL describing empty payload.
 */
@@ -1376,21 +1432,21 @@ nvme_rdma_build_contig_inline_request(struct nvme_rdma_qpair *rqpair,
				      struct spdk_nvme_rdma_req *rdma_req)
{
	struct nvme_request *req = rdma_req->req;
	struct spdk_rdma_memory_translation mem_translation;
	void *payload;
	struct nvme_rdma_memory_translation_ctx ctx = {
		.addr = req->payload.contig_or_cb_arg + req->payload_offset,
		.length = req->payload_size
	};
	int rc;

	payload = req->payload.contig_or_cb_arg + req->payload_offset;
	assert(req->payload_size != 0);
	assert(ctx.length != 0);
	assert(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);

	rc = spdk_rdma_get_translation(rqpair->mr_map, payload, req->payload_size, &mem_translation);
	rc = nvme_rdma_get_memory_translation(req, rqpair, &ctx);
	if (spdk_unlikely(rc)) {
		SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
		return -1;
	}

	rdma_req->send_sgl[1].lkey = spdk_rdma_memory_translation_get_lkey(&mem_translation);
	rdma_req->send_sgl[1].lkey = ctx.lkey;

	/* The first element of this SGL is pointing at an
	 * spdk_nvmf_cmd object. For this particular command,
@@ -1398,8 +1454,8 @@ nvme_rdma_build_contig_inline_request(struct nvme_rdma_qpair *rqpair,
	 * the NVMe command. */
	rdma_req->send_sgl[0].length = sizeof(struct spdk_nvme_cmd);

	rdma_req->send_sgl[1].addr = (uint64_t)payload;
	rdma_req->send_sgl[1].length = (uint32_t)req->payload_size;
	rdma_req->send_sgl[1].addr = (uint64_t)ctx.addr;
	rdma_req->send_sgl[1].length = (uint32_t)ctx.length;

	/* The RDMA SGL contains two elements. The first describes
	 * the NVMe command and the second describes the data
@@ -1409,7 +1465,7 @@ nvme_rdma_build_contig_inline_request(struct nvme_rdma_qpair *rqpair,
	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)req->payload_size;
	req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)ctx.length;
	/* Inline only supported for icdoff == 0 currently.  This function will
	 * not get called for controllers with other values. */
	req->cmd.dptr.sgl1.address = (uint64_t)0;
@@ -1425,8 +1481,10 @@ nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair,
			       struct spdk_nvme_rdma_req *rdma_req)
{
	struct nvme_request *req = rdma_req->req;
	void *payload = req->payload.contig_or_cb_arg + req->payload_offset;
	struct spdk_rdma_memory_translation mem_translation;
	struct nvme_rdma_memory_translation_ctx ctx = {
		.addr = req->payload.contig_or_cb_arg + req->payload_offset,
		.length = req->payload_size
	};
	int rc;

	assert(req->payload_size != 0);
@@ -1438,13 +1496,12 @@ nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair,
		return -1;
	}

	rc = spdk_rdma_get_translation(rqpair->mr_map, payload, req->payload_size, &mem_translation);
	rc = nvme_rdma_get_memory_translation(req, rqpair, &ctx);
	if (spdk_unlikely(rc)) {
		SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
		return -1;
	}

	req->cmd.dptr.sgl1.keyed.key = spdk_rdma_memory_translation_get_rkey(&mem_translation);
	req->cmd.dptr.sgl1.keyed.key = ctx.rkey;

	/* The first element of this SGL is pointing at an
	 * spdk_nvmf_cmd object. For this particular command,
@@ -1458,8 +1515,8 @@ nvme_rdma_build_contig_request(struct nvme_rdma_qpair *rqpair,
	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	req->cmd.dptr.sgl1.keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
	req->cmd.dptr.sgl1.keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
	req->cmd.dptr.sgl1.keyed.length = req->payload_size;
	req->cmd.dptr.sgl1.address = (uint64_t)payload;
	req->cmd.dptr.sgl1.keyed.length = (uint32_t)ctx.length;
	req->cmd.dptr.sgl1.address = (uint64_t)ctx.addr;

	return 0;
}
@@ -1473,8 +1530,7 @@ nvme_rdma_build_sgl_request(struct nvme_rdma_qpair *rqpair,
{
	struct nvme_request *req = rdma_req->req;
	struct spdk_nvmf_cmd *cmd = &rqpair->cmds[rdma_req->id];
	struct spdk_rdma_memory_translation mem_translation;
	void *virt_addr;
	struct nvme_rdma_memory_translation_ctx ctx;
	uint32_t remaining_size;
	uint32_t sge_length;
	int rc, max_num_sgl, num_sgl_desc;
@@ -1490,7 +1546,7 @@ nvme_rdma_build_sgl_request(struct nvme_rdma_qpair *rqpair,
	remaining_size = req->payload_size;
	num_sgl_desc = 0;
	do {
		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &sge_length);
		rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &ctx.addr, &sge_length);
		if (rc) {
			return -1;
		}
@@ -1502,19 +1558,19 @@ nvme_rdma_build_sgl_request(struct nvme_rdma_qpair *rqpair,
				    sge_length, NVME_RDMA_MAX_KEYED_SGL_LENGTH);
			return -1;
		}
		rc = spdk_rdma_get_translation(rqpair->mr_map, virt_addr, sge_length, &mem_translation);
		ctx.length = sge_length;
		rc = nvme_rdma_get_memory_translation(req, rqpair, &ctx);
		if (spdk_unlikely(rc)) {
			SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
			return -1;
		}

		cmd->sgl[num_sgl_desc].keyed.key = spdk_rdma_memory_translation_get_rkey(&mem_translation);
		cmd->sgl[num_sgl_desc].keyed.key = ctx.rkey;
		cmd->sgl[num_sgl_desc].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
		cmd->sgl[num_sgl_desc].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
		cmd->sgl[num_sgl_desc].keyed.length = sge_length;
		cmd->sgl[num_sgl_desc].address = (uint64_t)virt_addr;
		cmd->sgl[num_sgl_desc].keyed.length = (uint32_t)ctx.length;
		cmd->sgl[num_sgl_desc].address = (uint64_t)ctx.addr;

		remaining_size -= sge_length;
		remaining_size -= ctx.length;
		num_sgl_desc++;
	} while (remaining_size > 0 && num_sgl_desc < max_num_sgl);

@@ -1577,9 +1633,8 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
				   struct spdk_nvme_rdma_req *rdma_req)
{
	struct nvme_request *req = rdma_req->req;
	struct spdk_rdma_memory_translation mem_translation;
	struct nvme_rdma_memory_translation_ctx ctx;
	uint32_t length;
	void *virt_addr;
	int rc;

	assert(req->payload_size != 0);
@@ -1588,7 +1643,7 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
	assert(req->payload.next_sge_fn != NULL);
	req->payload.reset_sgl_fn(req->payload.contig_or_cb_arg, req->payload_offset);

	rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &virt_addr, &length);
	rc = req->payload.next_sge_fn(req->payload.contig_or_cb_arg, &ctx.addr, &length);
	if (rc) {
		return -1;
	}
@@ -1602,15 +1657,15 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
		length = req->payload_size;
	}

	rc = spdk_rdma_get_translation(rqpair->mr_map, virt_addr, length, &mem_translation);
	ctx.length = length;
	rc = nvme_rdma_get_memory_translation(req, rqpair, &ctx);
	if (spdk_unlikely(rc)) {
		SPDK_ERRLOG("Memory translation failed, rc %d\n", rc);
		return -1;
	}

	rdma_req->send_sgl[1].addr = (uint64_t)virt_addr;
	rdma_req->send_sgl[1].length = length;
	rdma_req->send_sgl[1].lkey = spdk_rdma_memory_translation_get_lkey(&mem_translation);
	rdma_req->send_sgl[1].addr = (uint64_t)ctx.addr;
	rdma_req->send_sgl[1].length = (uint32_t)ctx.length;
	rdma_req->send_sgl[1].lkey = ctx.lkey;

	rdma_req->send_wr.num_sge = 2;

@@ -1623,7 +1678,7 @@ nvme_rdma_build_sgl_inline_request(struct nvme_rdma_qpair *rqpair,
	req->cmd.psdt = SPDK_NVME_PSDT_SGL_MPTR_CONTIG;
	req->cmd.dptr.sgl1.unkeyed.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
	req->cmd.dptr.sgl1.unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
	req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)req->payload_size;
	req->cmd.dptr.sgl1.unkeyed.length = (uint32_t)ctx.length;
	/* Inline only supported for icdoff == 0 currently.  This function will
	 * not get called for controllers with other values. */
	req->cmd.dptr.sgl1.address = (uint64_t)0;
Loading