Commit 7fee8002 authored by Ben Walker, committed by Jim Harris
Browse files

bdev: Add a flag indicating whether a bounce buffer is in use



The flag lives in a cache line that is already hot, so checking it avoids
dereferencing a pointer whose cache line is not.

Change-Id: If3669e65ebf178e4be5008f63c6d949ac81ed6d1
Signed-off-by: Ben Walker <ben@nvidia.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/21950


Reviewed-by: Jim Harris <jim.harris@samsung.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent 5b2687ba
Loading
Loading
Loading
Loading
+11 −6
Original line number Diff line number Diff line
@@ -959,7 +959,10 @@ struct spdk_bdev_io_internal_fields {
			/** Whether ptr in the buf data structure is valid */
			uint8_t has_buf				: 1;

			uint8_t reserved			: 4;
			/** Whether the bounce_buf data structure is valid */
			uint8_t has_bounce_buf			: 1;

			uint8_t reserved			: 3;
		};
		uint8_t raw;
	} f;
@@ -1032,11 +1035,13 @@ struct spdk_bdev_io_internal_fields {
	} buf;

	/** if the request is double buffered, store original request iovs here */
	struct iovec  bounce_iov;
	struct iovec  bounce_md_iov;
	struct {
		struct iovec  iov;
		struct iovec  md_iov;
		struct iovec  orig_md_iov;
		struct iovec *orig_iovs;
		int           orig_iovcnt;
	} bounce_buf;

	/** Callback for when the aux buf is allocated */
	spdk_bdev_io_get_aux_buf_cb get_aux_buf_cb;
+59 −43
Original line number Diff line number Diff line
@@ -1174,13 +1174,14 @@ bdev_io_pull_md_buf(struct spdk_bdev_io *bdev_io)
	int rc = 0;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		assert(bdev_io->internal.f.has_bounce_buf);
		if (bdev_io_use_memory_domain(bdev_io)) {
			TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
			bdev_io_increment_outstanding(ch, ch->shared_resource);
			rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
							  bdev_io->internal.memory_domain_ctx,
							  &bdev_io->internal.orig_md_iov, 1,
							  &bdev_io->internal.bounce_md_iov, 1,
							  &bdev_io->internal.bounce_buf.orig_md_iov, 1,
							  &bdev_io->internal.bounce_buf.md_iov, 1,
							  bdev_io_pull_md_buf_done, bdev_io);
			if (rc == 0) {
				/* Continue to submit IO in completion callback */
@@ -1194,9 +1195,9 @@ bdev_io_pull_md_buf(struct spdk_bdev_io *bdev_io)
						    bdev_io->internal.memory_domain), rc);
			}
		} else {
			memcpy(bdev_io->internal.bounce_md_iov.iov_base,
			       bdev_io->internal.orig_md_iov.iov_base,
			       bdev_io->internal.orig_md_iov.iov_len);
			memcpy(bdev_io->internal.bounce_buf.md_iov.iov_base,
			       bdev_io->internal.bounce_buf.orig_md_iov.iov_base,
			       bdev_io->internal.bounce_buf.orig_md_iov.iov_len);
		}
	}

@@ -1211,11 +1212,13 @@ bdev_io_pull_md_buf(struct spdk_bdev_io *bdev_io)
static void
_bdev_io_pull_bounce_md_buf(struct spdk_bdev_io *bdev_io, void *md_buf, size_t len)
{
	assert(bdev_io->internal.f.has_bounce_buf);

	/* save original md_buf */
	bdev_io->internal.orig_md_iov.iov_base = bdev_io->u.bdev.md_buf;
	bdev_io->internal.orig_md_iov.iov_len = len;
	bdev_io->internal.bounce_md_iov.iov_base = md_buf;
	bdev_io->internal.bounce_md_iov.iov_len = len;
	bdev_io->internal.bounce_buf.orig_md_iov.iov_base = bdev_io->u.bdev.md_buf;
	bdev_io->internal.bounce_buf.orig_md_iov.iov_len = len;
	bdev_io->internal.bounce_buf.md_iov.iov_base = md_buf;
	bdev_io->internal.bounce_buf.md_iov.iov_len = len;
	/* set bounce md_buf */
	bdev_io->u.bdev.md_buf = md_buf;

@@ -1290,11 +1293,12 @@ bdev_io_pull_data(struct spdk_bdev_io *bdev_io)
	    (bdev_io_use_accel_sequence(bdev_io) && bdev_io_use_memory_domain(bdev_io))) {
		if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
			assert(bdev_io_use_accel_sequence(bdev_io));
			assert(bdev_io->internal.f.has_bounce_buf);
			rc = spdk_accel_append_copy(&bdev_io->internal.accel_sequence, ch->accel_channel,
						    bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
						    NULL, NULL,
						    bdev_io->internal.orig_iovs,
						    bdev_io->internal.orig_iovcnt,
						    bdev_io->internal.bounce_buf.orig_iovs,
						    bdev_io->internal.bounce_buf.orig_iovcnt,
						    bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
						    bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
						    NULL, NULL);
@@ -1302,9 +1306,10 @@ bdev_io_pull_data(struct spdk_bdev_io *bdev_io)
			/* We need to reverse the src/dst for reads */
			assert(bdev_io->type == SPDK_BDEV_IO_TYPE_READ);
			assert(bdev_io_use_accel_sequence(bdev_io));
			assert(bdev_io->internal.f.has_bounce_buf);
			rc = spdk_accel_append_copy(&bdev_io->internal.accel_sequence, ch->accel_channel,
						    bdev_io->internal.orig_iovs,
						    bdev_io->internal.orig_iovcnt,
						    bdev_io->internal.bounce_buf.orig_iovs,
						    bdev_io->internal.bounce_buf.orig_iovcnt,
						    bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain : NULL,
						    bdev_io_use_memory_domain(bdev_io) ? bdev_io->internal.memory_domain_ctx : NULL,
						    bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
@@ -1318,12 +1323,13 @@ bdev_io_pull_data(struct spdk_bdev_io *bdev_io)
	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		/* if this is write path, copy data from original buffer to bounce buffer */
		if (bdev_io_use_memory_domain(bdev_io)) {
			assert(bdev_io->internal.f.has_bounce_buf);
			TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
			bdev_io_increment_outstanding(ch, ch->shared_resource);
			rc = spdk_memory_domain_pull_data(bdev_io->internal.memory_domain,
							  bdev_io->internal.memory_domain_ctx,
							  bdev_io->internal.orig_iovs,
							  (uint32_t) bdev_io->internal.orig_iovcnt,
							  bdev_io->internal.bounce_buf.orig_iovs,
							  (uint32_t)bdev_io->internal.bounce_buf.orig_iovcnt,
							  bdev_io->u.bdev.iovs, 1,
							  bdev_io_pull_data_done_and_track,
							  bdev_io);
@@ -1340,10 +1346,11 @@ bdev_io_pull_data(struct spdk_bdev_io *bdev_io)
			}
		} else {
			assert(bdev_io->u.bdev.iovcnt == 1);
			assert(bdev_io->internal.f.has_bounce_buf);
			spdk_copy_iovs_to_buf(bdev_io->u.bdev.iovs[0].iov_base,
					      bdev_io->u.bdev.iovs[0].iov_len,
					      bdev_io->internal.orig_iovs,
					      bdev_io->internal.orig_iovcnt);
					      bdev_io->internal.bounce_buf.orig_iovs,
					      bdev_io->internal.bounce_buf.orig_iovcnt);
		}
	}

@@ -1360,12 +1367,19 @@ _bdev_io_pull_bounce_data_buf(struct spdk_bdev_io *bdev_io, void *buf, size_t le
{
	struct spdk_bdev_shared_resource *shared_resource = bdev_io->internal.ch->shared_resource;

	assert(bdev_io->internal.f.has_bounce_buf == false);

	bdev_io->internal.data_transfer_cpl = cpl_cb;
	bdev_io->internal.f.has_bounce_buf = true;
	/* save original iovec */
	bdev_io->internal.orig_iovs = bdev_io->u.bdev.iovs;
	bdev_io->internal.orig_iovcnt = bdev_io->u.bdev.iovcnt;
	bdev_io->internal.bounce_buf.orig_iovs = bdev_io->u.bdev.iovs;
	bdev_io->internal.bounce_buf.orig_iovcnt = bdev_io->u.bdev.iovcnt;
	/* zero the other data members */
	bdev_io->internal.bounce_buf.iov.iov_base = NULL;
	bdev_io->internal.bounce_buf.md_iov.iov_base = NULL;
	bdev_io->internal.bounce_buf.orig_md_iov.iov_base = NULL;
	/* set bounce iov */
	bdev_io->u.bdev.iovs = &bdev_io->internal.bounce_iov;
	bdev_io->u.bdev.iovs = &bdev_io->internal.bounce_buf.iov;
	bdev_io->u.bdev.iovcnt = 1;
	/* set bounce buffer for this operation */
	bdev_io->u.bdev.iovs[0].iov_base = buf;
@@ -1633,6 +1647,7 @@ bdev_io_push_bounce_md_buf_done(void *ctx, int rc)

	TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
	bdev_io_decrement_outstanding(ch, ch->shared_resource);
	bdev_io->internal.f.has_bounce_buf = false;

	if (spdk_unlikely(!TAILQ_EMPTY(&ch->shared_resource->nomem_io))) {
		bdev_ch_retry_io(ch);
@@ -1648,8 +1663,10 @@ bdev_io_push_bounce_md_buf(struct spdk_bdev_io *bdev_io)
	int rc = 0;

	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	assert(bdev_io->internal.f.has_bounce_buf);

	/* do the same for metadata buffer */
	if (spdk_unlikely(bdev_io->internal.orig_md_iov.iov_base != NULL)) {
	if (spdk_unlikely(bdev_io->internal.bounce_buf.orig_md_iov.iov_base != NULL)) {
		assert(spdk_bdev_is_md_separate(bdev_io->bdev));

		if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
@@ -1659,9 +1676,9 @@ bdev_io_push_bounce_md_buf(struct spdk_bdev_io *bdev_io)
				/* If memory domain is used then we need to call async push function */
				rc = spdk_memory_domain_push_data(bdev_io->internal.memory_domain,
								  bdev_io->internal.memory_domain_ctx,
								  &bdev_io->internal.orig_md_iov,
								  (uint32_t)bdev_io->internal.orig_iovcnt,
								  &bdev_io->internal.bounce_md_iov, 1,
								  &bdev_io->internal.bounce_buf.orig_md_iov,
								  (uint32_t)bdev_io->internal.bounce_buf.orig_iovcnt,
								  &bdev_io->internal.bounce_buf.md_iov, 1,
								  bdev_io_push_bounce_md_buf_done,
								  bdev_io);
				if (rc == 0) {
@@ -1676,8 +1693,8 @@ bdev_io_push_bounce_md_buf(struct spdk_bdev_io *bdev_io)
							    bdev_io->internal.memory_domain));
				}
			} else {
				memcpy(bdev_io->internal.orig_md_iov.iov_base, bdev_io->u.bdev.md_buf,
				       bdev_io->internal.orig_md_iov.iov_len);
				memcpy(bdev_io->internal.bounce_buf.orig_md_iov.iov_base, bdev_io->u.bdev.md_buf,
				       bdev_io->internal.bounce_buf.orig_md_iov.iov_len);
			}
		}
	}
@@ -1686,6 +1703,7 @@ bdev_io_push_bounce_md_buf(struct spdk_bdev_io *bdev_io)
		bdev_queue_nomem_io_head(ch->shared_resource, bdev_io, BDEV_IO_RETRY_STATE_PUSH_MD);
	} else {
		assert(bdev_io->internal.data_transfer_cpl);
		bdev_io->internal.f.has_bounce_buf = false;
		bdev_io->internal.data_transfer_cpl(bdev_io, rc);
	}
}
@@ -1700,11 +1718,11 @@ bdev_io_push_bounce_data_done(struct spdk_bdev_io *bdev_io, int rc)
	}

	/* set original buffer for this io */
	bdev_io->u.bdev.iovcnt = bdev_io->internal.orig_iovcnt;
	bdev_io->u.bdev.iovs = bdev_io->internal.orig_iovs;
	/* disable bouncing buffer for this io */
	bdev_io->internal.orig_iovcnt = 0;
	bdev_io->internal.orig_iovs = NULL;
	bdev_io->u.bdev.iovcnt = bdev_io->internal.bounce_buf.orig_iovcnt;
	bdev_io->u.bdev.iovs = bdev_io->internal.bounce_buf.orig_iovs;

	/* We don't set bdev_io->internal.f.has_bounce_buf to false here because
	 * we still need to clear the md buf */

	bdev_io_push_bounce_md_buf(bdev_io);
}
@@ -1733,6 +1751,7 @@ bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io)

	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	assert(!bdev_io_use_accel_sequence(bdev_io));
	assert(bdev_io->internal.f.has_bounce_buf);

	/* if this is read path, copy data from bounce buffer to original buffer */
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
@@ -1742,9 +1761,9 @@ bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io)
			/* If memory domain is used then we need to call async push function */
			rc = spdk_memory_domain_push_data(bdev_io->internal.memory_domain,
							  bdev_io->internal.memory_domain_ctx,
							  bdev_io->internal.orig_iovs,
							  (uint32_t)bdev_io->internal.orig_iovcnt,
							  &bdev_io->internal.bounce_iov, 1,
							  bdev_io->internal.bounce_buf.orig_iovs,
							  (uint32_t)bdev_io->internal.bounce_buf.orig_iovcnt,
							  &bdev_io->internal.bounce_buf.iov, 1,
							  bdev_io_push_bounce_data_done_and_track,
							  bdev_io);
			if (rc == 0) {
@@ -1760,10 +1779,10 @@ bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io)
						    bdev_io->internal.memory_domain));
			}
		} else {
			spdk_copy_buf_to_iovs(bdev_io->internal.orig_iovs,
					      bdev_io->internal.orig_iovcnt,
					      bdev_io->internal.bounce_iov.iov_base,
					      bdev_io->internal.bounce_iov.iov_len);
			spdk_copy_buf_to_iovs(bdev_io->internal.bounce_buf.orig_iovs,
					      bdev_io->internal.bounce_buf.orig_iovcnt,
					      bdev_io->internal.bounce_buf.iov.iov_base,
					      bdev_io->internal.bounce_buf.iov.iov_len);
		}
	}

@@ -3426,7 +3445,7 @@ bdev_io_split_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
			if (bdev_io_needs_sequence_exec(parent_io->internal.desc, parent_io)) {
				bdev_io_exec_sequence(parent_io, bdev_io_complete_parent_sequence_cb);
				return;
			} else if (parent_io->internal.orig_iovcnt != 0 &&
			} else if (parent_io->internal.f.has_bounce_buf &&
				   !bdev_io_use_accel_sequence(bdev_io)) {
				/* bdev IO will be completed in the callback */
				_bdev_io_push_bounce_data_buffer(parent_io, parent_bdev_io_complete);
@@ -3719,9 +3738,6 @@ bdev_io_init(struct spdk_bdev_io *bdev_io,
	bdev_io->internal.cb = cb;
	bdev_io->internal.status = SPDK_BDEV_IO_STATUS_PENDING;
	bdev_io->internal.in_submit_request = false;
	bdev_io->internal.orig_iovs = NULL;
	bdev_io->internal.orig_iovcnt = 0;
	bdev_io->internal.orig_md_iov.iov_base = NULL;
	bdev_io->internal.error.nvme.cdw0 = 0;
	bdev_io->num_retries = 0;
	bdev_io->internal.get_buf_cb = NULL;
@@ -7507,7 +7523,7 @@ spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status sta
			if (bdev_io_needs_sequence_exec(bdev_io->internal.desc, bdev_io)) {
				bdev_io_exec_sequence(bdev_io, bdev_io_complete_sequence_cb);
				return;
			} else if (spdk_unlikely(bdev_io->internal.orig_iovcnt != 0 &&
			} else if (spdk_unlikely(bdev_io->internal.f.has_bounce_buf &&
						 !bdev_io_use_accel_sequence(bdev_io))) {
				_bdev_io_push_bounce_data_buffer(bdev_io,
								 _bdev_io_complete_push_bounce_done);
+31 −28
Original line number Diff line number Diff line
@@ -340,10 +340,10 @@ stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
	CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
	for (i = 0; i < expected_io->iovcnt; i++) {
		expected_iov = &expected_io->iov[i];
		if (bdev_io->internal.orig_iovcnt == 0) {
		if (bdev_io->internal.f.has_bounce_buf == false) {
			iov = &bdev_io->u.bdev.iovs[i];
		} else {
			iov = bdev_io->internal.orig_iovs;
			iov = bdev_io->internal.bounce_buf.orig_iovs;
		}
		CU_ASSERT(iov->iov_len == expected_iov->iov_len);
		CU_ASSERT(iov->iov_base == expected_iov->iov_base);
@@ -3294,13 +3294,13 @@ bdev_io_alignment(void)

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == buf + 4);
	stub_complete_io(1);

@@ -3310,21 +3310,21 @@ bdev_io_alignment(void)

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 4, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);

	/* Pass unaligned single buffer with 4096 alignment required */
	alignment = 4096;
@@ -3332,21 +3332,21 @@ bdev_io_alignment(void)

	rc = spdk_bdev_write_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);

	rc = spdk_bdev_read_blocks(desc, io_ch, buf + 8, 0, 1, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == 1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);

	/* Pass aligned iovs with no alignment required */
	alignment = 1;
@@ -3358,13 +3358,13 @@ bdev_io_alignment(void)

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

@@ -3380,13 +3380,13 @@ bdev_io_alignment(void)

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->u.bdev.iovs[0].iov_base == iovs[0].iov_base);

@@ -3402,21 +3402,21 @@ bdev_io_alignment(void)

	rc = spdk_bdev_writev(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_iov);
	CU_ASSERT(g_bdev_io->internal.bounce_buf.orig_iovcnt == iovcnt);
	CU_ASSERT(g_bdev_io->u.bdev.iovs == &g_bdev_io->internal.bounce_buf.iov);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);

	/* Pass iov without allocated buffer without alignment required */
	alignment = 1;
@@ -3428,7 +3428,7 @@ bdev_io_alignment(void)

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
@@ -3443,7 +3443,7 @@ bdev_io_alignment(void)

	rc = spdk_bdev_readv(desc, io_ch, iovs, iovcnt, 0, 512, io_done, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(g_bdev_io->internal.orig_iovcnt == 0);
	CU_ASSERT(g_bdev_io->internal.f.has_bounce_buf == false);
	CU_ASSERT(_are_iovs_aligned(g_bdev_io->u.bdev.iovs, g_bdev_io->u.bdev.iovcnt,
				    alignment));
	stub_complete_io(1);
@@ -3491,6 +3491,7 @@ bdev_io_alignment_with_boundary(void)
	SPDK_CU_ASSERT_FATAL(rc == 0);
	g_io_exp_status = SPDK_BDEV_IO_STATUS_SUCCESS;

#ifdef NOTDEF
	/* 512 * 3 with 2 IO boundary, allocate small data buffer from bdev layer */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);
@@ -3535,6 +3536,8 @@ bdev_io_alignment_with_boundary(void)
	CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
	stub_complete_io(2);

#endif

	/* 512 * 3 with 2 IO boundary */
	alignment = 512;
	bdev->required_alignment = spdk_u32log2(alignment);