Commit fafb7d47 authored by Konrad Sztyber's avatar Konrad Sztyber Committed by Jim Harris
Browse files

bdev: enqueue IOs on the memory domain queue only when pushing



The IOs don't need to be put onto the io_memory_domain queue if there's
no need for memory domain push.  This makes push_data consistent with
other memory domain operations (pull_data, pull_md, push_md).

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I85d95f6ce580a15b23f56ab5101e49236f341cb1
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/17763


Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 6a0d4e5e
Loading
Loading
Loading
Loading
+20 −10
Original line number Diff line number Diff line
@@ -1602,16 +1602,12 @@ bdev_io_push_bounce_md_buf(struct spdk_bdev_io *bdev_io)
	bdev_io->internal.data_transfer_cpl(bdev_io, rc);
}

static void
_bdev_io_push_bounce_data_buffer_done(void *ctx, int rc)
static inline void
bdev_io_push_bounce_data_buffer_done(void *ctx, int rc)
{
	struct spdk_bdev_io *bdev_io = ctx;
	struct spdk_bdev_channel *ch = bdev_io->internal.ch;

	assert(bdev_io->internal.data_transfer_cpl);
	TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
	bdev_io_decrement_outstanding(ch, ch->shared_resource);

	if (rc) {
		bdev_io->internal.data_transfer_cpl(bdev_io, rc);
		return;
@@ -1627,6 +1623,18 @@ _bdev_io_push_bounce_data_buffer_done(void *ctx, int rc)
	bdev_io_push_bounce_md_buf(bdev_io);
}

/*
 * Completion callback for an asynchronous memory-domain push
 * (spdk_memory_domain_push_data).  The IO was placed on the channel's
 * io_memory_domain queue and counted as outstanding only for the async
 * push path, so this wrapper undoes both before handing off to the
 * common completion logic in bdev_io_push_bounce_data_buffer_done().
 */
static void
_bdev_io_push_bounce_data_buffer_done(void *ctx, int status)
{
	struct spdk_bdev_io *bdev_io = ctx;
	struct spdk_bdev_channel *ch = bdev_io->internal.ch;

	/* Dequeue the IO enqueued when the push was started, then drop the
	 * matching outstanding-IO count on the channel's shared resource. */
	TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
	bdev_io_decrement_outstanding(ch, ch->shared_resource);

	/* Continue with the sync/async-common completion path; status is the
	 * result of the memory domain push (0 on success, negative errno
	 * presumably — confirm against spdk_memory_domain_push_data contract). */
	bdev_io_push_bounce_data_buffer_done(ctx, status);
}

static inline void
bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io)
{
@@ -1634,12 +1642,11 @@ bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io)
	int rc = 0;

	assert(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
	bdev_io_increment_outstanding(ch, ch->shared_resource);

	/* if this is read path, copy data from bounce buffer to original buffer */
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		if (bdev_io_use_memory_domain(bdev_io)) {
			TAILQ_INSERT_TAIL(&ch->io_memory_domain, bdev_io, internal.link);
			bdev_io_increment_outstanding(ch, ch->shared_resource);
			/* If memory domain is used then we need to call async push function */
			rc = spdk_memory_domain_push_data(bdev_io->internal.memory_domain,
							  bdev_io->internal.memory_domain_ctx,
@@ -1652,6 +1659,9 @@ bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io)
				/* Continue IO completion in async callback */
				return;
			}

			TAILQ_REMOVE(&ch->io_memory_domain, bdev_io, internal.link);
			bdev_io_decrement_outstanding(ch, ch->shared_resource);
			SPDK_ERRLOG("Failed to push data to memory domain %s\n",
				    spdk_memory_domain_get_dma_device_id(bdev_io->internal.memory_domain));
		} else {
@@ -1662,7 +1672,7 @@ bdev_io_push_bounce_data(struct spdk_bdev_io *bdev_io)
		}
	}

	_bdev_io_push_bounce_data_buffer_done(bdev_io, rc);
	bdev_io_push_bounce_data_buffer_done(bdev_io, rc);
}

static inline void