Commit 29f86a26 authored by Shuhei Matsumoto's avatar Shuhei Matsumoto Committed by Tomasz Zawadzki
Browse files

lib/bdev: spdk_bdev_abort supports queued I/O due to buffer allocation



Buffer allocation is done after redirection to the QoS thread.
Hence add a new helper function bdev_abort_queued_io() and add
its call to bdev_io_do_submit() for both buf_need_small and
buf_need_large.

For the zcopy API, buffer allocation is done before submission, and
the caller can get the bdev I/O object, so it can abort the I/O
directly if needed.

Signed-off-by: default avatarShuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Change-Id: I2d6170de5ab2ba4d260df99db3e376c0e2c5ffaf
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/2250


Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Community-CI: Broadcom CI
Reviewed-by: default avatarAleksey Marchuk <alexeymar@mellanox.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
Reviewed-by: default avatarBen Walker <benjamin.walker@intel.com>
Reviewed-by: default avatarMichael Haeuptle <michaelhaeuptle@gmail.com>
parent c2ce2f85
Loading
Loading
Loading
Loading
+21 −1
Original line number Diff line number Diff line
@@ -373,6 +373,7 @@ bdev_unlock_lba_range(struct spdk_bdev_desc *desc, struct spdk_io_channel *_ch,
static inline void bdev_io_complete(void *ctx);

static bool bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort);
static bool bdev_abort_buf_io(bdev_io_stailq_t *queue, struct spdk_bdev_io *bio_to_abort);

void
spdk_bdev_get_opts(struct spdk_bdev_opts *opts)
@@ -1679,9 +1680,12 @@ bdev_io_do_submit(struct spdk_bdev_channel *bdev_ch, struct spdk_bdev_io *bdev_i
	struct spdk_bdev_shared_resource *shared_resource = bdev_ch->shared_resource;

	if (spdk_unlikely(bdev_io->type == SPDK_BDEV_IO_TYPE_ABORT)) {
		struct spdk_bdev_mgmt_channel *mgmt_channel = shared_resource->mgmt_ch;
		struct spdk_bdev_io *bio_to_abort = bdev_io->u.abort.bio_to_abort;

		if (bdev_abort_queued_io(&shared_resource->nomem_io, bio_to_abort)) {
		if (bdev_abort_queued_io(&shared_resource->nomem_io, bio_to_abort) ||
		    bdev_abort_buf_io(&mgmt_channel->need_buf_small, bio_to_abort) ||
		    bdev_abort_buf_io(&mgmt_channel->need_buf_large, bio_to_abort)) {
			_bdev_io_complete_in_submit(bdev_ch, bdev_io,
						    SPDK_BDEV_IO_STATUS_SUCCESS);
			return;
@@ -2676,6 +2680,22 @@ bdev_abort_queued_io(bdev_io_tailq_t *queue, struct spdk_bdev_io *bio_to_abort)
	return false;
}

/* Search the given buffer-wait queue for bio_to_abort.  If it is found,
 * unlink it from the queue and complete it with ABORTED status.
 * Returns true when the I/O was found and aborted, false otherwise.
 */
static bool
bdev_abort_buf_io(bdev_io_stailq_t *queue, struct spdk_bdev_io *bio_to_abort)
{
	struct spdk_bdev_io *iter;

	STAILQ_FOREACH(iter, queue, internal.buf_link) {
		if (iter != bio_to_abort) {
			continue;
		}

		/* Found the queued I/O: remove it and report it as aborted. */
		STAILQ_REMOVE(queue, bio_to_abort, spdk_bdev_io, internal.buf_link);
		spdk_bdev_io_complete(bio_to_abort, SPDK_BDEV_IO_STATUS_ABORTED);
		return true;
	}

	return false;
}

static void
bdev_qos_channel_destroy(void *cb_arg)
{