Commit 3be29571 authored by Artur Paszkiewicz's avatar Artur Paszkiewicz Committed by Tomasz Zawadzki
Browse files

lib/bdev: fix use after free when locking a range and freeing channel



LBA locking uses a poller to wait for IOs in the locked range to
complete on the channel. The channel may get freed while the poller is
running, so take a channel reference when starting the poller to prevent
that.

Change-Id: I83e92629e1958d1502f47ee927f97d6194c02440
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@solidigm.com>
Reviewed-on: https://review.spdk.io/c/spdk/spdk/+/26492


Community-CI: Mellanox Build Bot
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Tested-by: SPDK Automated Test System <spdkbot@gmail.com>
Reviewed-by: Ben Walker <ben@nvidia.com>
Reviewed-by: Jim Harris <jim.harris@nvidia.com>
parent 15de0539
Loading
Loading
Loading
Loading
+12 −0
Original line number Diff line number Diff line
@@ -10380,6 +10380,7 @@ struct locked_lba_range_ctx {
	struct lba_range		*current_range;
	struct lba_range		*owner_range;
	struct spdk_poller		*poller;
	struct spdk_io_channel		*ch_ref;
	lock_range_cb			cb_fn;
	void				*cb_arg;
};
@@ -10448,11 +10449,22 @@ bdev_lock_lba_range_check_io(void *_i)
	 */
	TAILQ_FOREACH(bdev_io, &ch->io_submitted, internal.ch_link) {
		if (bdev_io_range_is_locked(bdev_io, range)) {
			if (ctx->ch_ref == NULL) {
				/* Take another reference to ch to prevent it getting freed while
				 * the poller is running. */
				ctx->ch_ref = spdk_get_io_channel(__bdev_to_io_dev(bdev_io->bdev));
				assert(ctx->ch_ref != NULL);
			}
			ctx->poller = SPDK_POLLER_REGISTER(bdev_lock_lba_range_check_io, i, 100);
			return SPDK_POLLER_BUSY;
		}
	}

	if (ctx->ch_ref != NULL) {
		spdk_put_io_channel(ctx->ch_ref);
		ctx->ch_ref = NULL;
	}

	spdk_bdev_for_each_channel_continue(i, 0);
	return SPDK_POLLER_BUSY;
}