Commit 35c6d6ef authored by Yalong Wang's avatar Yalong Wang Committed by Tomasz Zawadzki
Browse files

bdev/raid: allow unmap during raid process



Previously, unmap was blocked during raid1 rebuild,
causing lvol deletion on raid bdev-backed lvstores to fail.
This patch allows unmap requests during rebuild
to ensure proper lvol deletion and capacity reclamation.

Change-Id: I78a595fde1de7f3b040978eb6ff286f0c7e4b271
Signed-off-by: default avatarYalong Wang <yalong9@staff.sina.com.cn>
Reviewed-on: https://review.spdk.io/c/spdk/spdk/+/26598


Tested-by: default avatarSPDK Automated Test System <spdkbot@gmail.com>
Community-CI: Mellanox Build Bot
Reviewed-by: default avatarMateusz Kozlowski <mateusz.kozlowski@solidigm.com>
Reviewed-by: default avatarArtur Paszkiewicz <artur.paszkiewicz@solidigm.com>
Reviewed-by: default avatarChangpeng Liu <changpeliu@tencent.com>
parent 28f448a6
Loading
Loading
Loading
Loading
+30 −9
Original line number Diff line number Diff line
@@ -636,7 +636,21 @@ raid_bdev_io_complete(struct raid_bdev_io *raid_io, enum spdk_bdev_io_status sta
				raid_io->base_bdev_io_submitted = 0;
				raid_io->raid_ch = raid_io->raid_ch->process.ch_processed;

				switch (bdev_io->type) {
				case SPDK_BDEV_IO_TYPE_READ:
				case SPDK_BDEV_IO_TYPE_WRITE:
					raid_io->raid_bdev->module->submit_rw_request(raid_io);
					break;

				case SPDK_BDEV_IO_TYPE_FLUSH:
				case SPDK_BDEV_IO_TYPE_UNMAP:
					raid_io->raid_bdev->module->submit_null_payload_request(raid_io);
					break;
				default:
					SPDK_ERRLOG("io type %u should not happen split\n", bdev_io->type);
					raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
					break;
				}
				return;
			}
		}
@@ -838,8 +852,8 @@ raid_bdev_io_split(struct raid_bdev_io *raid_io, uint64_t split_offset)
	}
}

static void
raid_bdev_submit_rw_request(struct raid_bdev_io *raid_io)
static inline void
_raid_bdev_request_split(struct raid_bdev_io *raid_io)
{
	struct raid_bdev_io_channel *raid_ch = raid_io->raid_ch;

@@ -867,10 +881,22 @@ raid_bdev_submit_rw_request(struct raid_bdev_io *raid_io)
			raid_io->raid_ch = raid_ch->process.ch_processed;
		}
	}
}

/*
 * Submit a read/write request to the raid module.
 *
 * If a background raid process (e.g. rebuild) is active on this channel,
 * _raid_bdev_request_split() first splits the I/O at the process offset --
 * presumably so each part targets a consistently processed/unprocessed
 * region; confirm against _raid_bdev_request_split(), whose body is not
 * fully visible in this hunk.  The (possibly adjusted) request is then
 * handed to the module's submit_rw_request callback.
 */
static void
raid_bdev_submit_rw_request(struct raid_bdev_io *raid_io)
{
	_raid_bdev_request_split(raid_io);
	raid_io->raid_bdev->module->submit_rw_request(raid_io);
}

/*
 * Submit a null-payload request (FLUSH/UNMAP) to the raid module.
 *
 * Mirrors raid_bdev_submit_rw_request(): the request is first split at the
 * active process boundary via _raid_bdev_request_split(), which is what
 * lets unmap proceed during rebuild instead of being failed outright (see
 * the removed "TODO: rebuild support" branch in raid_bdev_submit_request).
 */
static void
raid_bdev_submit_null_payload_request(struct raid_bdev_io *raid_io)
{
	_raid_bdev_request_split(raid_io);
	raid_io->raid_bdev->module->submit_null_payload_request(raid_io);
}

/*
 * brief:
 * Callback function to spdk_bdev_io_get_buf.
@@ -965,12 +991,7 @@ raid_bdev_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_i

	case SPDK_BDEV_IO_TYPE_FLUSH:
	case SPDK_BDEV_IO_TYPE_UNMAP:
		if (raid_io->raid_bdev->process != NULL) {
			/* TODO: rebuild support */
			raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
			return;
		}
		raid_io->raid_bdev->module->submit_null_payload_request(raid_io);
		raid_bdev_submit_null_payload_request(raid_io);
		break;

	default:
+36 −6
Original line number Diff line number Diff line
@@ -75,20 +75,20 @@ ut_raid_start(struct raid_bdev *raid_bdev)
}

static void
ut_raid_submit_rw_request_defered_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
ut_raid_submit_request_defered_cb(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;

	raid_bdev_io_complete(raid_io, success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
}

static void
ut_raid_submit_rw_request(struct raid_bdev_io *raid_io)
static inline void
ut_raid_submit_request(struct raid_bdev_io *raid_io)
{
	if (g_bdev_io_defer_completion) {
		struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);

		bdev_io->internal.cb = ut_raid_submit_rw_request_defered_cb;
		bdev_io->internal.cb = ut_raid_submit_request_defered_cb;
		bdev_io->internal.caller_ctx = raid_io;
		TAILQ_INSERT_TAIL(&g_deferred_ios, bdev_io, internal.link);
		return;
@@ -97,11 +97,16 @@ ut_raid_submit_rw_request(struct raid_bdev_io *raid_io)
			      g_child_io_status_flag ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
}

/*
 * Unit-test stub for the raid module's submit_rw_request callback.
 * Delegates to the shared ut_raid_submit_request() helper, which (per the
 * surrounding hunks) either defers completion onto g_deferred_ios or
 * completes immediately with g_child_io_status_flag.
 */
static void
ut_raid_submit_rw_request(struct raid_bdev_io *raid_io)
{
	ut_raid_submit_request(raid_io);
}

static void
ut_raid_submit_null_payload_request(struct raid_bdev_io *raid_io)
{
	raid_bdev_io_complete(raid_io,
			      g_child_io_status_flag ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED);
	ut_raid_submit_request(raid_io);
}

static void
@@ -1739,6 +1744,31 @@ test_raid_io_split(void)

	bdev_io_cleanup(bdev_io);

	/* test split of unmap io */
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	_bdev_io_initialize(bdev_io, ch, &pbdev->bdev, 0, g_strip_size, SPDK_BDEV_IO_TYPE_UNMAP, 0,
			    0);

	split_offset = 1;
	raid_ch->process.offset = split_offset;
	raid_bdev_submit_request(ch, bdev_io);
	CU_ASSERT(raid_io->num_blocks == g_strip_size - split_offset);
	CU_ASSERT(raid_io->offset_blocks == split_offset);

	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == split_offset);
	CU_ASSERT(raid_io->offset_blocks == 0);

	complete_deferred_ios();
	CU_ASSERT(raid_io->num_blocks == g_strip_size);
	CU_ASSERT(raid_io->offset_blocks == 0);

	CU_ASSERT(g_io_comp_status == g_child_io_status_flag);

	bdev_io_cleanup(bdev_io);

	spdk_put_io_channel(ch);
	free_test_req(&req);
	pbdev->process = NULL;