Commit 32e69f29 authored by paul luse, committed by Jim Harris
Browse files

bdev/raid: remove randomness from test_multi_raid_with_io()



No additional value in randomizing read vs. write, which raid, and
which channel. Still do a read and a write, but fix the other values.

Change-Id: I5a4f023731119230d3eb49ae28421819144b90bc
Signed-off-by: paul luse <paul.e.luse@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/454509


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent c009c2eb
Loading
Loading
Loading
Loading
+24 −25
Original line number Diff line number Diff line
@@ -2139,7 +2139,7 @@ test_multi_raid_no_io(void)
	reset_globals();
}

/* Create multiple raids, fire IOs randomly on various raids */
/* Create multiple raids, fire IOs on raids */
static void
test_multi_raid_with_io(void)
{
@@ -2154,11 +2154,8 @@ test_multi_raid_with_io(void)
	struct raid_bdev_io_channel *ch_ctx;
	struct spdk_bdev_io *bdev_io;
	uint64_t io_len;
	uint64_t lba;
	struct spdk_io_channel *ch_random;
	struct raid_bdev_io_channel *ch_ctx_random;
	uint64_t lba = 0;
	int16_t iotype;
	uint32_t raid_random;

	set_globals();
	construct_req = calloc(g_max_raids, sizeof(struct rpc_construct_raid_bdev));
@@ -2196,28 +2193,30 @@ test_multi_raid_with_io(void)
		}
	}

	lba = 0;
	/* This will perform a write on the first raid and a read on the second. It can be
	 * expanded in the future to perform r/w on each raid device in the event that
	 * multiple raid levels are supported.
	 */
	for (i = 0; i < g_max_raids; i++) {
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	io_len = (rand() % g_strip_size) + 1;
	iotype = (rand() % 2) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
		io_len = g_strip_size;
		iotype = (i) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
		memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
		g_io_output_index = 0;
	raid_random = rand() % g_max_raids;
	ch_random = &ch[raid_random];
	ch_ctx_random = spdk_io_channel_get_ctx(ch_random);
		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {
		if (strcmp(pbdev->bdev.name, construct_req[raid_random].name) == 0) {
			if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
				break;
			}
		}
		bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, iotype);
		CU_ASSERT(pbdev != NULL);
	raid_bdev_submit_request(ch_random, bdev_io);
	verify_io(bdev_io, g_max_base_drives, ch_ctx_random, pbdev,
		raid_bdev_submit_request(ch, bdev_io);
		verify_io(bdev_io, g_max_base_drives, ch_ctx, pbdev,
			  g_child_io_status_flag);
		bdev_io_cleanup(bdev_io);
		free(bdev_io);
	}

	for (i = 0; i < g_max_raids; i++) {
		TAILQ_FOREACH(pbdev, &g_raid_bdev_list, global_link) {