Commit 0d4a9b1d authored by Tomasz Zawadzki
Browse files

test/lvs_grow: perform lvs grow during I/O



Added test that focuses on performing lvs grow operation
during I/O.

First test case verifies increasing lvs size while bdevperf
is issuing I/O. At the end, the lvs is reloaded after its
metadata has been persisted cleanly.

Second test case follows the above, but adds
dirty shutdown after changing lvs size.
This additionally verifies blobstore recovery
path for grow operation.

Change-Id: I3d9c37468befbfeaebcec9535f8ffbd78d437223
Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/19162


Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Reviewed-by: Michal Berger <michal.berger@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <jim.harris@samsung.com>
parent 52de9a10
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -45,6 +45,7 @@ fi

run_test "nvmf_host_management" $rootdir/test/nvmf/target/host_management.sh "${TEST_ARGS[@]}"
run_test "nvmf_lvol" $rootdir/test/nvmf/target/nvmf_lvol.sh "${TEST_ARGS[@]}"
run_test "nvmf_lvs_grow" $rootdir/test/nvmf/target/nvmf_lvs_grow.sh "${TEST_ARGS[@]}"
run_test "nvmf_bdev_io_wait" $rootdir/test/nvmf/target/bdev_io_wait.sh "${TEST_ARGS[@]}"
run_test "nvmf_queue_depth" $rootdir/test/nvmf/target/queue_depth.sh "${TEST_ARGS[@]}"
run_test "nvmf_multipath" $rootdir/test/nvmf/target/multipath.sh "${TEST_ARGS[@]}"
+102 −0
Original line number Diff line number Diff line
#!/usr/bin/env bash
#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2023 Intel Corporation
#  All rights reserved.
#
# Resolve this script's directory and the repository root so the test can be
# run from any working directory.
testdir=$(readlink -f "$(dirname "$0")")
rootdir=$(readlink -f "$testdir/../../..")
# Pull in the shared test helpers: run_test/killprocess/waitforbdev etc. from
# autotest_common.sh, and the NVMe-oF helpers (nvmftestinit, nvmfappstart,
# nvmftestfini, $TEST_TRANSPORT/$NVMF_FIRST_TARGET_IP/$NVMF_PORT) from
# nvmf/common.sh.
source "$rootdir/test/common/autotest_common.sh"
source "$rootdir/test/nvmf/common.sh"

# JSON-RPC client; without -s it talks to the nvmf target's default socket.
rpc_py="$rootdir/scripts/rpc.py"
# Separate RPC socket for the bdevperf secondary process.
bdevperf_rpc_sock="/var/tmp/bdevperf.sock"

#######################################
# Grow a logical volume store while bdevperf issues I/O to an lvol on it,
# then reload the lvstore and verify the cluster counts survived.
# Globals:
#   rpc_py, bdevperf_rpc_sock, testdir, rootdir (script setup)
#   TEST_TRANSPORT, NVMF_FIRST_TARGET_IP, NVMF_PORT (from nvmf/common.sh)
#   nvmfpid (set by nvmfappstart)
# Arguments:
#   $1 - optional; "dirty" kills the target with SIGKILL right after the
#        grow so the blobstore recovery path is exercised on reload.
# Returns:
#   0 on success; non-zero (via arithmetic checks / set -e) on mismatch.
#######################################
lvs_grow() {
	local aio_bdev lvs lvol
	local data_clusters free_clusters
	local bdevperf_pid run_test_pid
	local aio_init_size_mb=200
	local aio_final_size_mb=400
	local lvol_bdev_size_mb=150

	# Create an AIO bdev for the logical volume store
	rm -f "$testdir/aio_bdev"
	truncate -s "${aio_init_size_mb}M" "$testdir/aio_bdev"
	aio_bdev=$($rpc_py bdev_aio_create "$testdir/aio_bdev" aio_bdev 4096)

	# Create the logical volume store on the AIO bdev, with predictable cluster size and remaining md pages for grow
	lvs=$($rpc_py bdev_lvol_create_lvstore --cluster-sz 4194304 --md-pages-per-cluster-ratio 300 $aio_bdev lvs)
	data_clusters=$($rpc_py bdev_lvol_get_lvstores -u $lvs | jq -r '.[0].total_data_clusters')
	# 200 MiB backing file / 4 MiB clusters, minus metadata overhead
	# presumably accounts for 49 — TODO confirm against lvstore md layout.
	((data_clusters == 49))

	# Create a thin provisioned logical volume on the logical volume store
	lvol=$($rpc_py bdev_lvol_create -u $lvs lvol $lvol_bdev_size_mb)

	# Increase the AIO file size, without yet increasing the logical volume store size
	truncate -s "${aio_final_size_mb}M" "$testdir/aio_bdev"
	$rpc_py bdev_aio_rescan $aio_bdev
	# The lvstore must NOT pick up the new capacity until grow is requested.
	((data_clusters == $($rpc_py bdev_lvol_get_lvstores -u $lvs | jq -r '.[0].total_data_clusters')))

	# Create an NVMe-oF subsystem and add the logical volume as a namespace
	$rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode0 -a -s SPDK0
	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode0 $lvol
	$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
	$rpc_py nvmf_subsystem_add_listener discovery -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT

	# Start random writes in the background
	# -z: wait for an RPC to start the workload; -S 1: show stats every second.
	$SPDK_EXAMPLE_DIR/bdevperf -r $bdevperf_rpc_sock -m 0x2 -o 4096 -q 128 -w randwrite -t 10 -S 1 -z &
	bdevperf_pid=$!
	# NOTE: this trap is shell-global and must be cleared by the caller after
	# the test completes, or the EXIT arm will force a non-zero exit.
	trap 'killprocess $bdevperf_pid; nvmftestfini; exit 1' SIGINT SIGTERM EXIT
	waitforlisten $bdevperf_pid $bdevperf_rpc_sock

	# Connect bdevperf to the lvol exported over NVMe-oF and wait for Nvme0n1
	# to appear (up to 3000 ms).
	$rpc_py -s $bdevperf_rpc_sock bdev_nvme_attach_controller -b Nvme0 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT -f ipv4 -n nqn.2016-06.io.spdk:cnode0
	$rpc_py -s $bdevperf_rpc_sock bdev_get_bdevs -b Nvme0n1 -t 3000

	# Kick off the I/O run asynchronously so the grow happens mid-workload.
	$rootdir/examples/bdev/bdevperf/bdevperf.py -s $bdevperf_rpc_sock perform_tests &
	run_test_pid=$!
	# Give the workload a moment to actually start issuing I/O.
	sleep 2

	# Perform grow operation on the logical volume store
	$rpc_py bdev_lvol_grow_lvstore -u $lvs
	data_clusters=$($rpc_py bdev_lvol_get_lvstores -u $lvs | jq -r '.[0].total_data_clusters')
	# Doubling the backing file should roughly double the data clusters.
	((data_clusters == 99))

	# Wait for I/O to complete
	wait $run_test_pid
	killprocess $bdevperf_pid

	# Detach the initiator side, then snapshot free_clusters for later checks.
	$rpc_py nvmf_delete_subsystem nqn.2016-06.io.spdk:cnode0
	free_clusters=$($rpc_py bdev_lvol_get_lvstores -u $lvs | jq -r '.[0].free_clusters')

	if [[ "$1" == "dirty" ]]; then
		# Immediately shutdown nvmf_tgt without a chance to persist metadata after grow
		kill -9 $nvmfpid
		# wait may report the killed process's non-zero status; ignore it.
		wait $nvmfpid || true
		nvmfappstart -m 0x1
		# Recreate the AIO bdev; loading it triggers blobstore recovery,
		# which must preserve the grown cluster counts.
		aio_bdev=$($rpc_py bdev_aio_create "$testdir/aio_bdev" aio_bdev 4096)
		waitforbdev $lvol
		((free_clusters == $($rpc_py bdev_lvol_get_lvstores -u $lvs | jq -r '.[0].free_clusters')))
		((data_clusters == $($rpc_py bdev_lvol_get_lvstores -u $lvs | jq -r '.[0].total_data_clusters')))
	fi

	# Reload the logical volume store, making sure that number of clusters remain unchanged
	$rpc_py bdev_aio_delete $aio_bdev
	# Deleting the base bdev must take the lvstore down with it.
	NOT $rpc_py bdev_lvol_get_lvstores -u $lvs
	$rpc_py bdev_aio_create "$testdir/aio_bdev" $aio_bdev 4096
	waitforbdev $lvol
	((free_clusters == $($rpc_py bdev_lvol_get_lvstores -u $lvs | jq -r '.[0].free_clusters')))
	((data_clusters == $($rpc_py bdev_lvol_get_lvstores -u $lvs | jq -r '.[0].total_data_clusters')))

	# Clean up
	$rpc_py bdev_lvol_delete $lvol
	$rpc_py bdev_lvol_delete_lvstore -u $lvs
	$rpc_py bdev_aio_delete $aio_bdev
	rm -f "$testdir/aio_bdev"
}

# Bring up the NVMe-oF target and transport shared by both test cases.
nvmftestinit
nvmfappstart -m 0x1
$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192

run_test "lvs_grow_clean" lvs_grow
run_test "lvs_grow_dirty" lvs_grow dirty

# lvs_grow() installs a SIGINT/SIGTERM/EXIT trap ending in 'exit 1'; traps
# are shell-global, so it is still armed here. Clear it so a successful run
# does not fire the EXIT arm and report failure, then shut the target down
# cleanly.
trap - SIGINT SIGTERM EXIT
nvmftestfini