Commit 29e9fdc8 authored by Pawel Kaminski's avatar Pawel Kaminski Committed by Tomasz Zawadzki
Browse files

test/hotremove: Select test cases to be run for scsi and blk hotremove.



With this change the user can define which test cases to run.
This makes it possible to run hotremove test cases both per-patch and nightly.

Change-Id: I6876f39886a347ada4dd548a2f45b99a37207c3e
Signed-off-by: default avatarPawel Kaminski <pawelx.kaminski@intel.com>
Signed-off-by: default avatarKarol Latecki <karol.latecki@intel.com>
Signed-off-by: default avatarPawel Kaminski <pawelx.kaminski@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/768


Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarShuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
parent 32c6f860
Loading
Loading
Loading
Loading
+37 −22
Original line number Diff line number Diff line
@@ -44,11 +44,11 @@ function blk_hotremove_tc1() {
    echo "Blk hotremove test case 1"
    traddr=""
    # 1. Run the command to hot remove NVMe disk.
    get_traddr "Nvme0"
    delete_nvme "Nvme0"
    delete_nvme $hotnvmename
    # 2. If vhost had crashed then tests would stop running
    sleep 1
    add_nvme "HotInNvme0" "$traddr"
    set_hotnvmename
    add_nvme $hotnvmename "$traddr"
    sleep 1
}

@@ -56,7 +56,7 @@ function blk_hotremove_tc1() {
function blk_hotremove_tc2() {
    echo "Blk hotremove test case 2"
    # 1. Use rpc command to create blk controllers.
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme0n1p0
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
@@ -72,7 +72,7 @@ function blk_hotremove_tc2() {
    local last_pid=$!
    sleep 3
    # 4. Run the command to hot remove NVMe disk.
    delete_nvme "HotInNvme0"
    delete_nvme $hotnvmename
    local retcode=0
    wait_for_finish $last_pid || retcode=$?
    # 5. Check that fio job run on hot-removed device stopped.
@@ -90,7 +90,8 @@ function blk_hotremove_tc2() {
    check_fio_retcode "Blk hotremove test case 2: Iteration 2." 1 $retcode
    vm_shutdown_all
    vhost_delete_controllers
    add_nvme "HotInNvme1" "$traddr"
    set_hotnvmename
    add_nvme $hotnvmename "$traddr"
    sleep 1
}

@@ -98,9 +99,9 @@ function blk_hotremove_tc2() {
function blk_hotremove_tc3() {
    echo "Blk hotremove test case 3"
    # 1. Use rpc command to create blk controllers.
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme1n1p0
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme1n1p1
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 "${hotnvmename}n1p1"
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
    # 2. Run two VMs and attach every VM to two blk controllers.
    vm_run_with_arg "0 1"
@@ -114,7 +115,7 @@ function blk_hotremove_tc3() {
    local last_pid=$!
    sleep 3
    # 4. Run the command to hot remove of first NVMe disk.
    delete_nvme "HotInNvme1"
    delete_nvme $hotnvmename
    local retcode=0
    wait_for_finish $last_pid || retcode=$?
    # 6. Check that fio job run on hot-removed device stopped.
@@ -132,7 +133,8 @@ function blk_hotremove_tc3() {
    check_fio_retcode "Blk hotremove test case 3: Iteration 2." 1 $retcode
    vm_shutdown_all
    vhost_delete_controllers
    add_nvme "HotInNvme2" "$traddr"
    set_hotnvmename
    add_nvme $hotnvmename "$traddr"
    sleep 1
}

@@ -140,9 +142,9 @@ function blk_hotremove_tc3() {
function blk_hotremove_tc4() {
    echo "Blk hotremove test case 4"
    # 1. Use rpc command to create blk controllers.
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme2n1p0
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 HotInNvme2n1p1
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 "${hotnvmename}n1p1"
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp1
    # 2. Run two VM, attached to blk controllers.
    vm_run_with_arg "0 1"
@@ -161,7 +163,7 @@ function blk_hotremove_tc4() {
    sleep 3
    prepare_fio_cmd_tc1 "0 1"
    # 5. Run the command to hot remove of first NVMe disk.
    delete_nvme "HotInNvme2"
    delete_nvme $hotnvmename
    local retcode_vm0=0
    local retcode_vm1=0
    wait_for_finish $last_pid_vm0 || retcode_vm0=$?
@@ -183,7 +185,8 @@ function blk_hotremove_tc4() {

    vm_shutdown_all
    vhost_delete_controllers
    add_nvme "HotInNvme3" "$traddr"
    set_hotnvmename
    add_nvme $hotnvmename "$traddr"
    sleep 1
}

@@ -191,7 +194,7 @@ function blk_hotremove_tc4() {
function blk_hotremove_tc5() {
    echo "Blk hotremove test case 5"
    # 1. Use rpc command to create blk controllers.
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 HotInNvme3n1p0
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p0.0 "${hotnvmename}n1p0"
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p1.0 Mallocp0
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p2.1 Mallocp1
    $rpc_py vhost_create_blk_controller naa.Nvme0n1p3.1 Mallocp2
@@ -205,7 +208,7 @@ function blk_hotremove_tc5() {
    local last_pid=$!
    sleep 3
    # 4. Run the command to hot remove of first NVMe disk.
    delete_nvme "HotInNvme3"
    delete_nvme $hotnvmename
    local retcode=0
    wait_for_finish $last_pid || retcode=$?
    # 5. Check that fio job run on hot-removed device stopped.
@@ -223,13 +226,25 @@ function blk_hotremove_tc5() {
    check_fio_retcode "Blk hotremove test case 5: Iteration 2." 1 $retcode
    vm_shutdown_all
    vhost_delete_controllers
    add_nvme "HotInNvme4" "$traddr"
    set_hotnvmename
    add_nvme $hotnvmename "$traddr"
    sleep 1
}

vms_setup
get_traddr "Nvme0"
# Execute only the blk hotremove test cases selected via --test-cases.
for num in 1 2 3 4 5; do
    flag="tc${num}"
    if "${!flag}"; then
        "blk_hotremove_tc${num}"
    fi
done
+35 −0
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@ x=""
scsi_hot_remove_test=0
blk_hot_remove_test=0
readonly=""
test_cases="all"


function usage() {
@@ -38,6 +39,9 @@ function usage() {
    echo "                          DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
    echo "    --scsi-hotremove-test Run scsi hotremove tests"
    echo "    --readonly            Use readonly for fio"
    echo "    --blk-hotremove-test  Run blk hotremove tests."
    echo "    --test-cases=[num]    Run comma separated test cases. Assign all if all test cases should be run"
    echo "                          Default value is all"
    exit 0
}

@@ -53,6 +57,7 @@ while getopts 'xh-:' optchar; do
            scsi-hotremove-test) scsi_hot_remove_test=1 ;;
            blk-hotremove-test) blk_hot_remove_test=1 ;;
            readonly) readonly="--readonly" ;;
            test-cases=*) test_cases="${OPTARG#*=}" ;;
            *) usage $0 "Invalid argument '$OPTARG'" ;;
        esac
        ;;
@@ -64,6 +69,36 @@ while getopts 'xh-:' optchar; do
done
shift $(( OPTIND - 1 ))

# Expand the "all" shorthand into the full comma-separated list.
# [[ ]] avoids the unquoted-expansion breakage of the old `[ ${test_cases} == … ]`,
# which errored out when test_cases was empty or contained spaces.
if [[ "${test_cases}" == "all" ]]; then
    test_cases="1,2,3,4,5"
fi
# Per-test-case selection flags; default to "not selected".
tc1=false
tc2=false
tc3=false
tc4=false
tc5=false
# Parse the comma-separated --test-cases value and raise the matching flags.
# Unknown entries are silently ignored, matching the previous behavior.
IFS=',' read -ra tc <<< "${test_cases}"
for i in "${tc[@]}"; do
    case "$i" in
        1) tc1=true ;;
        2) tc2=true ;;
        3) tc3=true ;;
        4) tc4=true ;;
        5) tc5=true ;;
    esac
done

# Name generator for hot-inserted NVMe bdevs.  The first attached controller
# is "Nvme0"; each re-attach after a hot remove gets a fresh "HotInNvmeN".
hotnvmenumber=0
hotnvmename="Nvme0"
# Advance hotnvmename to the next unique "HotInNvmeN" name.
function set_hotnvmename() {
    local next=$hotnvmenumber
    hotnvmename="HotInNvme${next}"
    hotnvmenumber=$((next + 1))
}

# Paths to the fio job files used by the hotplug/hotremove tests: the base
# integrity job plus temporary job files (presumably generated per-test for
# the attach/detach scenarios — the names suggest this; confirm in callers).
fio_job=$testdir/fio_jobs/default_integrity.job
tmp_attach_job=$testdir/fio_jobs/fio_attach.job.tmp
tmp_detach_job=$testdir/fio_jobs/fio_detach.job.tmp
+35 −22
Original line number Diff line number Diff line
@@ -43,23 +43,21 @@ function scsi_hotremove_tc1() {
    traddr=""
    get_traddr "Nvme0"
    # 1. Run the command to hot remove NVMe disk.
    delete_nvme "Nvme0"
    delete_nvme $hotnvmename
    # 2. If vhost had crashed then tests would stop running
    sleep 1
    add_nvme "HotInNvme0" "$traddr"
    set_hotnvmename
    add_nvme $hotnvmename "$traddr"
}

# Test Case 2
function scsi_hotremove_tc2() {
    echo "Scsi hotremove test case 2"
    # 1. Attach split NVMe bdevs to scsi controller.
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme0n1p0
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Mallocp0
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme0n1p1
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p3.1 0 Mallocp1
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 "${hotnvmename}n1p0"
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 "${hotnvmename}n1p1"

    # 2. Run two VMs, attached to scsi controllers.
    vms_setup
    vm_run_with_arg 0 1
    vms_prepare "0 1"

@@ -74,7 +72,7 @@ function scsi_hotremove_tc2() {
    local last_pid=$!
    sleep 3
    # 4. Run the command to hot remove NVMe disk.
    delete_nvme "HotInNvme0"
    delete_nvme $hotnvmename

    # 5. Check that fio job run on hot-remove device stopped on VM.
    #    Expected: Fio should return error message and return code != 0.
@@ -95,7 +93,8 @@ function scsi_hotremove_tc2() {
    #     Expected: Fio should return error message and return code != 0.
    check_fio_retcode "Scsi hotremove test case 2: Iteration 2." 1 $retcode
    vm_shutdown_all
    add_nvme "HotInNvme1" "$traddr"
    set_hotnvmename
    add_nvme $hotnvmename "$traddr"
    sleep 1
}

@@ -103,7 +102,7 @@ function scsi_hotremove_tc2() {
function scsi_hotremove_tc3() {
    echo "Scsi hotremove test case 3"
    # 1. Attach added NVMe bdev to scsi controller.
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme1n1p0
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 "${hotnvmename}n1p0"
    # 2. Run two VM, attached to scsi controllers.
    vm_run_with_arg 0 1
    vms_prepare "0 1"
@@ -117,7 +116,7 @@ function scsi_hotremove_tc3() {
    local last_pid=$!
    sleep 3
    # 4. Run the command to hot remove NVMe disk.
    delete_nvme "HotInNvme1"
    delete_nvme $hotnvmename
    # 5. Check that fio job run on hot-remove device stopped on first VM.
    #    Expected: Fio should return error message and return code != 0.
    wait_for_finish $last_pid || retcode=$?
@@ -136,7 +135,8 @@ function scsi_hotremove_tc3() {
    #    Expected: Fio should return error message and return code != 0.
    check_fio_retcode "Scsi hotremove test case 3: Iteration 2." 1 $retcode
    vm_shutdown_all
    add_nvme "HotInNvme2" "$traddr"
    set_hotnvmename
    add_nvme $hotnvmename "$traddr"
    sleep 1
}

@@ -144,8 +144,8 @@ function scsi_hotremove_tc3() {
function scsi_hotremove_tc4() {
    echo "Scsi hotremove test case 4"
    # 1. Attach NVMe bdevs to scsi controllers.
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 HotInNvme2n1p0
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 HotInNvme2n1p1
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p0.0 0 "${hotnvmename}n1p0"
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p2.1 0 "${hotnvmename}n1p1"
    # 2. Run two VMs, attach to scsi controller.
    vm_run_with_arg 0 1
    vms_prepare "0 1"
@@ -168,7 +168,7 @@ function scsi_hotremove_tc4() {
    # 5. Run the command to hot remove NVMe disk.
    traddr=""
    get_traddr "Nvme0"
    delete_nvme "HotInNvme2"
    delete_nvme $hotnvmename
    # 6. Check that fio job run on hot-removed devices stopped.
    #    Expected: Fio should return error message and return code != 0.
    local retcode_vm0=0
@@ -205,10 +205,9 @@ function scsi_hotremove_tc4() {
    #     Expected: Fio should return return code == 0.
    check_fio_retcode "Scsi hotremove test case 4: Iteration 4." 0 $retcode
    vm_shutdown_all
    add_nvme "HotInNvme3" "$traddr"
    set_hotnvmename
    add_nvme $hotnvmename "$traddr"
    sleep 1
    $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
    $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p3.1 0
}

function pre_scsi_hotremove_test_case() {
@@ -216,9 +215,13 @@ function pre_scsi_hotremove_test_case() {
    $rpc_py vhost_create_scsi_controller naa.Nvme0n1p1.0
    $rpc_py vhost_create_scsi_controller naa.Nvme0n1p2.1
    $rpc_py vhost_create_scsi_controller naa.Nvme0n1p3.1
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p1.0 0 Mallocp0
    $rpc_py vhost_scsi_controller_add_target naa.Nvme0n1p3.1 0 Mallocp1
}

function post_scsi_hotremove_test_case() {
    $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p1.0 0
    $rpc_py vhost_scsi_controller_remove_target naa.Nvme0n1p3.1 0
    $rpc_py vhost_delete_controller naa.Nvme0n1p0.0
    $rpc_py vhost_delete_controller naa.Nvme0n1p1.0
    $rpc_py vhost_delete_controller naa.Nvme0n1p2.1
@@ -226,8 +229,18 @@ function post_scsi_hotremove_test_case() {
}

pre_scsi_hotremove_test_case
vms_setup
# Execute only the scsi hotremove test cases selected via --test-cases.
for num in 1 2 3 4; do
    selected="tc${num}"
    if "${!selected}"; then
        "scsi_hotremove_tc${num}"
    fi
done
sleep 1
post_scsi_hotremove_test_case
+13 −3
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@ case $1 in
		echo "  -shr|--scsi-hot-remove               for running scsi hot remove tests"
		echo "  -bhr|--blk-hot-remove                for running blk hot remove tests"
		echo "  -h |--help                           prints this message"
		echo "  -tc|--test-cases                     define test cases to run for hotremove test"
		echo ""
		echo "Environment:"
		echo "  VM_IMAGE        path to QCOW2 VM image used during test (default: $HOME/vhost_vm_image.qcow2)"
@@ -50,6 +51,13 @@ DISKS_NUMBER=$(lspci -mm -n | grep 0108 | tr -d '"' | awk -F " " '{print "0000:"

WORKDIR=$(readlink -f $(dirname $0))

# Optional second argument selects hotremove test cases, e.g. --test-cases=1,2.
# Default "all" runs every case.
test_cases="all"
if [ -n "$2" ]; then
	case "$2" in
		# Keep the legacy single-dash spellings for compatibility, but also
		# accept --test-cases=* — the old patterns matched only one leading
		# dash, so invocations like "--test-cases=1,2,3,4" were silently
		# ignored and test_cases stayed "all".
		-tc=* | -test-cases=* | --test-cases=*) test_cases="${2#*=}" ;;
	esac
fi

case $1 in
	-hp|--hotplug)
		echo 'Running hotplug tests suite...'
@@ -68,7 +76,8 @@ case $1 in
			--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
			--test-type=spdk_vhost_scsi \
			--scsi-hotremove-test \
			--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
			--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job \
			--test-cases=$test_cases
		;;
	-bhr|--blk-hot-remove)
		echo 'Running blk hotremove tests suite...'
@@ -77,7 +86,8 @@ case $1 in
			--vm=1,$VM_IMAGE,Nvme0n1p2:Nvme0n1p3 \
			--test-type=spdk_vhost_blk \
			--blk-hotremove-test \
			--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job
			--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job \
			--test-cases=$test_cases
	;;
	*)
		echo "unknown test type: $1"
+6 −0
Original line number Diff line number Diff line
@@ -94,6 +94,12 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
	echo 'Running migration suite...'
	run_test "vhost_migration" $WORKDIR/migration/migration.sh -x \
	--fio-bin=$FIO_BIN --os=$VM_IMAGE

	echo "Running scsi hotremove test"
	run_test "scsi_hotremove" $WORKDIR/vhost/manual.sh -shr --test-cases=1,2,3,4

	echo "Running blk hotremove test"
	run_test "blk_hotremove" $WORKDIR/vhost/manual.sh -bhr --test-cases=1,2,3,4,5
fi

echo 'Running lvol integrity suite...'