Commit 0616b2a5 authored by Pawel Kaminski, committed by Jim Harris
Browse files

test/vhost: Preparation for hotremove tests



Code refactoring to accommodate for coming hot-remove tests.

Change-Id: I41cfb04b467bf21a5a991712791ee6bfc87c241c
Signed-off-by: Pawel Kaminski <pawelx.kaminski@intel.com>
Reviewed-on: https://review.gerrithub.io/391957


Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 0eff26b3
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -89,7 +89,15 @@ function hotattach_tc4() {
    check_fio_retcode "Hotattach test case 4: Iteration 3." 0 $?
}

# Detach every vhost SCSI target that the hotattach test cases created, so the
# shared vhost instance is returned to its pre-test state. Each list entry is
# a "<controller> <target-id>" pair; the expansion is deliberately unquoted so
# it splits into the two arguments remove_vhost_scsi_target expects.
function cleanup_after_tests() {
    local pair
    for pair in "naa.Nvme0n1p0.0 0" "naa.Nvme0n1p0.0 1" \
        "naa.Nvme0n1p1.0 0" "naa.Nvme0n1p2.1 0"; do
        $rpc_py remove_vhost_scsi_target $pair
    done
}

# Run the four hotattach test cases in order, then detach the SCSI targets
# they attached so the vhost instance is left clean for the hotdetach script
# running against the same target.
hotattach_tc1
hotattach_tc2
hotattach_tc3
hotattach_tc4
cleanup_after_tests
+8 −3
Original line number Diff line number Diff line
@@ -151,13 +151,13 @@ function hotdetach_tc1() {
    second_disk=""
    get_first_disk "2" second_disk
    check_disks $first_disk $second_disk
    clear_after_tests
}

# During fio test for device from third VM remove first device from fifth controller and check if fio fails.
# Also check if disc has been removed from VM.
function hotdetach_tc2() {
    notice "Hotdetach test case 2"
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
    sleep 2
    first_disk=""
    get_first_disk "2" first_disk
@@ -173,13 +173,13 @@ function hotdetach_tc2() {
    second_disk=""
    get_first_disk "2" second_disk
    check_disks $first_disk $second_disk
    clear_after_tests
}

# Run fio test for all devices except one, then remove this device and check if fio passes.
# Also check if disc has been removed from VM.
function hotdetach_tc3() {
    notice "Hotdetach test case 3"
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
    sleep 2
    first_disk=""
    get_first_disk "2" first_disk
@@ -193,6 +193,7 @@ function hotdetach_tc3() {
    second_disk=""
    get_first_disk "2" second_disk
    check_disks $first_disk $second_disk
    clear_after_tests
}

# Run fio test for all devices except one and run separate fio test for this device.
@@ -201,7 +202,6 @@ function hotdetach_tc3() {
# After reboot run fio test for remaining devices and check if fio passes.
function hotdetach_tc4() {
    notice "Hotdetach test case 4"
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
    sleep 2
    first_disk=""
    get_first_disk "2" first_disk
@@ -228,6 +228,11 @@ function hotdetach_tc4() {
    prepare_fio_cmd_tc2_iter2 "2 3"
    $run_fio
    check_fio_retcode "Hotdetach test case 4: Iteration 3." 0 $?
    clear_after_tests
}

# Re-attach the LUN that the hotdetach test cases removed, restoring the
# fifth controller (naa.Nvme0n1p4.2) to its initial configuration so the next
# test case starts from a known state.
function clear_after_tests() {
    local ctrlr=naa.Nvme0n1p4.2
    local bdev=Nvme0n1p8
    $rpc_py add_vhost_scsi_lun "$ctrlr" 0 "$bdev"
}

# Start the hotdetach test sequence; the remaining test case invocations are
# outside this diff hunk and not visible here.
hotdetach_tc1
+41 −29
Original line number Diff line number Diff line
@@ -16,46 +16,58 @@ END_OF_CONFIG

# Run spdk by calling run_vhost from hotplug/common.sh.
# Then prepare vhost with rpc calls and setup and run 4 VMs.
# NOTE(review): this span is a rendered diff that lost its +/- markers — it
# contains BOTH the pre-change lines (the `pre_test_case` name, the
# `run_vhost` / `rm vhost.conf.in` calls, and the $SPDK_BUILD_DIR/scripts/rpc.py
# invocations) AND the post-change lines (the `pre_hot_attach_detach_test_case`
# name and the equivalent $rpc_py invocations). As pasted, the controller/LUN
# setup would run twice and the nested function headers are not valid bash.
# Reconcile against the upstream commit (0616b2a5) before using this text.
function pre_test_case() {
function pre_hot_attach_detach_test_case() {
    used_vms=""
    # NOTE(review): run_vhost and the vhost.conf.in removal also appear at
    # top level further down — presumably hoisted out of this function by the
    # commit; only one copy should survive.
    run_vhost
    rm $BASE_DIR/vhost.conf.in
    # Old-style RPC invocations (pre-change lines of the diff):
    $SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p0.0
    $SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p1.0
    $SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p2.1
    $SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p3.1
    $SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p4.2
    $SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p5.2
    $SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p6.3
    $SPDK_BUILD_DIR/scripts/rpc.py construct_vhost_scsi_controller naa.Nvme0n1p7.3
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p4.2 1 Nvme0n1p9
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p5.2 0 Nvme0n1p10
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p5.2 1 Nvme0n1p11
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p6.3 0 Nvme0n1p12
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p6.3 1 Nvme0n1p13
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p7.3 0 Nvme0n1p14
    $SPDK_BUILD_DIR/scripts/rpc.py add_vhost_scsi_lun naa.Nvme0n1p7.3 1 Nvme0n1p15
    # New-style RPC invocations via $rpc_py (post-change lines of the diff) —
    # same eight controllers and eight LUN attachments as above:
    $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p0.0
    $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p1.0
    $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p2.1
    $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p3.1
    $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p4.2
    $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p5.2
    $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p6.3
    $rpc_py construct_vhost_scsi_controller naa.Nvme0n1p7.3
    $rpc_py add_vhost_scsi_lun naa.Nvme0n1p4.2 0 Nvme0n1p8
    $rpc_py add_vhost_scsi_lun naa.Nvme0n1p4.2 1 Nvme0n1p9
    $rpc_py add_vhost_scsi_lun naa.Nvme0n1p5.2 0 Nvme0n1p10
    $rpc_py add_vhost_scsi_lun naa.Nvme0n1p5.2 1 Nvme0n1p11
    $rpc_py add_vhost_scsi_lun naa.Nvme0n1p6.3 0 Nvme0n1p12
    $rpc_py add_vhost_scsi_lun naa.Nvme0n1p6.3 1 Nvme0n1p13
    $rpc_py add_vhost_scsi_lun naa.Nvme0n1p7.3 0 Nvme0n1p14
    $rpc_py add_vhost_scsi_lun naa.Nvme0n1p7.3 1 Nvme0n1p15
    # Boot four VMs and run the post-boot preparation steps on each.
    vms_setup_and_run "0 1 2 3"
    vms_prepare "0 1 2 3"
}

# Reboot the given VMs and re-run the post-boot preparation on them.
# $1 - space-separated list of VM numbers, e.g. "0 1 2 3".
# The expansions stay unquoted on purpose: the original passed $1 unquoted,
# so the list word-splits into one argument per VM number.
function reboot_all_and_prepare() {
    local target_vms=$1
    vms_reboot_all $target_vms
    vms_prepare $target_vms
}

function post_test_case() {
    vm_shutdown_all
    spdk_vhost_kill
# Tear down the vhost configuration built during test setup: first detach
# both LUNs (target IDs 0 and 1) from each of the four dual-LUN controllers,
# then delete all eight controllers. Call order matches the original
# line-by-line sequence exactly.
function clear_vhost_config() {
    local ctrlr
    for ctrlr in naa.Nvme0n1p4.2 naa.Nvme0n1p5.2 naa.Nvme0n1p6.3 naa.Nvme0n1p7.3; do
        $rpc_py remove_vhost_scsi_target "$ctrlr" 0
        $rpc_py remove_vhost_scsi_target "$ctrlr" 1
    done
    for ctrlr in naa.Nvme0n1p0.0 naa.Nvme0n1p1.0 naa.Nvme0n1p2.1 naa.Nvme0n1p3.1 \
        naa.Nvme0n1p4.2 naa.Nvme0n1p5.2 naa.Nvme0n1p6.3 naa.Nvme0n1p7.3; do
        $rpc_py remove_vhost_controller "$ctrlr"
    done
}

# Top-level driver: generate config, start vhost, run the hotattach and
# hotdetach scripts concurrently against it, then tear everything down.
# NOTE(review): this span is a rendered diff that lost its +/- markers —
# `pre_test_case` / `post_test_case` are the pre-change calls and
# `run_vhost` + `rm` + `pre_hot_attach_detach_test_case` /
# `vm_shutdown_all` + `clear_vhost_config` + `spdk_vhost_kill` are their
# post-change replacements. Only one of each pair belongs in the real file.
trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
gen_config
pre_test_case
run_vhost
rm $BASE_DIR/vhost.conf.in
pre_hot_attach_detach_test_case
# Run both hotplug test scripts in parallel and wait for each; `wait $pid`
# propagates a script's failure status to the ERR trap above.
$BASE_DIR/scsi_hotattach.sh --fio-bin=$fio_bin &
first_script=$!
$BASE_DIR/scsi_hotdetach.sh --fio-bin=$fio_bin &
second_script=$!
wait $first_script
wait $second_script
post_test_case
vm_shutdown_all
clear_vhost_config
spdk_vhost_kill
+1 −1
Original line number Diff line number Diff line
@@ -142,7 +142,7 @@ case $1 in
			--vm=2,$VM_IMAGE,Nvme0n1p4:Nvme0n1p5 \
			--vm=3,$VM_IMAGE,Nvme0n1p6:Nvme0n1p7 \
			--test-type=spdk_vhost_scsi \
			--fio-job=$WORKDIR/hotplug/fio_jobs/default_integrity.job -x
			--fio-jobs=$WORKDIR/hotplug/fio_jobs/default_integrity.job -x
		;;
	-ro|--readonly)
		echo 'Running readonly tests suite...'