Commit 62556204 authored by Pawel Kaminski, committed by Jim Harris

test/vhost: Vhost blk hotremove test plan.

Change-Id: I2deb9ae125eecf5592d59902cc5a530993f0fa54
Signed-off-by: Pawel Kaminski <pawelx.kaminski@intel.com>
Reviewed-on: https://review.gerrithub.io/402276
Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 8f1f9137
# Vhost blk hot remove tests
#
# Objective
# The purpose of these tests is to verify that SPDK vhost remains stable while
# hot-remove operations are performed on devices used by its SCSI and BLK
# controllers. Hot remove is a scenario in which an NVMe device is removed
# while it is already in use.
#
# Test cases description
# 1. FIO I/O traffic is run during the hot-remove operations.
#    By default FIO uses the default_integrity*.job config files located in
#    the test/vhost/hotplug/fio_jobs directory.
# 2. FIO operates in random write mode (randwrite) with verification enabled,
#    so read operations are performed as well; a sketch of such a job is
#    shown below.
# 3. After every run, the FIO exit status is checked for any errors.
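#
# For illustration, such a verifying randwrite job boils down to options like
# the following (a sketch, not the actual default_integrity*.job contents;
# /dev/vda is only an assumed in-guest device name):
#
#   [global]
#   ioengine=libaio
#   direct=1
#   rw=randwrite
#   verify=crc32c      # written data is read back and verified
#
#   [job0]
#   filename=/dev/vda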

function prepare_fio_cmd_tc1() {
    print_test_fio_header

@@ -23,40 +38,56 @@ function remove_vhost_controllers() {
    $rpc_py remove_vhost_controller naa.Nvme0n1p3.1
}

# Vhost blk hot remove test cases
#
# Test Case 1
function blk_hotremove_tc1() {
    echo "Blk hotremove test case 1"
    traddr=""
    # 1. Run the command to hot remove the NVMe disk.
    get_traddr "Nvme0"
    delete_nvme "Nvme0n1"
    # 2. If vhost crashed, the remaining tests would stop running.
    sleep 1
    add_nvme "HotInNvme0" "$traddr"
    sleep 1
}
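
# The helpers used above are assumed to be thin wrappers around this SPDK
# version's NVMe bdev RPCs; get_traddr saves the PCI address of the named
# controller into $traddr so the device can be re-attached later. A sketch
# (the real definitions live in the shared hotplug test scripts):
#
#   function delete_nvme() {
#       # Detach the NVMe controller, simulating a hot remove while in use.
#       $rpc_py delete_nvme_controller "$1"
#   }
#
#   function add_nvme() {
#       # Re-attach the device under a new bdev name at the saved PCI address.
#       $rpc_py construct_nvme_bdev -b "$1" -t PCIe -a "$2"
#   }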

# Test Case 2
function blk_hotremove_tc2() {
    echo "Blk hotremove test case 2"
    # 1. Use RPC commands to create the blk controllers.
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme0n1p0
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 Nvme1n1p1
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p2
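    #    Note: in these tests the controller names follow the
    #    naa.<bdev name>.<VM number> convention, so the .0/.1 suffix
    #    selects which VM each controller is attached to.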
    # 2. Run two VMs and attach every VM to two blk controllers.
    vm_run_with_arg "0 1"
    vms_prepare "0"

    traddr=""
    get_traddr "Nvme0"
    prepare_fio_cmd_tc1 "0"
    # 3. Run FIO I/O traffic with verification enabled on the NVMe disk.
    $run_fio &
    local last_pid=$!
    sleep 3
    # 4. Run the command to hot remove the NVMe disk.
    delete_nvme "HotInNvme0n1"
    local retcode=0
    wait_for_finish $last_pid || retcode=$?
    # 5. Check that the fio job running on the hot-removed device has stopped.
    #    Expected: Fio should return error message and return code != 0.
    check_fio_retcode "Blk hotremove test case 2: Iteration 1." 1 $retcode

    # 6. Reboot the VM.
    reboot_all_and_prepare "0"
    # 7. Run FIO I/O traffic with verification enabled on the NVMe disk.
    $run_fio &
    local retcode=0
    wait_for_finish $! || retcode=$?
    # 8. Check that the fio job running on the hot-removed device has stopped.
    #    Expected: Fio should return error message and return code != 0.
    check_fio_retcode "Blk hotremove test case 2: Iteration 2." 1 $retcode
    vm_shutdown_all
    remove_vhost_controllers
@@ -64,30 +95,41 @@ function blk_hotremove_tc2() {
    sleep 1
}
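
# check_fio_retcode (from the shared hotplug scripts) is assumed to compare
# fio's actual return code with the expected one, roughly as in this sketch
# ("fail" stands in for whatever error reporting the suite provides):
#
#   function check_fio_retcode() {
#       local test_name="$1" expected=$2 actual=$3
#       if [[ $expected -ne 0 ]]; then
#           # Fio ran against a hot-removed device, so it must have failed.
#           [[ $actual -ne 0 ]] || fail "$test_name fio unexpectedly passed"
#       else
#           [[ $actual -eq 0 ]] || fail "$test_name fio failed with $actual"
#       fi
#   }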

# Test Case 3
function blk_hotremove_tc3() {
    echo "Blk hotremove test case 3"
    # 1. Use RPC commands to create the blk controllers.
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme1n1p0
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 HotInNvme1n1p1
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p1
    # 2. Run two VMs and attach every VM to two blk controllers.
    vm_run_with_arg "0 1"
    vms_prepare "0 1"

    traddr=""
    get_traddr "Nvme0"
    prepare_fio_cmd_tc1 "0"
    # 3. Run FIO I/O traffic with verification enabled on the first NVMe disk.
    $run_fio &
    local last_pid=$!
    sleep 3
    # 4. Run the command to hot remove the first NVMe disk.
    delete_nvme "HotInNvme1n1"
    local retcode=0
    wait_for_finish $last_pid || retcode=$?
    # 5. Check that the fio job running on the hot-removed device has stopped.
    #    Expected: Fio should return error message and return code != 0.
    check_fio_retcode "Blk hotremove test case 3: Iteration 1." 1 $retcode

    # 6. Reboot the VM.
    reboot_all_and_prepare "0"
    local retcode=0
    # 7. Run FIO I/O traffic with verification enabled on the removed NVMe disk.
    $run_fio &
    wait_for_finish $! || retcode=$?
    # 8. Check that the fio job running on the hot-removed device has stopped.
    #    Expected: Fio should return error message and return code != 0.
    check_fio_retcode "Blk hotremove test case 3: Iteration 2." 1 $retcode
    vm_shutdown_all
    remove_vhost_controllers
@@ -95,37 +137,49 @@ function blk_hotremove_tc3() {
    sleep 1
}
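
# wait_for_finish is assumed to wait for the backgrounded fio process and
# propagate its exit code to the caller, roughly as below (a sketch; the
# timeout value is an assumption):
#
#   function wait_for_finish() {
#       local pid=$1 timeout=60   # timeout in seconds (assumed value)
#       while kill -0 "$pid" 2> /dev/null && ((timeout-- > 0)); do
#           sleep 1
#       done
#       # Collect and propagate fio's exit code to the caller.
#       wait "$pid"
#   }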

# Test Case 4
function blk_hotremove_tc4() {
    echo "Blk hotremove test case 4"
    # 1. Use RPC commands to create the blk controllers.
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme2n1p0
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 HotInNvme2n1p1
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p1
    # 2. Run two VMs, each attached to two blk controllers.
    vm_run_with_arg "0 1"
    vms_prepare "0 1"

    prepare_fio_cmd_tc1 "0"
    # 3. Run FIO I/O traffic on the first VM with verification enabled on both NVMe disks.
    $run_fio &
    local last_pid_vm0=$!

    prepare_fio_cmd_tc1 "1"
    # 4. Run FIO I/O traffic on the second VM with verification enabled on both NVMe disks.
    $run_fio &
    local last_pid_vm1=$!

    sleep 3
    prepare_fio_cmd_tc1 "0 1"
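    #    Note: HotInNvme2n1 backs one controller on each VM (naa.Nvme0n1p0.0
    #    and naa.Nvme0n1p2.1), so the fio jobs on both VMs are expected to
    #    fail after the removal below.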
    # 5. Run the command to hot remove the first NVMe disk.
    delete_nvme "HotInNvme2n1"
    local retcode_vm0=0
    local retcode_vm1=0
    wait_for_finish $last_pid_vm0 || retcode_vm0=$?
    wait_for_finish $last_pid_vm1 || retcode_vm1=$?
    # 6. Check that the fio jobs running on the hot-removed device have stopped.
    #    Expected: Fio should return error message and return code != 0.
    check_fio_retcode "Blk hotremove test case 4: Iteration 1." 1 $retcode_vm0
    check_fio_retcode "Blk hotremove test case 4: Iteration 2." 1 $retcode_vm1

    # 7. Reboot all VMs.
    reboot_all_and_prepare "0 1"
    # 8. Run FIO I/O traffic with verification enabled on the removed NVMe disk.
    $run_fio &
    local retcode=0
    wait_for_finish $! || retcode=$?
    # 9. Check that the fio job running on the hot-removed device has stopped.
    #    Expected: Fio should return error message and return code != 0.
    check_fio_retcode "Blk hotremove test case 4: Iteration 3." 1 $retcode

    vm_shutdown_all
@@ -134,28 +188,39 @@ function blk_hotremove_tc4() {
    sleep 1
}

# Test Case 5
function blk_hotremove_tc5() {
    echo "Blk hotremove test case 5"
    # 1. Use RPC commands to create the blk controllers.
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p0.0 HotInNvme3n1p0
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p1.0 Nvme1n1p0
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p2.1 Nvme1n1p1
    $rpc_py construct_vhost_blk_controller naa.Nvme0n1p3.1 Nvme1n1p2
    # 2. Run two VMs, each attached to two blk controllers.
    vm_run_with_arg "0 1"
    vms_prepare "0 1"

    prepare_fio_cmd_tc1 "0"
    # 3. Run FIO I/O traffic on the first VM with verification enabled on both NVMe disks.
    $run_fio &
    local last_pid=$!
    sleep 3
    # 4. Run the command to hot remove the first NVMe disk.
    delete_nvme "HotInNvme3n1"
    local retcode=0
    wait_for_finish $last_pid || retcode=$?
    # 5. Check that the fio job running on the hot-removed device has stopped.
    #    Expected: Fio should return error message and return code != 0.
    check_fio_retcode "Blk hotremove test case 5: Iteration 1." 1 $retcode

    # 6. Reboot the VM.
    reboot_all_and_prepare "0"
    local retcode=0
    # 7. Run FIO I/O traffic with verification enabled on the removed NVMe disk.
    $run_fio &
    wait_for_finish $! || retcode=$?
    # 8. Check that the fio job running on the hot-removed device has stopped.
    #    Expected: Fio should return error message and return code != 0.
    check_fio_retcode "Blk hotremove test case 5: Iteration 2." 1 $retcode
    vm_shutdown_all
    remove_vhost_controllers