Commit 5d0b4b27 authored by Daniel Verkamp's avatar Daniel Verkamp Committed by Jim Harris
Browse files

test/nvmf: remove nvmf_tgt+pmem test



This has been replaced with a pmem component-level test.

Change-Id: I3c433d9edbb4c4f0ff26d7716cd11ee9df5c0ad7
Signed-off-by: default avatarDaniel Verkamp <daniel.verkamp@intel.com>
Reviewed-on: https://review.gerrithub.io/405910


Tested-by: default avatarSPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: default avatarBen Walker <benjamin.walker@intel.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
Reviewed-by: default avatarShuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: default avatarKarol Latecki <karol.latecki@intel.com>
parent 6b5a1d6c
Loading
Loading
Loading
Loading
+0 −10
Original line number Diff line number Diff line
@@ -29,16 +29,6 @@ run_test test/nvmf/nvme_cli/nvme_cli.sh
run_test test/nvmf/lvol/nvmf_lvol.sh
run_test test/nvmf/shutdown/shutdown.sh

if [ $SPDK_TEST_NVML -eq 1 ]; then
	if [ $RUN_NIGHTLY -eq 1 ]; then
		run_test test/nvmf/pmem/nvmf_pmem.sh 30
		report_test_completion "nightly_nvmf_pmem"
	else
		run_test test/nvmf/pmem/nvmf_pmem.sh 10
		report_test_completion "nvmf_pmem"
	fi
fi

if [ $RUN_NIGHTLY_FAILING -eq 1 ]; then
	run_test test/nvmf/multiconnection/multiconnection.sh
fi

test/nvmf/pmem/nvmf.conf

deleted 100644 → 0
+0 −5
Original line number Diff line number Diff line
[Global]
  Comment "Global section"

[Nvmf]
  MaxQueuesPerSession 16

test/nvmf/pmem/nvmf_pmem.sh

deleted 100755 → 0
+0 −95
Original line number Diff line number Diff line
#!/usr/bin/env bash
# nvmf_pmem.sh — NVMe-oF over RDMA test backed by pmem bdevs.
#
# Usage: nvmf_pmem.sh <runtime_seconds>
#   <runtime_seconds> is passed straight to the fio workload below.

# Quote the path expansions so the script still resolves correctly when it
# lives under a directory containing spaces.
testdir=$(readlink -f "$(dirname "$0")")
rootdir=$(readlink -f "$testdir/../../..")
source "$rootdir/test/common/autotest_common.sh"
source "$rootdir/test/nvmf/common.sh"

RUNTIME=$1              # fio run time in seconds (first CLI argument)
PMEM_BDEVS=""           # space-separated list of pmem bdev names created during setup
SUBSYS_NR=1             # number of NVMe-oF subsystems to create
PMEM_PER_SUBSYS=8       # pmem bdev namespaces per subsystem
rpc_py="python $rootdir/scripts/rpc.py"

# Disconnect the kernel initiator from every subsystem created by this test.
# Failures are swallowed with `|| true` so that cleanup continues even for
# subsystems that were never actually connected.
function disconnect_nvmf()
{
	for i in $(seq 1 $SUBSYS_NR); do
		nvme disconnect -n "nqn.2016-06.io.spdk:cnode${i}" || true
	done
}

# Tear down pmem state: unregister every bdev listed in PMEM_BDEVS, then
# remove the backing pool file for each (subsystem, namespace) pair via RPC.
function clear_pmem_pool()
{
	for pmem in $PMEM_BDEVS; do
		$rpc_py delete_bdev $pmem
	done

	for i in $(seq 1 $SUBSYS_NR); do
		for c in $(seq 1 $PMEM_PER_SUBSYS); do
			$rpc_py delete_pmem_pool /tmp/pool_file${i}_${c}
		done
	done
}

set -e

timing_enter nvmf_pmem

# Pick the first RDMA-capable NIC IP; without one the test is a no-op
# (exit 0 so CI does not treat the missing hardware as a failure).
RDMA_IP_LIST=$(get_available_rdma_ips)
NVMF_FIRST_TARGET_IP=$(echo "$RDMA_IP_LIST" | head -n 1)
if [ -z $NVMF_FIRST_TARGET_IP ]; then
	echo "no NIC for nvmf test"
	exit 0
fi

timing_enter start_nvmf_tgt
# Start up the NVMf target in another process
$NVMF_APP -c $testdir/../nvmf.conf &
pid=$!

# On interruption or error, disconnect initiators, remove pool files and
# kill the target before exiting non-zero.
trap "disconnect_nvmf; rm -f /tmp/pool_file*; killprocess $pid; exit 1" SIGINT SIGTERM EXIT

waitforlisten $pid
timing_exit start_nvmf_tgt

# Kernel initiator transport module for RDMA connections.
modprobe -v nvme-rdma

timing_enter setup
# Create pmem backends on each subsystem
for i in `seq 1 $SUBSYS_NR`; do
	bdevs=""
	for c in `seq 1 $PMEM_PER_SUBSYS`; do
		# 32 MB pool, 512-byte block size; the bdev name echoed by the RPC
		# is collected into $bdevs for the subsystem's namespace list.
		$rpc_py create_pmem_pool /tmp/pool_file${i}_${c} 32 512
		bdevs+="$($rpc_py construct_pmem_bdev -n pmem${i}_${c} /tmp/pool_file${i}_${c}) "
	done
	$rpc_py construct_nvmf_subsystem nqn.2016-06.io.spdk:cnode$i "trtype:RDMA traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT" '' -a -s SPDK$i -n "$bdevs"
	PMEM_BDEVS+=$bdevs
done
timing_exit setup

# Connect the kernel initiator to every subsystem just created.
timing_enter nvmf_connect
for i in `seq 1 $SUBSYS_NR`; do
	nvme connect -t rdma -n "nqn.2016-06.io.spdk:cnode${i}" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
done
timing_exit nvmf_connect

# fio workload: 128 KiB blocks, iodepth 64, randwrite with verification,
# running for $RUNTIME seconds.
timing_enter fio_test
$testdir/../fio/nvmf_fio.py 131072 64 randwrite $RUNTIME verify
timing_exit fio_test

sync
disconnect_nvmf

# Orderly teardown: subsystems first, then bdevs and pool files.
for i in `seq 1 $SUBSYS_NR`; do
	$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode$i
done

clear_pmem_pool

# Remove fio job files left in the working directory.
rm -f ./local-job*

# Teardown finished normally — drop the failure trap before the final cleanup.
trap - SIGINT SIGTERM EXIT

nvmfcleanup
killprocess $pid
timing_exit nvmf_pmem
+0 −17
Original line number Diff line number Diff line
@@ -36,23 +36,6 @@ quick test an 10 minutes for longer nightly test.
- Step 9: Disconnect kernel initiator from NVMe-oF subsystems.
- Step 10: Delete NVMe-oF subsystems from configuration.

#### Test 2: NVMe-OF namespace on a Pmem device
This test configures a SPDK NVMe-OF subsystem backed by pmem
devices and uses FIO to generate I/Os that target those subsystems.
Test steps:
- Step 1: Assign IP addresses to RDMA NICs.
- Step 2: Start SPDK nvmf_tgt application.
- Step 3: Create NVMe-OF subsystem with 10 pmem bdevs namespaces
- Step 4: Repeat step 3 nine more times to get a total of 10 NVMeOF subsystems,
each with 10 pmem bdev namespaces.
- Step 5: Connect to NVMe-OF subsystems with kernel initiator.
- Step 6: Run FIO with workload parameters: blocksize=128kB, iodepth=16,
    workload=randwrite; verify flag is enabled so that FIO reads and verifies
    the data written to the pmem device. The run time is 10 seconds for a
    quick test and 10 minutes for a longer nightly test.
- Step 7: Disconnect kernel initiator from NVMe-OF subsystems.
- Step 8: Delete NVMe-OF subsystems from configuration.

### Compatibility testing

- Verify functionality of SPDK `nvmf_tgt` with Linux kernel NVMe-oF host