Commit c4d5d2fd authored by Jim Harris; committed by Ben Walker
Browse files

test/nvmf: run host/perf.sh separately for each transport



While here, remove NVMF_TCP_IP_ADDRESS since we already
have NVMF_FIRST_TARGET_IP which serves the same purpose
when testing the TCP transport.

Signed-off-by: Jim Harris <james.r.harris@intel.com>
Change-Id: I7cc4712cd9746377937e889127aa5a61566d8846

Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/456705


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent aa429c80
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -186,7 +186,12 @@ function nvmfappstart()
	nvmfpid=$!
	trap "process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1" SIGINT SIGTERM EXIT
	waitforlisten $nvmfpid
	modprobe nvme-$TEST_TRANSPORT
	# currently we run the host/perf test for TCP even on systems without kernel nvme-tcp
	#  support; that's fine since the host/perf test uses the SPDK initiator
	# maybe later we will enforce modprobe to succeed once we have systems in the test pool
	#  with nvme-tcp kernel support - but until then let this pass so we can still run the
	#  host/perf test with the tcp transport
	modprobe nvme-$TEST_TRANSPORT || true
	timing_exit start_nvmf_tgt
}

+47 −76
Original line number Diff line number Diff line
@@ -12,26 +12,12 @@ rpc_py="$rootdir/scripts/rpc.py"

set -e

nvmftestinit

TYPES="TCP"
if [ -z $NVMF_FIRST_TARGET_IP ]; then
	echo "no RDMA NIC for nvmf test, will only test TCP/IP transport"
else
	TYPES=${TYPES}" RDMA"
fi

timing_enter perf
timing_enter start_nvmf_tgt

$NVMF_APP -m 0xF &
nvmfpid=$!

trap "process_shm --id $NVMF_APP_SHM_ID; nvmftestfini; exit 1" SIGINT SIGTERM EXIT
nvmftestinit
nvmfappstart "-m 0xF"

waitforlisten $nvmfpid
$rootdir/scripts/gen_nvme.sh --json | $rpc_py load_subsystem_config
timing_exit start_nvmf_tgt

local_nvme_trid="trtype:PCIe traddr:"$($rpc_py get_subsystem_config bdev | jq -r '.[].params | select(.name=="Nvme0").traddr')
bdevs="$bdevs $($rpc_py construct_malloc_bdev $MALLOC_BDEV_SIZE $MALLOC_BLOCK_SIZE)"
@@ -40,23 +26,19 @@ if [ -n "$local_nvme_trid" ]; then
	bdevs="$bdevs Nvme0n1"
fi

function test_perf()
{
	TYPE=$1
	NVMF_TARGET_IP=$2
	$rpc_py nvmf_create_transport -t $TYPE
$rpc_py nvmf_create_transport -t $TEST_TRANSPORT
$rpc_py nvmf_subsystem_create nqn.2016-06.io.spdk:cnode1 -a -s SPDK00000000000001
for bdev in $bdevs; do
	$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
done
	$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TYPE -a $NVMF_TARGET_IP -s $NVMF_PORT
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT

# Test multi-process access to local NVMe device
if [ -n "$local_nvme_trid" ]; then
	$rootdir/examples/nvme/perf/perf -i $NVMF_APP_SHM_ID -q 32 -o 4096 -w randrw -M 50 -t 1 -r "$local_nvme_trid"
fi

	$rootdir/examples/nvme/perf/perf -q 32 -o 4096 -w randrw -M 50 -t 1 -r "trtype:$TYPE adrfam:IPv4 traddr:$NVMF_TARGET_IP trsvcid:$NVMF_PORT"
$rootdir/examples/nvme/perf/perf -q 32 -o 4096 -w randrw -M 50 -t 1 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
sync
$rpc_py delete_nvmf_subsystem nqn.2016-06.io.spdk:cnode1

@@ -75,13 +57,13 @@ function test_perf()
		for bdev in $lb_nested_guid; do
			$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 $bdev
		done
			$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TYPE -a $NVMF_TARGET_IP -s $NVMF_PORT
		$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT
		# Test perf as host with different io_size and qd_depth in nightly
		qd_depth=("1" "128")
		io_size=("512" "131072")
		for qd in ${qd_depth[@]}; do
			for o in ${io_size[@]}; do
					$rootdir/examples/nvme/perf/perf -q $qd -o $o -w randrw -M 50 -t 10 -r "trtype:$TYPE adrfam:IPv4 traddr:$NVMF_TARGET_IP trsvcid:$NVMF_PORT"
				$rootdir/examples/nvme/perf/perf -q $qd -o $o -w randrw -M 50 -t 10 -r "trtype:$TEST_TRANSPORT adrfam:IPv4 traddr:$NVMF_FIRST_TARGET_IP trsvcid:$NVMF_PORT"
			done
		done

@@ -93,17 +75,6 @@ function test_perf()
		$rpc_py destroy_lvol_store -l lvs_0
	fi
fi
}

for type in $TYPES; do
	if [ $type == "TCP" ]; then
		nvmf_tgt_ip=$NVMF_TCP_IP_ADDRESS
	else
		nvmf_tgt_ip=$NVMF_FIRST_TARGET_IP
	fi

	test_perf $type $nvmf_tgt_ip
done

trap - SIGINT SIGTERM EXIT

+9 −0
Original line number Diff line number Diff line
@@ -43,6 +43,15 @@ timing_enter host
run_test suite test/nvmf/host/bdevperf.sh $TEST_ARGS
run_test suite test/nvmf/host/identify.sh $TEST_ARGS
run_test suite test/nvmf/host/perf.sh $TEST_ARGS
# This script has traditionally tested the tcp transport, and then
# also the rdma transport if it's available.  Now that this script
# is parameterized, explicitly run the test a second time for the
# tcp transport, at least until the test pool is set up with a VM
# that can run all of the tcp tests.  At that point, this whole
# script will be run twice, once for rdma and once for tcp, and
# then this second invocation can be removed.
run_test suite test/nvmf/host/perf.sh $TEST_ARGS --transport=tcp

# TODO: disabled due to intermittent failures (RDMA_CM_EVENT_UNREACHABLE/ETIMEDOUT)
#run_test test/nvmf/host/identify_kernel_nvmf.sh $TEST_ARGS
run_test suite test/nvmf/host/aer.sh $TEST_ARGS