Commit ef489303 authored by Tomasz Zawadzki, committed by Ben Walker

net/vpp: include VPP into iSCSI test scripts

Tests that so far were performed using the posix net framework
can now be run with VPP. This patch adds the network interface
configuration needed for VPP to work in the iSCSI tests.

Some tests are disabled on purpose:
- IP Migration, RBD and NVMe-oF, because these tests lack network
  namespace support
- rpc_config adding/deleting IP, as VPP has a separate utility for that
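
For reference, the separate utility is vppctl. A minimal sketch of the
equivalent add/delete, reusing the host-$TARGET_INTERFACE naming and
$TARGET_IP from the test scripts below; the "del" form is the usual
vppctl counterpart and is not part of this patch:

	vppctl set interface ip address host-$TARGET_INTERFACE $TARGET_IP/24
	vppctl set interface ip address del host-$TARGET_INTERFACE $TARGET_IP/24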

calsoft.sh doesn't handle the TCP stream properly and fails to decode
iSCSI requests when they are split by TCP segmentation. This situation
is very common with VPP and causes calsoft.sh to never pass.

Change-Id: I7c80427ca1675a1789ce7440796cc8d9956f1c9e
Signed-off-by: Slawomir Mrozowicz <slawomirx.mrozowicz@intel.com>
Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/394174
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
parent 29408eba
+4 −0
@@ -199,6 +199,10 @@ if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
		run_test suite test/spdkcli/raid.sh
	fi

	if [ $SPDK_TEST_VPP -eq 1 ]; then
		run_test suite ./test/iscsi_tgt/iscsi_tgt.sh vpp
	fi

	if [ $SPDK_TEST_BLOBFS -eq 1 ]; then
		run_test suite ./test/blobfs/rocksdb/rocksdb.sh
		run_test suite ./test/blobstore/blobstore.sh
+85 −5
@@ -13,6 +13,9 @@ INITIATOR_TAG=2
INITIATOR_NAME=ANY
PORTAL_TAG=1
ISCSI_APP="$TARGET_NS_CMD ./app/iscsi_tgt/iscsi_tgt"
if [ $SPDK_TEST_VPP -eq 1 ]; then
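	# -L sock_vpp turns on extra debug logging from the VPP socket layer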
	ISCSI_APP+=" -L sock_vpp"
fi
ISCSI_TEST_CORE_MASK=0xFF

function create_veth_interfaces() {
@@ -34,17 +37,25 @@ function create_veth_interfaces() {
	# Accept connections from veth interface
	iptables -I INPUT 1 -i $INITIATOR_INTERFACE -p tcp --dport $ISCSI_PORT -j ACCEPT

	$TARGET_NS_CMD ip link set $TARGET_INTERFACE up

	if [ "$1" == "posix" ]; then
		$TARGET_NS_CMD ip link set lo up
		$TARGET_NS_CMD ip addr add $TARGET_IP/24 dev $TARGET_INTERFACE

		# Verify connectivity
		ping -c 1 $TARGET_IP
		ip netns exec $TARGET_NAMESPACE ping -c 1 $INITIATOR_IP
	else
		start_vpp
	fi
}

function cleanup_veth_interfaces() {
	# $1 = test type (posix/vpp)
	if [ "$1" == "vpp" ]; then
		kill_vpp
	fi

	# Cleanup veth interfaces and network namespace
	# Note: removing one veth removes the pair
@@ -90,3 +101,72 @@ function iscsitestfini() {
		$rootdir/scripts/setup.sh reset
	fi
}

function start_vpp() {
	# Make sure the posix side doesn't send jumbo packets, because the
	# maximum MTU for TCP on the VPP side is 1460 and the tests don't
	# work reliably with larger packets
	MTU=1460
	ip link set dev $INITIATOR_INTERFACE mtu $MTU
	ethtool -K $INITIATOR_INTERFACE tso off
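	# The lowercase -k call only queries the offload settings, for the test log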
	ethtool -k $INITIATOR_INTERFACE

	# Start VPP process in SPDK target network namespace
	$TARGET_NS_CMD vpp \
		unix { nodaemon cli-listen /run/vpp/cli.sock } \
		dpdk { no-pci num-mbufs 128000 } \
		session { evt_qs_memfd_seg } \
		socksvr { socket-name /run/vpp-api.sock } \
		plugins { \
			plugin default { disable } \
			plugin dpdk_plugin.so { enable } \
		} &

	vpp_pid=$!
	echo "VPP Process pid: $vpp_pid"

	# Wait until VPP starts responding
	xtrace_disable
	counter=40
	while [ $counter -gt 0 ] ; do
		vppctl show version &> /dev/null && break
		counter=$(( $counter - 1 ))
		sleep 0.5
	done
	xtrace_restore
	if [ $counter -eq 0 ] ; then
		return 1
	fi

	# Setup host interface
	vppctl create host-interface name $TARGET_INTERFACE
	VPP_TGT_INT="host-$TARGET_INTERFACE"
	vppctl set interface state $VPP_TGT_INT up
	vppctl set interface ip address $VPP_TGT_INT $TARGET_IP/24
	vppctl set interface mtu $MTU $VPP_TGT_INT

	vppctl show interface

	# Disable the session layer
	# NOTE: the VPP net framework should enable it itself.
	vppctl session disable

	# Verify connectivity
	vppctl show int addr
	ip addr show $INITIATOR_INTERFACE
	ip netns exec $TARGET_NAMESPACE ip addr show $TARGET_INTERFACE
	sleep 3
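	# ICMP payload sizes: 28 = 20-byte IPv4 header + 8-byte ICMP header, so
	# the posix ping fills the MTU exactly without fragmenting (-M do);
	# vppctl ping subtracts a further 8 bytes (assumed VPP payload accounting)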
	ping -c 1 $TARGET_IP -s $(( $MTU - 28 )) -M do
	vppctl ping $INITIATOR_IP repeat 1 size $(( $MTU - (28 + 8) )) verbose
}

function kill_vpp() {
	vppctl delete host-interface name $TARGET_INTERFACE

	# Dump VPP configuration before kill
	vppctl show api clients
	vppctl show session
	vppctl show errors

	killprocess $vpp_pid
}
+1 −0
@@ -53,6 +53,7 @@ $rpc_py bdev_inject_error EE_Malloc0 'all' 'failure' -n 1000
dev=$(iscsiadm -m session -P 3 | grep "Attached scsi disk" | awk '{print $4}')

set +e
waitforfile /dev/$dev
mkfs.ext4 -F /dev/$dev
if [ $? -eq 0 ]; then
	echo "mkfs successful - expected failure"
+24 −6
@@ -24,15 +24,25 @@ create_veth_interfaces $TEST_TYPE

trap "cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT

run_test suite ./test/iscsi_tgt/sock/sock.sh $TEST_TYPE
if [ "$TEST_TYPE" == "posix" ]; then
	# calsoft doesn't handle the TCP stream properly and fails to decode
	# iSCSI requests when they are split by TCP segmentation. This situation
	# is very common with VPP and causes calsoft.sh to never pass.
	run_test suite ./test/iscsi_tgt/calsoft/calsoft.sh
fi
run_test suite ./test/iscsi_tgt/filesystem/filesystem.sh
run_test suite ./test/iscsi_tgt/reset/reset.sh
run_test suite ./test/iscsi_tgt/rpc_config/rpc_config.sh $TEST_TYPE
run_test suite ./test/iscsi_tgt/lvol/iscsi_lvol.sh
run_test suite ./test/iscsi_tgt/fio/fio.sh
run_test suite ./test/iscsi_tgt/qos/qos.sh

# IP Migration tests do not support network namespaces,
# they can only be run on posix sockets.
if [ "$TEST_TYPE" == "posix" ]; then
	run_test suite ./test/iscsi_tgt/ip_migration/ip_migration.sh
fi
run_test suite ./test/iscsi_tgt/trace_record/trace_record.sh

if [ $RUN_NIGHTLY -eq 1 ]; then
@@ -43,15 +53,23 @@ if [ $RUN_NIGHTLY -eq 1 ]; then
	run_test suite ./test/iscsi_tgt/digests/digests.sh
fi
if [ $SPDK_TEST_RBD -eq 1 ]; then
	# RBD tests do not support network namespaces,
	# they can only be run on posix sockets.
	if [ "$TEST_TYPE" == "posix" ]; then
		run_test suite ./test/iscsi_tgt/rbd/rbd.sh
	fi
fi

trap "cleanup_veth_interfaces $TEST_TYPE; exit 1" SIGINT SIGTERM EXIT

if [ $SPDK_TEST_NVMF -eq 1 ]; then
	# NVMe-oF tests do not support network namespaces,
	# they can only be run on posix sockets.
	if [ "$TEST_TYPE" == "posix" ]; then
		# Test configure remote NVMe device from rpc and conf file
		run_test suite ./test/iscsi_tgt/nvme_remote/fio_remote_nvme.sh
	fi
fi

if [ $RUN_NIGHTLY -eq 1 ]; then
	run_test suite ./test/iscsi_tgt/multiconnection/multiconnection.sh
+4 −1
@@ -486,6 +486,9 @@ if __name__ == "__main__":
    try:
        verify_log_flag_rpc_methods(rpc_py, rpc_param)
        verify_get_interfaces(rpc_py)
        # Add/delete IP is not supported with VPP;
        # it has a separate vppctl utility for that.
        if test_type == 'posix':
            verify_add_delete_ip_address(rpc_py)
        create_malloc_bdevs_rpc_methods(rpc_py, rpc_param)
        verify_portal_groups_rpc_methods(rpc_py, rpc_param)