Commit cbda5664 authored by Michal Berger, committed by Jim Harris
Browse files

test/nvmf: Remove support for soft-RoCE setups

Motivation: https://github.com/spdk/spdk/issues/2277



Signed-off-by: Michal Berger <michalx.berger@intel.com>
Change-Id: I6a85816c65ebecf63c2f454e4b97484542faef9e
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10929


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 9c37603b
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -41,7 +41,7 @@ if [ $(uname -s) = Linux ]; then
	fi
fi

trap "autotest_cleanup || :; revert_soft_roce; exit 1" SIGINT SIGTERM EXIT
trap "autotest_cleanup || :; exit 1" SIGINT SIGTERM EXIT

timing_enter autotest

@@ -240,6 +240,7 @@ if [ $SPDK_RUN_FUNCTIONAL_TEST -eq 1 ]; then
	fi

	if [ $SPDK_TEST_NVMF -eq 1 ]; then
		export NET_TYPE
		# The NVMe-oF run test cases are split out like this so that the parser that compiles the
		# list of all tests can properly differentiate them. Please do not merge them into one line.
		if [ "$SPDK_TEST_NVMF_TRANSPORT" = "rdma" ]; then
@@ -330,7 +331,6 @@ fi

timing_enter cleanup
autotest_cleanup
revert_soft_roce
timing_exit cleanup

timing_exit autotest
+5 −22
Original line number Diff line number Diff line
@@ -55,11 +55,6 @@ function load_ib_rdma_modules() {
	modprobe rdma_ucm
}

function detect_soft_roce_nics() {
	rxe_cfg stop # make sure we run tests with a clean slate
	rxe_cfg start
}

function allocate_nic_ips() {
	((count = NVMF_IP_LEAST_ADDR))
	for nic_name in $(get_rdma_if_list); do
@@ -86,9 +81,7 @@ function get_rdma_if_list() {
	mapfile -t rxe_net_devs < <(rxe_cfg rxe-net)

	if ((${#net_devs[@]} == 0)); then
		# No rdma-capable nics on board, using soft-RoCE
		printf '%s\n' "${rxe_net_devs[@]}"
		return 0
		return 1
	fi

	# Pick only these devices which were found during gather_supported_nvmf_pci_devs() run
@@ -388,12 +381,13 @@ prepare_net_devs() {
	fi

	# NET_TYPE == virt or phy-fallback
	if [[ $TEST_TRANSPORT == rdma ]]; then
		detect_soft_roce_nics
	elif [[ $TEST_TRANSPORT == tcp ]]; then
	if [[ $TEST_TRANSPORT == tcp ]]; then
		nvmf_veth_init
		return 0
	fi

	echo "ERROR: virt and fallback setup is not supported for $TEST_TRANSPORT"
	return 1
}

function nvmftestinit() {
@@ -459,17 +453,6 @@ function rdma_device_init() {
	allocate_nic_ips
}

function revert_soft_roce() {
	rxe_cfg stop
}

function check_ip_is_soft_roce() {
	if [ "$TEST_TRANSPORT" != "rdma" ]; then
		return 0
	fi
	rxe_cfg status rxe | grep -wq "$1"
}

function nvme_connect() {
	local init_count
	init_count=$(nvme list | wc -l)
+0 −6
Original line number Diff line number Diff line
@@ -14,12 +14,6 @@ bdevperf_rpc_sock=/var/tmp/bdevperf.sock

nvmftestinit

# This issue brings up a weird error in soft roce where the RDMA WC doesn't point to the correct qpair.
if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP && [ "$TEST_TRANSPORT" == "rdma" ]; then
	echo "Using software RDMA, not running this test due to a known issue."
	exit 0
fi

nvmfappstart -m 0xF

$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192
+2 −8
Original line number Diff line number Diff line
@@ -53,10 +53,7 @@ fi
run_test "nvmf_nmic" test/nvmf/target/nmic.sh "${TEST_ARGS[@]}"
run_test "nvmf_fio_target" test/nvmf/target/fio.sh "${TEST_ARGS[@]}"
run_test "nvmf_bdevio" test/nvmf/target/bdevio.sh "${TEST_ARGS[@]}"

if ! check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
	# Soft-RoCE will return invalid values in the WC field after a qp has been
	# destroyed which lead to NULL pointer references not seen in real hardware.
if [[ $NET_TYPE == phy ]]; then
	run_test "nvmf_shutdown" test/nvmf/target/shutdown.sh "${TEST_ARGS[@]}"
	#TODO: disabled due to intermittent failures. Need to triage.
	# run_test "nvmf_srq_overwhelm" test/nvmf/target/srq_overwhelm.sh $TEST_ARGS
@@ -76,9 +73,7 @@ run_test "nvmf_discovery" test/nvmf/host/discovery.sh "${TEST_ARGS[@]}"
#run_test test/nvmf/host/identify_kernel_nvmf.sh $TEST_ARGS
run_test "nvmf_fio_host" test/nvmf/host/fio.sh "${TEST_ARGS[@]}"

# There is an intermittent error relating to those tests and Soft-RoCE.
# Skip those tests if we are using rxe.
if ! check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
if [[ $NET_TYPE == phy ]]; then
	# GitHub issue #1165
	run_test "nvmf_bdevperf" test/nvmf/host/bdevperf.sh "${TEST_ARGS[@]}"
	# GitHub issue #1043
@@ -88,4 +83,3 @@ fi
timing_exit host

trap - SIGINT SIGTERM EXIT
revert_soft_roce
+0 −8
Original line number Diff line number Diff line
@@ -15,14 +15,6 @@ rpc_py="$rootdir/scripts/rpc.py"
nvmftestinit
nvmfappstart -m 0xF

# SoftRoce does not have enough queues available for
# multiconnection tests. Detect if we're using software RDMA.
# If so - lower the number of subsystems for test.
if check_ip_is_soft_roce $NVMF_FIRST_TARGET_IP; then
	echo "Using software RDMA, lowering number of NVMeOF subsystems."
	NVMF_SUBSYS=1
fi

$rpc_py nvmf_create_transport $NVMF_TRANSPORT_OPTS -u 8192

for i in $(seq 1 $NVMF_SUBSYS); do
Loading