Commit 8078db7a authored by Karol Latecki, committed by Tomasz Zawadzki
Browse files

test/nvmf: reduce number of RDMA IO queues when using ConnectX-5 NICs



Reduce maximum number of queues when connecting with
ConnectX-5 NICs. When using host systems with nproc > 64
connecting with default options and creating all IO queues
takes too much time and results in keep-alive timeout.

Change-Id: I3b3a000fad3a69d4ce5657df494245be2b2a0a81
Signed-off-by: Karol Latecki <karol.latecki@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/15571


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
parent a2ca928a
Loading
Loading
Loading
Loading
+16 −0
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@ NVMF_IP_LEAST_ADDR=8
NVMF_TCP_IP_ADDRESS="127.0.0.1"
NVMF_TRANSPORT_OPTS=""
NVMF_SERIAL=SPDK00000000000001
NVME_CONNECT="nvme connect"
NET_TYPE=${NET_TYPE:-phy-fallback}

function build_nvmf_app_args() {
@@ -291,6 +292,7 @@ function gather_supported_nvmf_pci_devs() {
	x722+=(${pci_bus_cache["$intel:0x37d2"]})
	# ConnectX-5
	mlx+=(${pci_bus_cache["$mellanox:0x1017"]})
	mlx+=(${pci_bus_cache["$mellanox:0x1019"]})
	# ConnectX-4
	mlx+=(${pci_bus_cache["$mellanox:0x1015"]})
	mlx+=(${pci_bus_cache["$mellanox:0x1013"]})
@@ -325,6 +327,20 @@ function gather_supported_nvmf_pci_devs() {
			echo "$pci not bound, needs ${pci_mod_resolved["$pci"]}"
			pci_drivers["${pci_mod_resolved["$pci"]}"]=1
		fi
		if [[ ${pci_ids_device["$pci"]} == "0x1017" ]] \
			|| [[ ${pci_ids_device["$pci"]} == "0x1019" ]] \
			|| [[ $TEST_TRANSPORT == rdma ]]; then
			# Reduce maximum number of queues when connecting with
			# ConnectX-5 NICs. When using host systems with nproc > 64
			# connecting with default options (where default equals to
			# number of host online CPUs) creating all IO queues
			# takes too much time and results in keep-alive timeout.
			# See:
			# https://github.com/spdk/spdk/issues/2772
			# 0x1017 - MT27800 Family ConnectX-5
			# 0x1019 - MT28800 Family ConnectX-5 Ex
			NVME_CONNECT="nvme connect -i 15"
		fi
	done

	if ((${#pci_drivers[@]} > 0)); then
+3 −2
Original line number Diff line number Diff line
@@ -22,14 +22,15 @@ $rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPOR

if [ $RUN_NIGHTLY -eq 1 ]; then
	num_iterations=100
	IO_QUEUES="-i 8"
	# Reduce number of IO queues to shorten connection time
	NVME_CONNECT="nvme connect -i 8"
else
	num_iterations=10
fi

set +x
for i in $(seq 1 $num_iterations); do
	nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT" $IO_QUEUES
	$NVME_CONNECT -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
	waitforserial "$NVMF_SERIAL"
	nvme disconnect -n "nqn.2016-06.io.spdk:cnode1"
	waitforserial_disconnect "$NVMF_SERIAL"
+1 −1
Original line number Diff line number Diff line
@@ -54,7 +54,7 @@ function nvmf_filesystem_part() {

	malloc_size=$(($(get_bdev_size $malloc_name) * 1024 * 1024))

	nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
	$NVME_CONNECT -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"

	waitforserial "$NVMF_SERIAL"
	nvme_name=$(lsblk -l -o NAME,SERIAL | grep -oP "([\w]*)(?=\s+${NVMF_SERIAL})")
+1 −1
Original line number Diff line number Diff line
@@ -40,7 +40,7 @@ $rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 raid0
# Append the concat0 bdev into subsystem
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 concat0

nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
$NVME_CONNECT -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"

waitforserial $NVMF_SERIAL 4

+1 −1
Original line number Diff line number Diff line
@@ -23,7 +23,7 @@ $rpc_py nvmf_create_subsystem nqn.2016-06.io.spdk:cnode1 -a -s $NVMF_SERIAL
$rpc_py nvmf_subsystem_add_ns nqn.2016-06.io.spdk:cnode1 Delay0
$rpc_py nvmf_subsystem_add_listener nqn.2016-06.io.spdk:cnode1 -t $TEST_TRANSPORT -a $NVMF_FIRST_TARGET_IP -s $NVMF_PORT

nvme connect -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"
$NVME_CONNECT -t $TEST_TRANSPORT -n "nqn.2016-06.io.spdk:cnode1" -a "$NVMF_FIRST_TARGET_IP" -s "$NVMF_PORT"

waitforserial "$NVMF_SERIAL"

Loading