Commit acb84975 authored by Karol Latecki's avatar Karol Latecki Committed by Tomasz Zawadzki
Browse files

test/nvme: add support for lvol bdevs to perf scripts



Signed-off-by: default avatarKarol Latecki <karol.latecki@intel.com>
Signed-off-by: default avatarPawel Piatek <pawelx.piatek@intel.com>
Change-Id: If7345e3750ee9cbc866b2b7a290074bd82a410bc
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/18130


Reviewed-by: default avatarKonrad Sztyber <konrad.sztyber@intel.com>
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarTomasz Zawadzki <tomasz.zawadzki@intel.com>
parent 44fad16e
Loading
Loading
Loading
Loading
+74 −4
Original line number Diff line number Diff line
@@ -36,15 +36,20 @@ function discover_bdevs() {
	rm -f /var/run/spdk_bdev0
}

function get_disk_cfg() {
	# Print the disk configuration file ($DISKCFG) with comment lines
	# (a '#' optionally preceded by whitespace) filtered out.
	local cfg_file="$DISKCFG"
	grep -vP "^\s*#" "$cfg_file"
}

function create_spdk_bdev_conf() {
	local output
	local disk_cfg
	local bdev_io_cache_size=$1
	local bdev_io_pool_size=$2
	local bdev_json_cfg=()
	local bdev_opts=()
	local dev_opts=()
	local i

	disk_cfg=($(grep -vP "^\s*#" "$DISKCFG"))
	disk_cfg=($(get_disk_cfg))

	if [[ -n "$bdev_io_cache_size" ]]; then
		bdev_opts+=("\"bdev_io_cache_size\": $bdev_io_cache_size")
@@ -153,9 +158,8 @@ function get_numa_node() {

function get_disks() {
	local plugin=$1
	local disk_cfg
	local disk_cfg=($(get_disk_cfg))

	disk_cfg=($(grep -vP "^\s*#" "$DISKCFG"))
	if [[ "$plugin" =~ "nvme" ]]; then
		# PCI BDF address is enough for nvme-perf and nvme-fio-plugin,
		# so just print them from configuration file
@@ -496,3 +500,69 @@ function create_spdk_xnvme_bdev_conf() {
	done
	gen_conf > "$testdir/bdev.conf"
}

# LVOL support functions
function start_spdk_tgt() {
	# Launch spdk_tgt in the background and remember its PID so that
	# stop_spdk_tgt() can terminate it later.
	"$SPDK_BIN_DIR/spdk_tgt" -g &
	spdk_tgt_pid=$!

	# Block until the target's RPC interface is ready to accept calls.
	waitforlisten "$spdk_tgt_pid"
}

function stop_spdk_tgt() {
	# Tear down the spdk_tgt process started by start_spdk_tgt().
	killprocess "$spdk_tgt_pid"
}

function attach_bdevs() {
	# Attach an NVMe bdev controller for each PCI BDF address listed in
	# the disk configuration file. Controllers are named Nvme0, Nvme1, ...
	# in file order.
	local disk_cfg=($(get_disk_cfg))
	local i
	for i in "${!disk_cfg[@]}"; do
		$rpc_py bdev_nvme_attach_controller -b "Nvme${i}" -t pcie -a "${disk_cfg[i]}"
		# Fix: original message referenced an undefined $nvme_bdev variable
		# and never printed the BDF it promised ("... with BDF").
		echo "Attached NVMe Bdev Nvme${i} with BDF ${disk_cfg[i]}"
	done
}

function cleanup_lvol_cfg() {
	# Delete every lvol bdev and then every lvol store known to the
	# running SPDK target, leaving the underlying NVMe bdevs bare.
	# UUIDs contain no whitespace, so plain word-splitting is safe here.
	local store bdev

	echo "Cleanup lvols"
	for store in $($rpc_py bdev_lvol_get_lvstores | jq -r '.[].uuid'); do
		# Remove the store's lvol bdevs before the store itself.
		for bdev in $($rpc_py bdev_lvol_get_lvols -u "$store" | jq -r '.[].uuid'); do
			$rpc_py bdev_lvol_delete "$bdev"
			echo "lvol bdev $bdev removed"
		done

		$rpc_py bdev_lvol_delete_lvstore -u "$store"
		echo "lvol store $store removed"
	done
}

# Standalone lvol cleanup: bring up a temporary spdk_tgt instance, attach
# the NVMe controllers listed in the disk config, delete all lvol bdevs
# and lvol stores, then shut the target down again.
function cleanup_lvols() {
	start_spdk_tgt
	attach_bdevs
	cleanup_lvol_cfg
	stop_spdk_tgt
}

function create_lvols() {
	# Create one lvol store (lvs_0) and one lvol bdev (lbd_0) consuming
	# all free space on top of every NVMe bdev from the disk config.
	# Resulting lvol bdev names are appended to the global LVOL_BDEVS
	# array for later use when generating the fio configuration.
	# Fix: declare working variables local so they no longer leak into
	# the global scope (only LVOL_BDEVS is intentionally global).
	local nvme_bdevs nvme_bdev ls_guid free_mb lb_name

	start_spdk_tgt
	attach_bdevs
	# Start from a clean slate in case a previous run left lvols behind.
	cleanup_lvol_cfg

	nvme_bdevs=($($rpc_py bdev_get_bdevs | jq -r '.[].name'))
	for nvme_bdev in "${nvme_bdevs[@]}"; do
		ls_guid=$($rpc_py bdev_lvol_create_lvstore "$nvme_bdev" lvs_0 --clear-method none)
		echo "Created LVOL Store $ls_guid on Bdev $nvme_bdev"

		# Size the lvol bdev to use all free space in the store.
		free_mb=$(get_lvs_free_mb "$ls_guid")
		lb_name=$($rpc_py bdev_lvol_create -u "$ls_guid" lbd_0 "$free_mb" --clear-method none)
		LVOL_BDEVS+=("$lb_name")
		echo "Created LVOL Bdev $lb_name ($free_mb MB) on Lvol Store $ls_guid on Bdev $nvme_bdev"
	done

	stop_spdk_tgt
}
+23 −1
Original line number Diff line number Diff line
@@ -41,6 +41,7 @@ MAIN_CORE=""
TMP_BPF_FILE=$testdir/bpftraces.txt
PLUGIN="nvme"
DISKCFG=""
USE_LVOL_BDEVS=false
BDEV_CACHE=""
BDEV_POOL=""
DISKNO="ALL"
@@ -50,6 +51,7 @@ CPUFREQ=""
PERFTOP=false
DPDKMEM=false
BPFTRACES=()
LVOL_BDEVS=()
DATE="$(date +'%m_%d_%Y_%H%M%S')"

function usage() {
@@ -100,6 +102,8 @@ function usage() {
	echo "                            It consists a single column of PCI addresses. SPDK Bdev names will be assigned"
	echo "                            and Kernel block device names detected."
	echo "                            Lines starting with # are ignored as comments."
	echo "    --use-lvol-bdevs        Create Logical Volume Store and Bdev on top of each NVMe drive"
	echo "                            To be used only with spdk-*-bdev driver options."
	echo "    --bdev-io-cache-size    Set IO cache size for for SPDK bdev subsystem."
	echo "    --bdev-io-pool-size     Set IO pool size for for SPDK bdev subsystem."
	echo "    --max-disk=INT,ALL      Number of disks to test on, this will run multiple workloads with increasing number of disk each run."
@@ -156,6 +160,7 @@ while getopts 'h-:' optchar; do
						exit 1
					fi
					;;
				use-lvol-bdevs) USE_LVOL_BDEVS=true ;;
				bdev-io-cache-size=*) BDEV_CACHE="${OPTARG#*=}" ;;
				bdev-io-pool-size=*) BDEV_POOL="${OPTARG#*=}" ;;
				max-disk=*) DISKNO="${OPTARG#*=}" ;;
@@ -199,9 +204,17 @@ echo "num_of_disks,iops,avg_lat[usec],p90[usec],p99[usec],p99.99[usec],stdev[use

trap 'rm -f *.state $testdir/bdev.conf; kill $perf_pid; wait $dpdk_mem_pid; print_backtrace' ERR SIGTERM SIGABRT

if $USE_LVOL_BDEVS && ! [[ "$PLUGIN" =~ 'bdev' ]]; then
	echo 'ERROR: lvol bdevs are supported only with bdev plugin'
	exit 1
fi

if [[ "$PLUGIN" =~ "xnvme" ]]; then
	create_spdk_xnvme_bdev_conf "$BDEV_CACHE" "$BDEV_POOL"
elif [[ "$PLUGIN" =~ "bdev" ]]; then
	if $USE_LVOL_BDEVS; then
		create_lvols
	fi
	create_spdk_bdev_conf "$BDEV_CACHE" "$BDEV_POOL"
fi

@@ -330,8 +343,12 @@ for ((j = 0; j < REPEAT_NO; j++)); do
		max_lat_disks_usec=$(bc "$max_lat_disks_usec + $max_lat")

		cp $TMP_RESULT_FILE $result_dir/perf_results_${MIX}_${PLUGIN}_${NO_CORES}cpus_${DATE}_${k}_disks_${j}.output
	else
		if $USE_LVOL_BDEVS; then
			create_fio_config $DISKNO $PLUGIN "$LVOL_BDEVS" "$DISKS_NUMA" "$CORES"
		else
			create_fio_config $DISKNO $PLUGIN "$DISK_NAMES" "$DISKS_NUMA" "$CORES"
		fi

		if $LATENCY_LOG; then
			write_log_opt="--write_lat_log=$result_dir/perf_lat_${BLK_SIZE}BS_${IODEPTH}QD_${RW}_${MIX}MIX_${PLUGIN}_${DATE}_${k}disks_${j}"
@@ -420,4 +437,9 @@ if [[ $PLUGIN = "kernel-io-uring" || $PLUGIN =~ "xnvme" ]]; then
		cat $backup_dir/$disk/io_poll_delay > $sysfs/io_poll_delay
	done
fi

if $USE_LVOL_BDEVS; then
	cleanup_lvols
fi

rm -f $testdir/bdev.conf $testdir/config.fio