Commit 3a141b72 authored by Marcin Spiewak's avatar Marcin Spiewak Committed by Jim Harris
Browse files

bdevperf: send test results in RPC response



When bdevperf test is triggered with 'bdevperf.py perform_tests' RPC
command, the tool starts test jobs and when they are finished, the
test results are printed on the console. This patch adds reporting
of the test results in RPC response. The response consists of two
sections, an object indicating the number of cores on which the tests
were run, and a table with the test results. When bdevperf is started
on 8 cores with the command:
'sudo ./build/examples/bdevperf -m 0xFF -z'
an example response to the command starting tests:
'sudo PYTHONPATH=python examples/bdev/bdevperf/bdevperf.py \
perform_tests -q 16 -o 4096 -t 5 -w write'
would be:
{
  "results": [
    {
      "job": "Malloc0",
      "core_mask": "0x1",
      "workload": "write",
      "status": "finished",
      "queue_depth": 16,
      "io_size": 4096,
      "runtime": 5.000048,
      "iops": 1200455.675625514,
      "mibps": 4689.279982912164,
      "io_failed": 0,
      "io_timeout": 0,
      "avg_latency_us": 13.252784244696382,
      "min_latency_us": 11.130434782608695,
      "max_latency_us": 222.6086956521739
    },
    {
      "job": "Malloc1",
      "core_mask": "0x2",
      "workload": "write",
      "status": "finished",
      "queue_depth": 16,
      "io_size": 4096,
      "runtime": 5.000024,
      "iops": 1216432.5611237066,
      "mibps": 4751.689691889479,
      "io_failed": 0,
      "io_timeout": 0,
      "avg_latency_us": 13.082916284986958,
      "min_latency_us": 5.7043478260869565,
      "max_latency_us": 90.82434782608695
    }
  ],
  "core_count": 2
}

The majority of the information provided in the RPC response is the same
as the information printed on the console. If the test finishes successfully,
it is indicated by:
"status": "finished"
If the test fails, it is indicated by:
"status": "failed"
If the bdevperf was terminated (SIGINT, SIGTERM sent to the process
or by Ctrl-C in the console window), it will be indicated by:
"status": "terminated"
The "runtime" field carries information about test execution time in
seconds, latencies are presented in microseconds.
If the test fails, "runtime" indicates the time when the test failed.
"core_count" provides number of cores that were actually used during
the test.

Change-Id: I327db7a57bdc1033b6803813d54861a6c970a718
Signed-off-by: default avatarMarcin Spiewak <marcin.spiewak@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/23881


Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: default avatarKonrad Sztyber <konrad.sztyber@intel.com>
Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarJim Harris <jim.harris@samsung.com>
Community-CI: Mellanox Build Bot
Reviewed-by: default avatarKarol Latecki <karol.latecki@intel.com>
parent 72177d39
Loading
Loading
Loading
Loading
+70 −6
Original line number Diff line number Diff line
@@ -25,6 +25,7 @@
#define BDEVPERF_CONFIG_UNDEFINED -1
#define BDEVPERF_CONFIG_ERROR -2
#define PATTERN_TYPES_STR "(read, write, randread, randwrite, rw, randrw, verify, reset, unmap, flush, write_zeroes)"
#define BDEVPERF_MAX_COREMASK_STRING 64

struct bdevperf_task {
	struct iovec			iov;
@@ -425,6 +426,50 @@ performance_dump_job_stdout(struct bdevperf_job *job,
	       job_stats->max_latency);
}

/*
 * Emit one job's statistics as members of an (already opened) JSON object.
 *
 * Called while building the "results" array of the perform_tests RPC
 * response; the caller brackets this with spdk_json_write_object_begin()/
 * spdk_json_write_object_end().
 *
 * \param job       job whose configuration and counters are reported
 * \param w         JSON write context for the in-progress RPC response
 * \param job_stats per-job statistics gathered for this run (rates in
 *                  IOPS/MiBps, latencies in microseconds, time in usec)
 */
static void
performance_dump_job_json(struct bdevperf_job *job,
			  struct spdk_json_write_ctx *w,
			  struct bdevperf_stats *job_stats)
{
	char core_mask_string[BDEVPERF_MAX_COREMASK_STRING] = {0};

	spdk_json_write_named_string(w, "job", job->name);
	/* Format the job thread's cpuset as a "0x<hex>" mask string. */
	snprintf(core_mask_string, BDEVPERF_MAX_COREMASK_STRING,
		 "0x%s", spdk_cpuset_fmt(spdk_thread_get_cpumask(job->thread)));
	spdk_json_write_named_string(w, "core_mask", core_mask_string);
	spdk_json_write_named_string(w, "workload", parse_workload_type(job->workload_type));

	/* Mixed read/write workloads additionally report the rw percentage. */
	if (job->workload_type == JOB_CONFIG_RW_RW || job->workload_type == JOB_CONFIG_RW_RANDRW) {
		spdk_json_write_named_uint32(w, "percentage", job->rw_percentage);
	}

	/* Status precedence: a shutdown request (SIGINT/SIGTERM/Ctrl-C) is
	 * reported as "terminated" even if I/O errors also occurred; otherwise
	 * I/O failures mark the job "failed" unless the job is a reset job or
	 * was configured to continue on failure.
	 */
	if (g_shutdown) {
		spdk_json_write_named_string(w, "status", "terminated");
	} else if (job->io_failed > 0 && !job->reset && !job->continue_on_failure) {
		spdk_json_write_named_string(w, "status", "failed");
	} else {
		spdk_json_write_named_string(w, "status", "finished");
	}

	/* Verify jobs also report the LBA range (in I/O units) they covered. */
	if (job->verify) {
		spdk_json_write_named_object_begin(w, "verify_range");
		spdk_json_write_named_uint64(w, "start", job->ios_base);
		spdk_json_write_named_uint64(w, "length", job->size_in_ios);
		spdk_json_write_object_end(w);
	}

	spdk_json_write_named_uint32(w, "queue_depth", job->queue_depth);
	spdk_json_write_named_uint32(w, "io_size", job->io_size);
	/* Convert the measured run time from microseconds to seconds. */
	spdk_json_write_named_double(w, "runtime", (double)job_stats->io_time_in_usec / SPDK_SEC_TO_USEC);
	spdk_json_write_named_double(w, "iops", job_stats->total_io_per_second);
	spdk_json_write_named_double(w, "mibps", job_stats->total_mb_per_second);
	spdk_json_write_named_uint64(w, "io_failed", job->io_failed);
	spdk_json_write_named_uint64(w, "io_timeout", job->io_timeout);
	spdk_json_write_named_double(w, "avg_latency_us", job_stats->average_latency);
	spdk_json_write_named_double(w, "min_latency_us", job_stats->min_latency);
	spdk_json_write_named_double(w, "max_latency_us", job_stats->max_latency);
}

static void
generate_data(struct bdevperf_job *job, void *buf, void *md_buf, bool unique)
{
@@ -632,7 +677,9 @@ bdevperf_test_done(void *ctx)
	double average_latency = 0.0;
	uint64_t time_in_usec;
	int rc;
	struct spdk_json_write_ctx *w = NULL;
	struct bdevperf_stats job_stats = {0};
	struct spdk_cpuset cpu_mask;

	if (g_time_in_usec) {
		g_stats.total.io_time_in_usec = g_time_in_usec;
@@ -652,18 +699,40 @@ bdevperf_test_done(void *ctx)
		printf("Received shutdown signal, test time was about %.6f seconds\n",
		       (double)g_time_in_usec / SPDK_SEC_TO_USEC);
	}
	/* Send RPC response if g_run_rc indicate success, or shutdown request was sent to bdevperf.
	 * rpc_perform_tests_cb will send error response in case of error.
	 */
	if ((g_run_rc == 0 || g_shutdown) && g_request) {
		w = spdk_jsonrpc_begin_result(g_request);
		spdk_json_write_object_begin(w);
		spdk_json_write_named_array_begin(w, "results");
	}

	printf("\n%*s\n", 107, "Latency(us)");
	printf("\r %-*s: %10s %10s %10s %10s %8s %10s %10s %10s\n",
	       28, "Device Information", "runtime(s)", "IOPS", "MiB/s", "Fail/s", "TO/s", "Average", "min", "max");


	spdk_cpuset_zero(&cpu_mask);
	TAILQ_FOREACH_SAFE(job, &g_bdevperf.jobs, link, jtmp) {
		spdk_cpuset_or(&cpu_mask, spdk_thread_get_cpumask(job->thread));
		memset(&job_stats, 0, sizeof(job_stats));
		bdevperf_job_get_stats(job, &job_stats, job->run_time_in_usec, 0);
		bdevperf_job_stats_accumulate(&g_stats.total, &job_stats);
		performance_dump_job_stdout(job, &job_stats);
		if (w) {
			spdk_json_write_object_begin(w);
			performance_dump_job_json(job, w, &job_stats);
			spdk_json_write_object_end(w);
		}
	}

	if (w) {
		spdk_json_write_array_end(w);
		spdk_json_write_named_uint32(w, "core_count", spdk_cpuset_count(&cpu_mask));
		spdk_json_write_object_end(w);
		spdk_jsonrpc_end_result(g_request, w);
	}
	printf("\r =================================================================================="
	       "=================================\n");
	printf("\r %-28s: %10s %10.2f %10.2f",
@@ -2523,16 +2592,11 @@ rpc_perform_tests_reset(void)
static void
rpc_perform_tests_cb(void)
{
	struct spdk_json_write_ctx *w;
	struct spdk_jsonrpc_request *request = g_request;

	g_request = NULL;

	if (g_run_rc == 0) {
		w = spdk_jsonrpc_begin_result(request);
		spdk_json_write_uint32(w, g_run_rc);
		spdk_jsonrpc_end_result(request, w);
	} else {
	if (g_run_rc) {
		spdk_jsonrpc_send_error_response_fmt(request, SPDK_JSONRPC_ERROR_INTERNAL_ERROR,
						     "bdevperf failed with error %s", spdk_strerror(-g_run_rc));
	}