Commit b367de3d authored by Shuhei Matsumoto, committed by Tomasz Zawadzki
Browse files

histogram: Rename bucket_shift to granularity throughout



The bucket shift essentially defines the granularity of the histogram, so
the name "granularity" makes the histogram easier to explain to users than
"bucket_shift" does.

Change-Id: I368599cebdd155be6d2d9cfd944084e7c4964c98
Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Suggested-by: Jim Harris <jim.harris@nvidia.com>
Reviewed-on: https://review.spdk.io/c/spdk/spdk/+/26000


Tested-by: SPDK Automated Test System <spdkbot@gmail.com>
Reviewed-by: Jim Harris <jim.harris@nvidia.com>
parent 8f9167af
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -2856,7 +2856,7 @@ name | Required | string | Block device name
Name                    | Description
------------------------| -----------
histogram               | Base64 encoded histogram
bucket_shift            | Granularity of the histogram buckets
granularity             | Granularity of the histogram buckets
tsc_rate                | Ticks per second

#### Example
@@ -2884,7 +2884,7 @@ Note that histogram field is trimmed, actual encoded histogram length is ~80kb.
  "result": {
    "histogram": "AAAAAAAAAAAAAA...AAAAAAAAA==",
    "tsc_rate": 2300000000,
    "bucket_shift": 7
    "granularity": 7
  }
}
~~~
@@ -8254,7 +8254,7 @@ name | Required | string | Iscsi target node name
Name                    | Description
------------------------| -----------
histogram               | Base64 encoded histogram
bucket_shift            | Granularity of the histogram buckets
granularity             | Granularity of the histogram buckets
tsc_rate                | Ticks per second

#### Example
@@ -8282,7 +8282,7 @@ Note that histogram field is trimmed, actual encoded histogram length is ~80kb.
  "result": {
    "histogram": "AAAAAAAAAAAAAA...AAAAAAAAA==",
    "tsc_rate": 2300000000,
    "bucket_shift": 7
    "granularity": 7
  }
}
~~~
+15 −15
Original line number Diff line number Diff line
@@ -17,10 +17,10 @@
extern "C" {
#endif

#define SPDK_HISTOGRAM_BUCKET_SHIFT_DEFAULT	7
#define SPDK_HISTOGRAM_BUCKET_SHIFT(h)		h->bucket_shift
#define SPDK_HISTOGRAM_BUCKET_LSB(h)		(64 - SPDK_HISTOGRAM_BUCKET_SHIFT(h))
#define SPDK_HISTOGRAM_NUM_BUCKETS_PER_RANGE(h)	(1ULL << SPDK_HISTOGRAM_BUCKET_SHIFT(h))
#define SPDK_HISTOGRAM_GRANULARITY_DEFAULT	7
#define SPDK_HISTOGRAM_GRANULARITY(h)		h->granularity
#define SPDK_HISTOGRAM_BUCKET_LSB(h)		(64 - SPDK_HISTOGRAM_GRANULARITY(h))
#define SPDK_HISTOGRAM_NUM_BUCKETS_PER_RANGE(h)	(1ULL << SPDK_HISTOGRAM_GRANULARITY(h))
#define SPDK_HISTOGRAM_BUCKET_MASK(h)		(SPDK_HISTOGRAM_NUM_BUCKETS_PER_RANGE(h) - 1)
#define SPDK_HISTOGRAM_NUM_BUCKET_RANGES(h)	(SPDK_HISTOGRAM_BUCKET_LSB(h) + 1)
#define SPDK_HISTOGRAM_NUM_BUCKETS(h)		(SPDK_HISTOGRAM_NUM_BUCKETS_PER_RANGE(h) * \
@@ -52,13 +52,13 @@ extern "C" {
 * On a 2.3GHz processor, this strategy results in 50ns buckets in the 7-14us range (sweet
 * spot for Intel Optane SSD latency testing).
 *
 * Buckets can be made more granular by increasing SPDK_HISTOGRAM_BUCKET_SHIFT.  This
 * Buckets can be made more granular by increasing SPDK_HISTOGRAM_GRANULARITY.  This
 * comes at the cost of additional storage per namespace context to store the bucket data.
 */

struct spdk_histogram_data {

	uint32_t	bucket_shift;
	uint32_t	granularity;
	uint64_t	*bucket;

};
@@ -68,20 +68,20 @@ __spdk_histogram_increment(struct spdk_histogram_data *h, uint32_t range, uint32
{
	uint64_t *count;

	count = &h->bucket[(range << SPDK_HISTOGRAM_BUCKET_SHIFT(h)) + index];
	count = &h->bucket[(range << SPDK_HISTOGRAM_GRANULARITY(h)) + index];
	(*count)++;
}

static inline uint64_t
__spdk_histogram_get_count(const struct spdk_histogram_data *h, uint32_t range, uint32_t index)
{
	return h->bucket[(range << SPDK_HISTOGRAM_BUCKET_SHIFT(h)) + index];
	return h->bucket[(range << SPDK_HISTOGRAM_GRANULARITY(h)) + index];
}

static inline uint64_t *
__spdk_histogram_get_bucket(const struct spdk_histogram_data *h, uint32_t range, uint32_t index)
{
	return &h->bucket[(range << SPDK_HISTOGRAM_BUCKET_SHIFT(h)) + index];
	return &h->bucket[(range << SPDK_HISTOGRAM_GRANULARITY(h)) + index];
}

static inline void
@@ -138,7 +138,7 @@ __spdk_histogram_data_get_bucket_start(const struct spdk_histogram_data *h, uint

	index += 1;
	if (range > 0) {
		bucket = 1ULL << (range + SPDK_HISTOGRAM_BUCKET_SHIFT(h) - 1);
		bucket = 1ULL << (range + SPDK_HISTOGRAM_GRANULARITY(h) - 1);
		bucket += (uint64_t)index << (range - 1);
	} else {
		bucket = index;
@@ -185,11 +185,11 @@ spdk_histogram_data_merge(const struct spdk_histogram_data *dst,
{
	uint64_t i;

	/* Histograms with different bucket_shift values cannot be simply
	/* Histograms with different granularity values cannot be simply
	 * merged, because the buckets represent different ranges of
	 * values.
	 */
	if (dst->bucket_shift != src->bucket_shift) {
	if (dst->granularity != src->granularity) {
		return -EINVAL;
	}

@@ -201,7 +201,7 @@ spdk_histogram_data_merge(const struct spdk_histogram_data *dst,
}

static inline struct spdk_histogram_data *
spdk_histogram_data_alloc_sized(uint32_t bucket_shift)
spdk_histogram_data_alloc_sized(uint32_t granularity)
{
	struct spdk_histogram_data *h;

@@ -210,7 +210,7 @@ spdk_histogram_data_alloc_sized(uint32_t bucket_shift)
		return NULL;
	}

	h->bucket_shift = bucket_shift;
	h->granularity = granularity;
	h->bucket = (uint64_t *)calloc(SPDK_HISTOGRAM_NUM_BUCKETS(h), sizeof(uint64_t));
	if (h->bucket == NULL) {
		free(h);
@@ -223,7 +223,7 @@ spdk_histogram_data_alloc_sized(uint32_t bucket_shift)
static inline struct spdk_histogram_data *
spdk_histogram_data_alloc(void)
{
	return spdk_histogram_data_alloc_sized(SPDK_HISTOGRAM_BUCKET_SHIFT_DEFAULT);
	return spdk_histogram_data_alloc_sized(SPDK_HISTOGRAM_GRANULARITY_DEFAULT);
}

static inline void
+1 −1
Original line number Diff line number Diff line
@@ -1119,7 +1119,7 @@ _rpc_bdev_histogram_data_cb(void *cb_arg, int status, struct spdk_histogram_data
	w = spdk_jsonrpc_begin_result(request);
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "histogram", encoded_histogram);
	spdk_json_write_named_int64(w, "bucket_shift", histogram->bucket_shift);
	spdk_json_write_named_int64(w, "granularity", histogram->granularity);
	spdk_json_write_named_int64(w, "tsc_rate", spdk_get_ticks_hz());
	spdk_json_write_object_end(w);
	spdk_jsonrpc_end_result(request, w);
+1 −1
Original line number Diff line number Diff line
@@ -1987,7 +1987,7 @@ rpc_iscsi_get_histogram(struct spdk_jsonrpc_request *request,

	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "histogram", encoded_histogram);
	spdk_json_write_named_int64(w, "bucket_shift", target->histogram->bucket_shift);
	spdk_json_write_named_int64(w, "granularity", target->histogram->granularity);
	spdk_json_write_named_int64(w, "tsc_rate", spdk_get_ticks_hz());

	spdk_json_write_object_end(w);
+8 −8
Original line number Diff line number Diff line
@@ -12,7 +12,7 @@ import struct
buf = sys.stdin.readlines()
json = json.loads(" ".join(buf))
histogram = base64.b64decode(json["histogram"])
bucket_shift = json["bucket_shift"]
granularity = json["granularity"]
tsc_rate = json["tsc_rate"]

print("Latency histogram")
@@ -23,20 +23,20 @@ so_far = 0
bucket = 0
total = 1

for i in range(0, 64 - bucket_shift):
    for j in range(0, (1 << bucket_shift)):
        index = (((i << bucket_shift) + j) * 8)
for i in range(0, 64 - granularity):
    for j in range(0, (1 << granularity)):
        index = (((i << granularity) + j) * 8)
        total += int.from_bytes(histogram[index:index + 8], 'little')

for i in range(0, 64 - bucket_shift):
    for j in range(0, (1 << bucket_shift)):
        index = (((i << bucket_shift) + j)*8)
for i in range(0, 64 - granularity):
    for j in range(0, (1 << granularity)):
        index = (((i << granularity) + j)*8)
        count = int.from_bytes(histogram[index:index + 8], 'little')
        so_far += count
        last_bucket = bucket

        if i > 0:
            bucket = (1 << (i + bucket_shift - 1))
            bucket = (1 << (i + granularity - 1))
            bucket += ((j+1) << (i - 1))
        else:
            bucket = j+1
Loading