Commit 6881fb40 authored by Yalong Wang, committed by Konrad Sztyber
Browse files

lib/reduce: speed up _check_overlap by using rbtree.



Change-Id: Ib7b14b9e1c46d2e1c45cf13a7036ceefe190efbb
Signed-off-by: Yalong Wang <yalong9@staff.sina.com.cn>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/24944


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: GangCao <gang.cao@intel.com>
Reviewed-by: Jim Harris <jim.harris@samsung.com>
parent 4d0ed159
Loading
Loading
Loading
Loading
+20 −13
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
#include "spdk/util.h"
#include "spdk/log.h"
#include "spdk/memory.h"
#include "spdk/tree.h"

#include "libpmem.h"

@@ -112,6 +113,7 @@ struct spdk_reduce_vol_request {
	spdk_reduce_vol_op_complete		cb_fn;
	void					*cb_arg;
	TAILQ_ENTRY(spdk_reduce_vol_request)	tailq;
	RB_ENTRY(spdk_reduce_vol_request)	rbnode;
	struct spdk_reduce_vol_cb_args		backing_cb_args;
};

@@ -140,7 +142,7 @@ struct spdk_reduce_vol {

	struct spdk_reduce_vol_request		*request_mem;
	TAILQ_HEAD(, spdk_reduce_vol_request)	free_requests;
	TAILQ_HEAD(, spdk_reduce_vol_request)	executing_requests;
	RB_HEAD(executing_req_tree, spdk_reduce_vol_request) executing_requests;
	TAILQ_HEAD(, spdk_reduce_vol_request)	queued_requests;

	/* Single contiguous buffer used for all request buffers for this volume. */
@@ -583,6 +585,15 @@ _allocate_bit_arrays(struct spdk_reduce_vol *vol)
	return 0;
}

/* Comparator for the executing-requests red-black tree: orders requests by
 * logical_map_index so RB_FIND can locate an in-flight request that targets
 * the same chunk. Returns <0, 0, or >0 in the usual comparator convention. */
static int
overlap_cmp(struct spdk_reduce_vol_request *req1, struct spdk_reduce_vol_request *req2)
{
	if (req1->logical_map_index < req2->logical_map_index) {
		return -1;
	}
	if (req1->logical_map_index > req2->logical_map_index) {
		return 1;
	}
	return 0;
}
RB_GENERATE_STATIC(executing_req_tree, spdk_reduce_vol_request, rbnode, overlap_cmp);


void
spdk_reduce_vol_init(struct spdk_reduce_vol_params *params,
		     struct spdk_reduce_backing_dev *backing_dev,
@@ -641,7 +652,7 @@ spdk_reduce_vol_init(struct spdk_reduce_vol_params *params,
	}

	TAILQ_INIT(&vol->free_requests);
	TAILQ_INIT(&vol->executing_requests);
	RB_INIT(&vol->executing_requests);
	TAILQ_INIT(&vol->queued_requests);
	queue_init(&vol->free_chunks_queue);
	queue_init(&vol->free_backing_blocks_queue);
@@ -885,7 +896,7 @@ spdk_reduce_vol_load(struct spdk_reduce_backing_dev *backing_dev,
	}

	TAILQ_INIT(&vol->free_requests);
	TAILQ_INIT(&vol->executing_requests);
	RB_INIT(&vol->executing_requests);
	TAILQ_INIT(&vol->queued_requests);
	queue_init(&vol->free_chunks_queue);
	queue_init(&vol->free_backing_blocks_queue);
@@ -1095,7 +1106,7 @@ _reduce_vol_complete_req(struct spdk_reduce_vol_request *req, int reduce_errno)
	struct spdk_reduce_vol *vol = req->vol;

	req->cb_fn(req->cb_arg, reduce_errno);
	TAILQ_REMOVE(&vol->executing_requests, req, tailq);
	RB_REMOVE(executing_req_tree, &vol->executing_requests, req);

	TAILQ_FOREACH(next_req, &vol->queued_requests, tailq) {
		if (next_req->logical_map_index == req->logical_map_index) {
@@ -1778,21 +1789,17 @@ _iov_array_is_valid(struct spdk_reduce_vol *vol, struct iovec *iov, int iovcnt,
/* Return true if another executing request is already operating on the chunk
 * at logical_map_index. Uses an RB_FIND on the executing-requests rbtree
 * (keyed by logical_map_index via overlap_cmp), replacing the previous O(n)
 * TAILQ scan. A stack-local request is used purely as the search key; only
 * its logical_map_index field is read by the comparator. */
static bool
_check_overlap(struct spdk_reduce_vol *vol, uint64_t logical_map_index)
{
	struct spdk_reduce_vol_request req;

	req.logical_map_index = logical_map_index;

	return (NULL != RB_FIND(executing_req_tree, &vol->executing_requests, &req));
}

/* Begin servicing a read request: register it in the volume's
 * executing-requests rbtree (so _check_overlap sees it) and kick off the
 * chunk read, completing via _read_read_done. */
static void
_start_readv_request(struct spdk_reduce_vol_request *req)
{
	RB_INSERT(executing_req_tree, &req->vol->executing_requests, req);
	_reduce_vol_read_chunk(req, _read_read_done);
}

@@ -1867,7 +1874,7 @@ _start_writev_request(struct spdk_reduce_vol_request *req)
{
	struct spdk_reduce_vol *vol = req->vol;

	TAILQ_INSERT_TAIL(&req->vol->executing_requests, req, tailq);
	RB_INSERT(executing_req_tree, &req->vol->executing_requests, req);
	if (vol->pm_logical_map[req->logical_map_index] != REDUCE_EMPTY_MAP_ENTRY) {
		if ((req->length * vol->params.logical_block_size) < vol->params.chunk_size) {
			/* Read old chunk, then overwrite with data from this write
+15 −15
Original line number Diff line number Diff line
@@ -1599,7 +1599,7 @@ test_reduce_decompress_chunk(void)
	backing_dev.decompress = dummy_backing_dev_decompress;
	vol.backing_dev = &backing_dev;
	vol.logical_blocks_per_chunk = vol.params.chunk_size / vol.params.logical_block_size;
	TAILQ_INIT(&vol.executing_requests);
	RB_INIT(&vol.executing_requests);
	TAILQ_INIT(&vol.queued_requests);
	TAILQ_INIT(&vol.free_requests);

@@ -1627,7 +1627,7 @@ test_reduce_decompress_chunk(void)
		req.iov[i].iov_len = user_buffer_iov_len;
		memset(req.iov[i].iov_base, 0, req.iov[i].iov_len);
	}
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
	g_reduce_errno = -1;
	g_decompressed_len = vol.params.chunk_size;

@@ -1639,11 +1639,11 @@ test_reduce_decompress_chunk(void)
		CU_ASSERT(req.decomp_iov[i].iov_base == req.iov[i].iov_base);
		CU_ASSERT(req.decomp_iov[i].iov_len == req.iov[i].iov_len);
	}
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	/* Test 2 - user's buffer less than chunk_size, without offset */
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
	g_reduce_errno = -1;
	user_buffer_iov_len = 4096;
	for (i = 0; i < 2; i++) {
@@ -1663,14 +1663,14 @@ test_reduce_decompress_chunk(void)
	}
	CU_ASSERT(req.decomp_iov[i].iov_base == req.decomp_buf + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[i].iov_len == remainder_bytes);
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	/* Test 3 - user's buffer less than chunk_size, non zero offset */
	req.offset = 3;
	offset_bytes = req.offset * vol.params.logical_block_size;
	remainder_bytes = vol.params.chunk_size - offset_bytes - user_buffer_iov_len * 2;
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
	g_reduce_errno = -1;

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
@@ -1685,7 +1685,7 @@ test_reduce_decompress_chunk(void)
	}
	CU_ASSERT(req.decomp_iov[3].iov_base == req.decomp_buf + offset_bytes + user_buffer_iov_len * 2);
	CU_ASSERT(req.decomp_iov[3].iov_len == remainder_bytes);
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	/* Part 2 - backing dev doesn't support sgl_out */
@@ -1701,7 +1701,7 @@ test_reduce_decompress_chunk(void)
		req.iov[i].iov_len = user_buffer_iov_len;
		memset(req.iov[i].iov_base, 0xb + i, req.iov[i].iov_len);
	}
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
	g_reduce_errno = -1;

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
@@ -1713,7 +1713,7 @@ test_reduce_decompress_chunk(void)
	CU_ASSERT(memcmp(req.iov[0].iov_base, req.decomp_iov[0].iov_base, req.iov[0].iov_len) == 0);
	CU_ASSERT(memcmp(req.iov[1].iov_base, req.decomp_iov[0].iov_base + req.iov[0].iov_len,
			 req.iov[1].iov_len) == 0);
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	/* Test 2 - single user's buffer length equals to chunk_size, buffer is not aligned
@@ -1723,7 +1723,7 @@ test_reduce_decompress_chunk(void)
	req.iov[0].iov_len = vol.params.chunk_size;
	req.iovcnt = 1;
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
	g_reduce_errno = -1;

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
@@ -1741,7 +1741,7 @@ test_reduce_decompress_chunk(void)
	req.iov[0].iov_len = vol.params.chunk_size;
	req.iovcnt = 1;
	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
	g_reduce_errno = -1;

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
@@ -1763,7 +1763,7 @@ test_reduce_decompress_chunk(void)
	}

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
	g_reduce_errno = -1;

	_reduce_vol_decompress_chunk(&req, _read_decompress_done);
@@ -1776,7 +1776,7 @@ test_reduce_decompress_chunk(void)
			 req.iov[0].iov_len) == 0);
	CU_ASSERT(memcmp(req.iov[1].iov_base, req.decomp_iov[0].iov_base + req.iov[0].iov_len,
			 req.iov[1].iov_len) == 0);
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	/* Test 5 - user's buffer less than chunk_size, non zero offset
@@ -1792,7 +1792,7 @@ test_reduce_decompress_chunk(void)
	}

	memset(req.decomp_buf, 0xa, vol.params.chunk_size);
	TAILQ_INSERT_HEAD(&vol.executing_requests, &req, tailq);
	RB_INSERT(executing_req_tree, &vol.executing_requests, &req);
	g_reduce_errno = -1;

	_prepare_compress_chunk(&req, false);
@@ -1807,7 +1807,7 @@ test_reduce_decompress_chunk(void)
	CU_ASSERT(memcmp(req.decomp_iov[0].iov_base + offset_bytes + req.iov[0].iov_len,
			 req.iov[1].iov_base,
			 req.iov[1].iov_len) == 0);
	CU_ASSERT(TAILQ_EMPTY(&vol.executing_requests));
	CU_ASSERT(RB_EMPTY(&vol.executing_requests));
	CU_ASSERT(TAILQ_FIRST(&vol.free_requests) == &req);

	free(buf);