Commit 466f9e8b authored by Konrad Sztyber, committed by Tomasz Zawadzki
Browse files

sock: extract prepping iovs for a single req to a function



It'll make it possible to reuse this code for asynchronous read
requests.

Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Change-Id: I2c2c44feee0821c972aa2a43d8a8ec81f3ce4ef4
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/12591


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
parent 6d3506d8
Loading
Loading
Loading
Loading
+37 −22
Original line number Diff line number Diff line
@@ -231,28 +231,15 @@ spdk_sock_abort_requests(struct spdk_sock *sock)
}

static inline int
spdk_sock_prep_reqs(struct spdk_sock *_sock, struct iovec *iovs, int index,
		    struct spdk_sock_request **last_req, int *flags)
spdk_sock_prep_req(struct spdk_sock_request *req, struct iovec *iovs, int index,
		   uint64_t *num_bytes)
{
	int iovcnt, i;
	struct spdk_sock_request *req;
	unsigned int offset;
	uint64_t total = 0;

	/* Gather an iov */
	iovcnt = index;
	if (spdk_unlikely(iovcnt >= IOV_BATCH_SIZE)) {
		goto end;
	}

	if (last_req != NULL && *last_req != NULL) {
		req = TAILQ_NEXT(*last_req, internal.link);
	} else {
		req = TAILQ_FIRST(&_sock->queued_reqs);
	}
	int iovcnt, i;

	while (req) {
	assert(index < IOV_BATCH_SIZE);
	offset = req->internal.offset;
	iovcnt = index;

	for (i = 0; i < req->iovcnt; i++) {
		/* Consume any offset first */
@@ -263,8 +250,10 @@ spdk_sock_prep_reqs(struct spdk_sock *_sock, struct iovec *iovs, int index,

		iovs[iovcnt].iov_base = SPDK_SOCK_REQUEST_IOV(req, i)->iov_base + offset;
		iovs[iovcnt].iov_len = SPDK_SOCK_REQUEST_IOV(req, i)->iov_len - offset;
		if (num_bytes != NULL) {
			*num_bytes += iovs[iovcnt].iov_len;
		}

			total += iovs[iovcnt].iov_len;
		iovcnt++;
		offset = 0;

@@ -272,6 +261,32 @@ spdk_sock_prep_reqs(struct spdk_sock *_sock, struct iovec *iovs, int index,
			break;
		}
	}

	return iovcnt;
}

static inline int
spdk_sock_prep_reqs(struct spdk_sock *_sock, struct iovec *iovs, int index,
		    struct spdk_sock_request **last_req, int *flags)
{
	int iovcnt;
	struct spdk_sock_request *req;
	uint64_t total = 0;

	/* Gather an iov */
	iovcnt = index;
	if (spdk_unlikely(iovcnt >= IOV_BATCH_SIZE)) {
		goto end;
	}

	if (last_req != NULL && *last_req != NULL) {
		req = TAILQ_NEXT(*last_req, internal.link);
	} else {
		req = TAILQ_FIRST(&_sock->queued_reqs);
	}

	while (req) {
		iovcnt = spdk_sock_prep_req(req, iovs, iovcnt, &total);
		if (iovcnt >= IOV_BATCH_SIZE) {
			break;
		}