Commit ffdb4541 authored by Tomasz Zawadzki

lib/vhost: move virtio related functions to rte_vhost



At this time only rte_vhost_user makes use of the virtio related
functionality, which is used by vhost_scsi and will be used by the
vhost_user_blk virtio transport.

Signed-off-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Change-Id: Ia159e14ce5f9a74185da9898713deeff76d14f1a
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/11100


Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
parent 03b75034
+678 −0
@@ -46,6 +46,8 @@

#include "spdk_internal/vhost_user.h"

bool g_packed_ring_recovery = false;

/* Path to folder where character device will be created. Can be set by user. */
static char g_vhost_user_dev_dirname[PATH_MAX] = "";

@@ -96,6 +98,682 @@ _vhost_user_sem_destroy(void)
	sem_destroy(&g_dpdk_sem);
}

void *
vhost_gpa_to_vva(struct spdk_vhost_session *vsession, uint64_t addr, uint64_t len)
{
	void *vva;
	uint64_t newlen;

	newlen = len;
	vva = (void *)rte_vhost_va_from_guest_pa(vsession->mem, addr, &newlen);
	if (newlen != len) {
		return NULL;
	}

	return vva;
}
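
/* Usage sketch (illustrative only, not part of this commit). A NULL return
 * means the guest-physical range is not contiguously mapped into host VA
 * space; process_buffer() is a hypothetical backend handler, not a real
 * symbol:
 *
 *	void *vva = vhost_gpa_to_vva(vsession, desc->addr, desc->len);
 *	if (vva == NULL) {
 *		return -1;
 *	}
 *	process_buffer(vva, desc->len);
 */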

static void
vhost_log_req_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
		   uint16_t req_id)
{
	struct vring_desc *desc, *desc_table;
	uint32_t desc_table_size;
	int rc;

	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	rc = vhost_vq_get_desc(vsession, virtqueue, req_id, &desc, &desc_table, &desc_table_size);
	if (spdk_unlikely(rc != 0)) {
		SPDK_ERRLOG("Can't log used ring descriptors!\n");
		return;
	}

	do {
		if (vhost_vring_desc_is_wr(desc)) {
			/* Strictly speaking, only the pages actually touched should be logged,
			 * but doing so would require tracking those changes in each backend.
			 * The backend will most likely touch all or most of those pages anyway,
			 * so for now assume we touched every page passed to us as a writable buffer. */
			rte_vhost_log_write(vsession->vid, desc->addr, desc->len);
		}
		vhost_vring_desc_get_next(&desc, desc_table, desc_table_size);
	} while (desc);
}

static void
vhost_log_used_vring_elem(struct spdk_vhost_session *vsession,
			  struct spdk_vhost_virtqueue *virtqueue,
			  uint16_t idx)
{
	uint64_t offset, len;

	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	if (spdk_unlikely(virtqueue->packed.packed_ring)) {
		offset = idx * sizeof(struct vring_packed_desc);
		len = sizeof(struct vring_packed_desc);
	} else {
		offset = offsetof(struct vring_used, ring[idx]);
		len = sizeof(virtqueue->vring.used->ring[idx]);
	}

	rte_vhost_log_used_vring(vsession->vid, virtqueue->vring_idx, offset, len);
}

static void
vhost_log_used_vring_idx(struct spdk_vhost_session *vsession,
			 struct spdk_vhost_virtqueue *virtqueue)
{
	uint64_t offset, len;
	uint16_t vq_idx;

	if (spdk_likely(!vhost_dev_has_feature(vsession, VHOST_F_LOG_ALL))) {
		return;
	}

	offset = offsetof(struct vring_used, idx);
	len = sizeof(virtqueue->vring.used->idx);
	vq_idx = virtqueue - vsession->virtqueue;

	rte_vhost_log_used_vring(vsession->vid, vq_idx, offset, len);
}
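
/* Context note (editorial, hedged): the vhost_log_* helpers above record
 * device-side writes in the dirty log that rte_vhost maintains while the
 * VHOST_F_LOG_ALL feature is negotiated, e.g. during live migration, so the
 * front end can track which guest pages the device has modified. */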

/*
 * Get available requests from avail ring.
 */
uint16_t
vhost_vq_avail_ring_get(struct spdk_vhost_virtqueue *virtqueue, uint16_t *reqs,
			uint16_t reqs_len)
{
	struct rte_vhost_vring *vring = &virtqueue->vring;
	struct vring_avail *avail = vring->avail;
	uint16_t size_mask = vring->size - 1;
	uint16_t last_idx = virtqueue->last_avail_idx, avail_idx = avail->idx;
	uint16_t count, i;
	int rc;
	uint64_t u64_value;

	spdk_smp_rmb();

	if (virtqueue->vsession && spdk_unlikely(virtqueue->vsession->interrupt_mode)) {
		/* Read to clear vring's kickfd */
		rc = read(vring->kickfd, &u64_value, sizeof(u64_value));
		if (rc < 0) {
			SPDK_ERRLOG("failed to acknowledge kickfd: %s.\n", spdk_strerror(errno));
			return -errno;
		}
	}

	count = avail_idx - last_idx;
	if (spdk_likely(count == 0)) {
		return 0;
	}

	if (spdk_unlikely(count > vring->size)) {
		/* TODO: the queue is unrecoverably broken and should be marked so.
		 * For now we will fail silently and report there are no new avail entries.
		 */
		return 0;
	}

	count = spdk_min(count, reqs_len);

	virtqueue->last_avail_idx += count;
	/* Check whether there are unprocessed reqs in vq, then kick vq manually */
	if (virtqueue->vsession && spdk_unlikely(virtqueue->vsession->interrupt_mode)) {
		/* If avail_idx is larger than virtqueue's last_avail_idx, there are unprocessed reqs.
		 * avail_idx should be re-read from memory here, in case of a race condition with the guest.
		 */
		avail_idx = * (volatile uint16_t *) &avail->idx;
		if (avail_idx > virtqueue->last_avail_idx) {
			/* Write to notify vring's kickfd */
			rc = write(vring->kickfd, &u64_value, sizeof(u64_value));
			if (rc < 0) {
				SPDK_ERRLOG("failed to kick vring: %s.\n", spdk_strerror(errno));
				return -errno;
			}
		}
	}

	for (i = 0; i < count; i++) {
		reqs[i] = vring->avail->ring[(last_idx + i) & size_mask];
	}

	SPDK_DEBUGLOG(vhost_ring,
		      "AVAIL: last_idx=%"PRIu16" avail_idx=%"PRIu16" count=%"PRIu16"\n",
		      last_idx, avail_idx, count);

	return count;
}
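
/* Poller sketch (illustrative, not part of this commit): a backend drains the
 * newly available request indices and dispatches each one. handle_request()
 * is a hypothetical handler; SPDK_COUNTOF comes from spdk/util.h.
 *
 *	uint16_t reqs[32];
 *	uint16_t count, i;
 *
 *	count = vhost_vq_avail_ring_get(virtqueue, reqs, SPDK_COUNTOF(reqs));
 *	for (i = 0; i < count; i++) {
 *		handle_request(vsession, virtqueue, reqs[i]);
 *	}
 */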

static bool
vhost_vring_desc_is_indirect(struct vring_desc *cur_desc)
{
	return !!(cur_desc->flags & VRING_DESC_F_INDIRECT);
}

static bool
vhost_vring_packed_desc_is_indirect(struct vring_packed_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_INDIRECT) != 0;
}

static bool
vhost_inflight_packed_desc_is_indirect(spdk_vhost_inflight_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_INDIRECT) != 0;
}

int
vhost_vq_get_desc(struct spdk_vhost_session *vsession, struct spdk_vhost_virtqueue *virtqueue,
		  uint16_t req_idx, struct vring_desc **desc, struct vring_desc **desc_table,
		  uint32_t *desc_table_size)
{
	if (spdk_unlikely(req_idx >= virtqueue->vring.size)) {
		return -1;
	}

	*desc = &virtqueue->vring.desc[req_idx];

	if (vhost_vring_desc_is_indirect(*desc)) {
		*desc_table_size = (*desc)->len / sizeof(**desc);
		*desc_table = vhost_gpa_to_vva(vsession, (*desc)->addr,
					       sizeof(**desc) * *desc_table_size);
		*desc = *desc_table;
		if (*desc == NULL) {
			return -1;
		}

		return 0;
	}

	*desc_table = virtqueue->vring.desc;
	*desc_table_size = virtqueue->vring.size;

	return 0;
}
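
/* Chain-walk sketch (illustrative, not part of this commit): how the desc and
 * desc_table returned here pair with vhost_vring_desc_get_next() and
 * vhost_vring_desc_to_iov() defined later in this file. iovs is assumed to
 * hold SPDK_VHOST_IOVS_MAX entries.
 *
 *	struct vring_desc *desc, *desc_table;
 *	uint32_t desc_table_size;
 *	uint16_t iovcnt = 0;
 *
 *	if (vhost_vq_get_desc(vsession, vq, req_idx, &desc,
 *			      &desc_table, &desc_table_size) != 0) {
 *		return -1;
 *	}
 *	while (desc != NULL) {
 *		if (vhost_vring_desc_to_iov(vsession, iovs, &iovcnt, desc) != 0 ||
 *		    vhost_vring_desc_get_next(&desc, desc_table, desc_table_size) != 0) {
 *			return -1;
 *		}
 *	}
 */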

static bool
vhost_packed_desc_indirect_to_desc_table(struct spdk_vhost_session *vsession,
		uint64_t addr, uint32_t len,
		struct vring_packed_desc **desc_table,
		uint32_t *desc_table_size)
{
	*desc_table_size = len / sizeof(struct vring_packed_desc);

	*desc_table = vhost_gpa_to_vva(vsession, addr, len);
	if (spdk_unlikely(*desc_table == NULL)) {
		return false;
	}

	return true;
}

int
vhost_vq_get_desc_packed(struct spdk_vhost_session *vsession,
			 struct spdk_vhost_virtqueue *virtqueue,
			 uint16_t req_idx, struct vring_packed_desc **desc,
			 struct vring_packed_desc **desc_table, uint32_t *desc_table_size)
{
	*desc = &virtqueue->vring.desc_packed[req_idx];

	/* In a packed ring, when the desc is non-indirect we get the next desc
	 * by checking (desc->flags & VRING_DESC_F_NEXT) != 0. When the desc is
	 * indirect we get the next desc by idx and desc_table_size. This is
	 * different from the split ring.
	 */
	if (vhost_vring_packed_desc_is_indirect(*desc)) {
		if (!vhost_packed_desc_indirect_to_desc_table(vsession, (*desc)->addr, (*desc)->len,
				desc_table, desc_table_size)) {
			return -1;
		}

		*desc = *desc_table;
	} else {
		*desc_table = NULL;
		*desc_table_size = 0;
	}

	return 0;
}
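
/* Packed-ring equivalent of a chain walk (illustrative, not part of this
 * commit); vhost_vring_packed_desc_get_next() and
 * vhost_vring_packed_desc_to_iov() are defined later in this file. For an
 * indirect chain the running index is assumed to restart at 0 inside the
 * indirect table.
 *
 *	struct vring_packed_desc *desc, *desc_table;
 *	uint32_t desc_table_size;
 *	uint16_t idx = req_idx, iovcnt = 0;
 *
 *	if (vhost_vq_get_desc_packed(vsession, vq, idx, &desc,
 *				     &desc_table, &desc_table_size) != 0) {
 *		return -1;
 *	}
 *	if (desc_table != NULL) {
 *		idx = 0;
 *	}
 *	while (desc != NULL) {
 *		if (vhost_vring_packed_desc_to_iov(vsession, iovs, &iovcnt, desc) != 0 ||
 *		    vhost_vring_packed_desc_get_next(&desc, &idx, vq,
 *						     desc_table, desc_table_size) != 0) {
 *			return -1;
 *		}
 *	}
 */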

int
vhost_inflight_queue_get_desc(struct spdk_vhost_session *vsession,
			      spdk_vhost_inflight_desc *desc_array,
			      uint16_t req_idx, spdk_vhost_inflight_desc **desc,
			      struct vring_packed_desc **desc_table, uint32_t *desc_table_size)
{
	*desc = &desc_array[req_idx];

	if (vhost_inflight_packed_desc_is_indirect(*desc)) {
		if (!vhost_packed_desc_indirect_to_desc_table(vsession, (*desc)->addr, (*desc)->len,
				desc_table, desc_table_size)) {
			return -1;
		}

		/* This desc is the inflight desc, not the packed desc.
		 * When F_INDIRECT is set, the table entries are packed descs,
		 * so set the inflight desc to NULL.
		 */
		*desc = NULL;
	} else {
		/* When F_INDIRECT is not set, there is no packed desc table */
		*desc_table = NULL;
		*desc_table_size = 0;
	}

	return 0;
}

int
vhost_vq_used_signal(struct spdk_vhost_session *vsession,
		     struct spdk_vhost_virtqueue *virtqueue)
{
	if (virtqueue->used_req_cnt == 0) {
		return 0;
	}

	virtqueue->req_cnt += virtqueue->used_req_cnt;
	virtqueue->used_req_cnt = 0;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - USED RING: sending IRQ: last used %"PRIu16"\n",
		      virtqueue - vsession->virtqueue, virtqueue->last_used_idx);

	if (rte_vhost_vring_call(vsession->vid, virtqueue->vring_idx) == 0) {
		/* interrupt signalled */
		return 1;
	} else {
		/* interrupt not signalled */
		return 0;
	}
}

static void
session_vq_io_stats_update(struct spdk_vhost_session *vsession,
			   struct spdk_vhost_virtqueue *virtqueue, uint64_t now)
{
	uint32_t irq_delay_base = vsession->coalescing_delay_time_base;
	uint32_t io_threshold = vsession->coalescing_io_rate_threshold;
	int32_t irq_delay;
	uint32_t req_cnt;

	req_cnt = virtqueue->req_cnt + virtqueue->used_req_cnt;
	if (req_cnt <= io_threshold) {
		return;
	}

	irq_delay = (irq_delay_base * (req_cnt - io_threshold)) / io_threshold;
	virtqueue->irq_delay_time = (uint32_t) spdk_max(0, irq_delay);

	virtqueue->req_cnt = 0;
	virtqueue->next_event_time = now;
}
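
/* Worked example (illustrative): with coalescing_delay_time_base = 100 ticks
 * and coalescing_io_rate_threshold = 60 reqs per interval, req_cnt = 120
 * yields irq_delay = 100 * (120 - 60) / 60 = 100 ticks (one full base delay),
 * and req_cnt = 90 yields 100 * 30 / 60 = 50 ticks. A req_cnt at or below
 * the threshold returns early and leaves the current delay unchanged. */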

static void
check_session_vq_io_stats(struct spdk_vhost_session *vsession,
			  struct spdk_vhost_virtqueue *virtqueue, uint64_t now)
{
	if (now < vsession->next_stats_check_time) {
		return;
	}

	vsession->next_stats_check_time = now + vsession->stats_check_interval;
	session_vq_io_stats_update(vsession, virtqueue, now);
}

static inline bool
vhost_vq_event_is_suppressed(struct spdk_vhost_virtqueue *vq)
{
	if (spdk_unlikely(vq->packed.packed_ring)) {
		if (vq->vring.driver_event->flags & VRING_PACKED_EVENT_FLAG_DISABLE) {
			return true;
		}
	} else {
		if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
			return true;
		}
	}

	return false;
}

void
vhost_session_vq_used_signal(struct spdk_vhost_virtqueue *virtqueue)
{
	struct spdk_vhost_session *vsession = virtqueue->vsession;
	uint64_t now;

	if (vsession->coalescing_delay_time_base == 0) {
		if (virtqueue->vring.desc == NULL) {
			return;
		}

		if (vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		vhost_vq_used_signal(vsession, virtqueue);
	} else {
		now = spdk_get_ticks();
		check_session_vq_io_stats(vsession, virtqueue, now);

		/* No need for event right now */
		if (now < virtqueue->next_event_time) {
			return;
		}

		if (vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		if (!vhost_vq_used_signal(vsession, virtqueue)) {
			return;
		}

		/* The syscall above can take a while, so refresh the current time */
		now = spdk_get_ticks();
		virtqueue->next_event_time = now + virtqueue->irq_delay_time;
	}
}

void
vhost_session_used_signal(struct spdk_vhost_session *vsession)
{
	struct spdk_vhost_virtqueue *virtqueue;
	uint16_t q_idx;

	for (q_idx = 0; q_idx < vsession->max_queues; q_idx++) {
		virtqueue = &vsession->virtqueue[q_idx];
		vhost_session_vq_used_signal(virtqueue);
	}
}

/*
 * Enqueue id and len to used ring.
 */
void
vhost_vq_used_ring_enqueue(struct spdk_vhost_session *vsession,
			   struct spdk_vhost_virtqueue *virtqueue,
			   uint16_t id, uint32_t len)
{
	struct rte_vhost_vring *vring = &virtqueue->vring;
	struct vring_used *used = vring->used;
	uint16_t last_idx = virtqueue->last_used_idx & (vring->size - 1);
	uint16_t vq_idx = virtqueue->vring_idx;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - USED RING: last_idx=%"PRIu16" req id=%"PRIu16" len=%"PRIu32"\n",
		      virtqueue - vsession->virtqueue, virtqueue->last_used_idx, id, len);

	vhost_log_req_desc(vsession, virtqueue, id);

	virtqueue->last_used_idx++;
	used->ring[last_idx].id = id;
	used->ring[last_idx].len = len;

	/* Ensure the used ring is updated before we log it or increment used->idx. */
	spdk_smp_wmb();

	rte_vhost_set_last_inflight_io_split(vsession->vid, vq_idx, id);

	vhost_log_used_vring_elem(vsession, virtqueue, last_idx);
	* (volatile uint16_t *) &used->idx = virtqueue->last_used_idx;
	vhost_log_used_vring_idx(vsession, virtqueue);

	rte_vhost_clr_inflight_desc_split(vsession->vid, vq_idx, virtqueue->last_used_idx, id);

	virtqueue->used_req_cnt++;

	if (vsession->interrupt_mode) {
		if (virtqueue->vring.desc == NULL || vhost_vq_event_is_suppressed(virtqueue)) {
			return;
		}

		vhost_vq_used_signal(vsession, virtqueue);
	}
}

void
vhost_vq_packed_ring_enqueue(struct spdk_vhost_session *vsession,
			     struct spdk_vhost_virtqueue *virtqueue,
			     uint16_t num_descs, uint16_t buffer_id,
			     uint32_t length, uint16_t inflight_head)
{
	struct vring_packed_desc *desc = &virtqueue->vring.desc_packed[virtqueue->last_used_idx];
	bool used, avail;

	SPDK_DEBUGLOG(vhost_ring,
		      "Queue %td - RING: buffer_id=%"PRIu16"\n",
		      virtqueue - vsession->virtqueue, buffer_id);

	/* When a descriptor has been used, its avail flag and used flag are
	 * set equal to each other, and the used flag value matches
	 * used_wrap_counter.
	 */
	used = !!(desc->flags & VRING_DESC_F_USED);
	avail = !!(desc->flags & VRING_DESC_F_AVAIL);
	if (spdk_unlikely(used == virtqueue->packed.used_phase && used == avail)) {
		SPDK_ERRLOG("descriptor has been used before\n");
		return;
	}

	/* In a used desc, addr is unused and len specifies the buffer length
	 * that has been written to by the device.
	 */
	desc->addr = 0;
	desc->len = length;

	/* This bit specifies whether any data has been written by the device */
	if (length != 0) {
		desc->flags |= VRING_DESC_F_WRITE;
	}

	/* Buffer ID is included in the last descriptor in the list.
	 * The driver needs to keep track of the size of the list corresponding
	 * to each buffer ID.
	 */
	desc->id = buffer_id;

	/* A device MUST NOT make the descriptor used before buffer_id is
	 * written to the descriptor.
	 */
	spdk_smp_wmb();

	rte_vhost_set_last_inflight_io_packed(vsession->vid, virtqueue->vring_idx, inflight_head);
	/* To mark a desc as used, the device sets the F_USED bit in flags to match
	 * the internal Device ring wrap counter. It also sets the F_AVAIL bit to
	 * match the same value.
	 */
	if (virtqueue->packed.used_phase) {
		desc->flags |= VRING_DESC_F_AVAIL_USED;
	} else {
		desc->flags &= ~VRING_DESC_F_AVAIL_USED;
	}
	rte_vhost_clr_inflight_desc_packed(vsession->vid, virtqueue->vring_idx, inflight_head);

	vhost_log_used_vring_elem(vsession, virtqueue, virtqueue->last_used_idx);
	virtqueue->last_used_idx += num_descs;
	if (virtqueue->last_used_idx >= virtqueue->vring.size) {
		virtqueue->last_used_idx -= virtqueue->vring.size;
		virtqueue->packed.used_phase = !virtqueue->packed.used_phase;
	}

	virtqueue->used_req_cnt++;
}
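
/* Phase example (illustrative): assume a ring of size 4 and used_phase = 1.
 * Completions set both F_AVAIL and F_USED in flags. Once last_used_idx
 * advances past 3 it wraps to 0, used_phase flips to 0, and subsequent
 * completions clear both bits instead. This is how the driver distinguishes
 * fresh completions from stale descriptors left over from the previous lap. */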

bool
vhost_vq_packed_ring_is_avail(struct spdk_vhost_virtqueue *virtqueue)
{
	uint16_t flags = virtqueue->vring.desc_packed[virtqueue->last_avail_idx].flags;

	/* To mark a desc as available, the driver sets the F_AVAIL bit in flags
	 * to match the internal avail wrap counter. It also sets the F_USED bit
	 * to the inverse value, but that is not mandatory.
	 */
	return (!!(flags & VRING_DESC_F_AVAIL) == virtqueue->packed.avail_phase);
}
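
/* Spelled out (illustrative), with avail_phase starting at 1:
 *	F_AVAIL == 1, avail_phase == 1  ->  descriptor is available
 *	F_AVAIL == 0, avail_phase == 1  ->  not yet published by the driver
 * After last_avail_idx wraps, avail_phase flips to 0 and the test inverts. */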

bool
vhost_vring_packed_desc_is_wr(struct vring_packed_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}

bool
vhost_vring_inflight_desc_is_wr(spdk_vhost_inflight_desc *cur_desc)
{
	return (cur_desc->flags & VRING_DESC_F_WRITE) != 0;
}

int
vhost_vring_packed_desc_get_next(struct vring_packed_desc **desc, uint16_t *req_idx,
				 struct spdk_vhost_virtqueue *vq,
				 struct vring_packed_desc *desc_table,
				 uint32_t desc_table_size)
{
	if (desc_table != NULL) {
		/* When desc_table is not NULL the chain is indirect, and we get the
		 * next desc by req_idx and desc_table_size. A NULL *desc on return
		 * means we reached the last desc of this request.
		 */
		(*req_idx)++;
		if (*req_idx < desc_table_size) {
			*desc = &desc_table[*req_idx];
		} else {
			*desc = NULL;
		}
	} else {
		/* When desc_table is NULL the chain is non-indirect, and we get the
		 * next desc by req_idx and the F_NEXT flag. A NULL *desc on return
		 * means we reached the last desc of this request. When returning a
		 * new desc we update req_idx too.
		 */
		if (((*desc)->flags & VRING_DESC_F_NEXT) == 0) {
			*desc = NULL;
			return 0;
		}

		*req_idx = (*req_idx + 1) % vq->vring.size;
		*desc = &vq->vring.desc_packed[*req_idx];
	}

	return 0;
}

static int
vhost_vring_desc_payload_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				uint16_t *iov_index, uintptr_t payload, uint64_t remaining)
{
	uintptr_t vva;
	uint64_t len;

	do {
		if (*iov_index >= SPDK_VHOST_IOVS_MAX) {
			SPDK_ERRLOG("SPDK_VHOST_IOVS_MAX(%d) reached\n", SPDK_VHOST_IOVS_MAX);
			return -1;
		}
		len = remaining;
		vva = (uintptr_t)rte_vhost_va_from_guest_pa(vsession->mem, payload, &len);
		if (vva == 0 || len == 0) {
			SPDK_ERRLOG("gpa_to_vva(%p) == NULL\n", (void *)payload);
			return -1;
		}
		iov[*iov_index].iov_base = (void *)vva;
		iov[*iov_index].iov_len = len;
		remaining -= len;
		payload += len;
		(*iov_index)++;
	} while (remaining);

	return 0;
}
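
/* Illustrative note: a guest-physically-contiguous payload can still map to
 * several host VA ranges when it crosses vhost memory-region boundaries, so a
 * single descriptor may consume more than one iovec. For example, an 8 KiB
 * payload straddling two regions might produce two entries (5 KiB + 3 KiB),
 * which is why the loop above runs until remaining reaches zero. */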

int
vhost_vring_packed_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
			       uint16_t *iov_index, const struct vring_packed_desc *desc)
{
	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
					       desc->addr, desc->len);
}

int
vhost_vring_inflight_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
				 uint16_t *iov_index, const spdk_vhost_inflight_desc *desc)
{
	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
					       desc->addr, desc->len);
}

/* 1. Traverse the desc chain to get the buffer_id and return it as the task_idx.
 * 2. Update vq->last_avail_idx to point to the next available desc chain.
 * 3. Flip the avail_wrap_counter if last_avail_idx wraps around.
 */
uint16_t
vhost_vring_packed_desc_get_buffer_id(struct spdk_vhost_virtqueue *vq, uint16_t req_idx,
				      uint16_t *num_descs)
{
	struct vring_packed_desc *desc;
	uint16_t desc_head = req_idx;

	*num_descs = 1;

	desc = &vq->vring.desc_packed[req_idx];
	if (!vhost_vring_packed_desc_is_indirect(desc)) {
		while ((desc->flags & VRING_DESC_F_NEXT) != 0) {
			req_idx = (req_idx + 1) % vq->vring.size;
			desc = &vq->vring.desc_packed[req_idx];
			(*num_descs)++;
		}
	}

	/* The queue size doesn't have to be a power of 2.
	 * The device maintains last_avail_idx, so we can make sure
	 * the value stays valid (0 to vring.size - 1).
	 */
	vq->last_avail_idx = (req_idx + 1) % vq->vring.size;
	if (vq->last_avail_idx < desc_head) {
		vq->packed.avail_phase = !vq->packed.avail_phase;
	}

	return desc->id;
}
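
/* Worked example (illustrative): a 3-descriptor chain starting at req_idx = 6
 * in a ring of size 8. Descriptors 6 and 7 carry F_NEXT, descriptor 0 does
 * not, so *num_descs ends up as 3 and last_avail_idx becomes (0 + 1) % 8 = 1.
 * Since 1 < 6 (the chain head), the avail phase flips. The returned buffer id
 * is read from the last descriptor in the chain. */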

int
vhost_vring_desc_get_next(struct vring_desc **desc,
			  struct vring_desc *desc_table, uint32_t desc_table_size)
{
	struct vring_desc *old_desc = *desc;
	uint16_t next_idx;

	if ((old_desc->flags & VRING_DESC_F_NEXT) == 0) {
		*desc = NULL;
		return 0;
	}

	next_idx = old_desc->next;
	if (spdk_unlikely(next_idx >= desc_table_size)) {
		*desc = NULL;
		return -1;
	}

	*desc = &desc_table[next_idx];
	return 0;
}

int
vhost_vring_desc_to_iov(struct spdk_vhost_session *vsession, struct iovec *iov,
			uint16_t *iov_index, const struct vring_desc *desc)
{
	return vhost_vring_desc_payload_to_iov(vsession, iov, iov_index,
					       desc->addr, desc->len);
}

static inline void
vhost_session_mem_region_calc(uint64_t *previous_start, uint64_t *start, uint64_t *end,
			      uint64_t *len, struct rte_vhost_mem_region *region)
+0 −678
File changed. Preview size limit exceeded, changes collapsed.