Commit 09af33b6 authored by Dariusz Stojaczyk's avatar Dariusz Stojaczyk Committed by Daniel Verkamp
Browse files

rte_virtio: added virtio_dev struct



Previously virtio_hw was managing both VirtIO PCI and
vhost-user devices. Now there's virtio_dev, a common
part for both backends. virtio_hw is only used for PCI.

Note that this patch does not introduce another
abstraction layer. It only unifies an already existing
one. Previously virtio_user_dev was built on top of
virtio_hw, with most PCI fields just hanging there
unused. Now both virtio_user_dev and virtio_hw are
built on top of virtio_dev.

Change-Id: Ida25defc0063055a81cf4039c9b85470b9880bc3
Signed-off-by: default avatarDariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/376966


Tested-by: default avatarSPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
Reviewed-by: default avatarDaniel Verkamp <daniel.verkamp@intel.com>
parent 72ebd590
Loading
Loading
Loading
Loading
+20 −20
Original line number Diff line number Diff line
@@ -68,7 +68,7 @@ struct virtio_scsi_io_ctx {
};

struct virtio_scsi_scan_base {
	struct virtio_hw		*hw;
	struct virtio_dev		*vdev;
	struct spdk_bdev_poller		*scan_poller;

	/* Currently queried target */
@@ -84,14 +84,14 @@ struct virtio_scsi_scan_base {

struct virtio_scsi_disk {
	struct spdk_bdev	bdev;
	struct virtio_hw	*hw;
	struct virtio_dev	*vdev;
	uint64_t		num_blocks;
	uint32_t		block_size;
	TAILQ_ENTRY(virtio_scsi_disk) link;
};

struct bdev_virtio_io_channel {
	struct virtio_hw	*hw;
	struct virtio_dev	*vdev;
	struct spdk_bdev_poller	*poller;
};

@@ -148,7 +148,7 @@ bdev_virtio_rw(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
		to_be16(&req->cdb[7], bdev_io->u.write.num_blocks);
	}

	virtio_xmit_pkts(disk->hw->vqs[2], vreq);
	virtio_xmit_pkts(disk->vdev->vqs[2], vreq);
}

static int _bdev_virtio_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
@@ -199,7 +199,7 @@ bdev_virtio_get_io_channel(void *ctx)
{
	struct virtio_scsi_disk *disk = ctx;

	return spdk_get_io_channel(&disk->hw);
	return spdk_get_io_channel(&disk->vdev);
}

static int
@@ -260,7 +260,7 @@ bdev_virtio_poll(void *arg)
	struct virtio_req *req[32];
	uint16_t i, cnt;

	cnt = virtio_recv_pkts(ch->hw->vqs[2], req, SPDK_COUNTOF(req));
	cnt = virtio_recv_pkts(ch->vdev->vqs[2], req, SPDK_COUNTOF(req));
	for (i = 0; i < cnt; ++i) {
		bdev_virtio_io_cpl(req[i]);
	}
@@ -269,10 +269,10 @@ bdev_virtio_poll(void *arg)
static int
bdev_virtio_create_cb(void *io_device, void *ctx_buf)
{
	struct virtio_hw **hw = io_device;
	struct virtio_dev **vdev = io_device;
	struct bdev_virtio_io_channel *ch = ctx_buf;

	ch->hw = *hw;
	ch->vdev = *vdev;
	spdk_bdev_poller_start(&ch->poller, bdev_virtio_poll, ch,
			       spdk_env_get_current_core(), 0);
	return 0;
@@ -301,7 +301,7 @@ scan_target_finish(struct virtio_scsi_scan_base *base)

	while ((disk = TAILQ_FIRST(&base->found_disks))) {
		TAILQ_REMOVE(&base->found_disks, disk, link);
		spdk_io_device_register(&disk->hw, bdev_virtio_create_cb, bdev_virtio_destroy_cb,
		spdk_io_device_register(&disk->vdev, bdev_virtio_create_cb, bdev_virtio_destroy_cb,
					sizeof(struct bdev_virtio_io_channel));
		spdk_bdev_register(&disk->bdev);
	}
@@ -334,7 +334,7 @@ process_scan_inquiry(struct virtio_scsi_scan_base *base, struct virtio_req *vreq
	iov[0].iov_len = 32;
	to_be32(&req->cdb[10], iov[0].iov_len);

	virtio_xmit_pkts(base->hw->vqs[2], vreq);
	virtio_xmit_pkts(base->vdev->vqs[2], vreq);
	return 0;
}

@@ -360,7 +360,7 @@ process_read_cap(struct virtio_scsi_scan_base *base, struct virtio_req *vreq)
	disk->num_blocks = from_be64((uint64_t *)(vreq->iov[0].iov_base)) + 1;
	disk->block_size = from_be32((uint32_t *)(vreq->iov[0].iov_base + 8));

	disk->hw = base->hw;
	disk->vdev = base->vdev;

	bdev = &disk->bdev;
	bdev->name = spdk_sprintf_alloc("Virtio0");
@@ -416,7 +416,7 @@ bdev_scan_poll(void *arg)
	struct virtio_req *req;
	uint16_t cnt;

	cnt = virtio_recv_pkts(base->hw->vqs[2], &req, 1);
	cnt = virtio_recv_pkts(base->vdev->vqs[2], &req, 1);
	if (cnt > 0) {
		process_scan_resp(base, req);
	}
@@ -456,7 +456,7 @@ scan_target(struct virtio_scsi_scan_base *base)
	cdb->opcode = SPDK_SPC_INQUIRY;
	cdb->alloc_len[1] = 255;

	virtio_xmit_pkts(base->hw->vqs[2], vreq);
	virtio_xmit_pkts(base->vdev->vqs[2], vreq);
}

static int
@@ -464,7 +464,7 @@ bdev_virtio_initialize(void)
{
	struct spdk_conf_section *sp = spdk_conf_find_section(NULL, "Virtio");
	struct virtio_scsi_scan_base *base;
	struct virtio_hw *hw = NULL;
	struct virtio_dev *vdev = NULL;
	char *type, *path;
	uint32_t i;
	int rc = 0;
@@ -485,16 +485,16 @@ bdev_virtio_initialize(void)
				SPDK_ERRLOG("No path specified for index %d\n", i);
				continue;
			}
			hw = virtio_user_dev_init(path, 1, 512);
			vdev = virtio_user_dev_init(path, 1, 512);
		} else if (!strcmp("Pci", type)) {
			hw = get_pci_virtio_hw();
			vdev = get_pci_virtio_hw();
		} else {
			SPDK_ERRLOG("Invalid type %s specified for index %d\n", type, i);
			continue;
		}
	}

	if (hw == NULL) {
	if (vdev == NULL) {
		goto out;
	}

@@ -506,10 +506,10 @@ bdev_virtio_initialize(void)
	}

	/* TODO check rc, add virtio_dev_deinit() */
	virtio_init_device(hw, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
	virtio_dev_start(hw);
	virtio_init_device(vdev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
	virtio_dev_start(vdev);

	base->hw = hw;
	base->vdev = vdev;
	TAILQ_INIT(&base->found_disks);

	spdk_bdev_poller_start(&base->scan_poller, bdev_scan_poll, base,
+47 −46
Original line number Diff line number Diff line
@@ -67,9 +67,9 @@ static const struct rte_pci_id pci_id_virtio_map[] = {
};

static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
virtio_get_nr_vq(struct virtio_dev *dev)
{
	return hw->max_queues;
	return dev->max_queues;
}

static void
@@ -102,7 +102,7 @@ virtio_init_vring(struct virtqueue *vq)
}

static int
virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
virtio_init_queue(struct virtio_dev *dev, uint16_t vtpci_queue_idx)
{
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	const struct rte_memzone *mz = NULL;
@@ -116,7 +116,7 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
	 * Read the virtqueue size from the Queue Size field
	 * Always power of 2 and if 0 virtqueue does not exist
	 */
	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
	vq_size = VTPCI_OPS(dev)->get_queue_num(dev, vtpci_queue_idx);
	PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
	if (vq_size == 0) {
		PMD_INIT_LOG(ERR, "virtqueue does not exist");
@@ -129,7 +129,7 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
	}

	snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
		 hw->port_id, vtpci_queue_idx);
		 dev->port_id, vtpci_queue_idx);

	size = RTE_ALIGN_CEIL(sizeof(*vq) +
				vq_size * sizeof(struct vq_desc_extra),
@@ -141,9 +141,9 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
		PMD_INIT_LOG(ERR, "can not allocate vq");
		return -ENOMEM;
	}
	hw->vqs[vtpci_queue_idx] = vq;
	dev->vqs[vtpci_queue_idx] = vq;

	vq->hw = hw;
	vq->vdev = dev;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_nentries = vq_size;

@@ -180,7 +180,7 @@ virtio_init_queue(struct virtio_hw *hw, uint16_t vtpci_queue_idx)

	vq->mz = mz;

	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
	if (VTPCI_OPS(dev)->setup_queue(dev, vq) < 0) {
		PMD_INIT_LOG(ERR, "setup_queue failed");
		return -EINVAL;
	}
@@ -195,47 +195,47 @@ fail_q_alloc:
}

static void
virtio_free_queues(struct virtio_hw *hw)
virtio_free_queues(struct virtio_dev *dev)
{
	uint16_t nr_vq = virtio_get_nr_vq(hw);
	uint16_t nr_vq = virtio_get_nr_vq(dev);
	struct virtqueue *vq;
	uint16_t i;

	if (hw->vqs == NULL)
	if (dev->vqs == NULL)
		return;

	for (i = 0; i < nr_vq; i++) {
		vq = hw->vqs[i];
		vq = dev->vqs[i];
		if (!vq)
			continue;

		rte_memzone_free(vq->mz);

		rte_free(vq);
		hw->vqs[i] = NULL;
		dev->vqs[i] = NULL;
	}

	rte_free(hw->vqs);
	hw->vqs = NULL;
	rte_free(dev->vqs);
	dev->vqs = NULL;
}

static int
virtio_alloc_queues(struct virtio_hw *hw)
virtio_alloc_queues(struct virtio_dev *dev)
{
	uint16_t nr_vq = virtio_get_nr_vq(hw);
	uint16_t nr_vq = virtio_get_nr_vq(dev);
	uint16_t i;
	int ret;

	hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
	if (!hw->vqs) {
	dev->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
	if (!dev->vqs) {
		PMD_INIT_LOG(ERR, "failed to allocate vqs");
		return -ENOMEM;
	}

	for (i = 0; i < nr_vq; i++) {
		ret = virtio_init_queue(hw, i);
		ret = virtio_init_queue(dev, i);
		if (ret < 0) {
			virtio_free_queues(hw);
			virtio_free_queues(dev);
			return ret;
		}
	}
@@ -244,7 +244,7 @@ virtio_alloc_queues(struct virtio_hw *hw)
}

static int
virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
virtio_negotiate_features(struct virtio_dev *dev, uint64_t req_features)
{
	uint64_t host_features;

@@ -253,7 +253,7 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
		req_features);

	/* Read device(host) feature bits */
	host_features = VTPCI_OPS(hw)->get_features(hw);
	host_features = VTPCI_OPS(dev)->get_features(dev);
	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
		host_features);

@@ -261,66 +261,66 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
	 * Negotiate features: Subset of device feature bits are written back
	 * guest feature bits.
	 */
	hw->guest_features = req_features;
	hw->guest_features = vtpci_negotiate_features(hw, host_features);
	dev->guest_features = req_features;
	dev->guest_features = vtpci_negotiate_features(dev, host_features);
	PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
		hw->guest_features);
		dev->guest_features);

	if (hw->modern) {
		if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
	if (dev->modern) {
		if (!vtpci_with_feature(dev, VIRTIO_F_VERSION_1)) {
			PMD_INIT_LOG(ERR,
				"VIRTIO_F_VERSION_1 features is not enabled.");
			return -1;
		}
		vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
		if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
		vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FEATURES_OK);
		if (!(vtpci_get_status(dev) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
			PMD_INIT_LOG(ERR,
				"failed to set FEATURES_OK status!");
			return -1;
		}
	}

	hw->req_guest_features = req_features;
	dev->req_guest_features = req_features;

	return 0;
}

/* reset device and renegotiate features if needed */
int
virtio_init_device(struct virtio_hw *hw, uint64_t req_features)
virtio_init_device(struct virtio_dev *dev, uint64_t req_features)
{
	int ret;

	/* Reset the device although not necessary at startup */
	vtpci_reset(hw);
	vtpci_reset(dev);

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we've known how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	if (virtio_negotiate_features(hw, req_features) < 0)
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
	if (virtio_negotiate_features(dev, req_features) < 0)
		return -1;

	vtpci_read_dev_config(hw, offsetof(struct virtio_scsi_config, num_queues),
			      &hw->max_queues, sizeof(hw->max_queues));
	vtpci_read_dev_config(dev, offsetof(struct virtio_scsi_config, num_queues),
			      &dev->max_queues, sizeof(dev->max_queues));
	/* FIXME
	 * Hardcode num_queues to 3 until we add proper
	 * mutli-queue support. This value should be limited
	 * by number of cores assigned to SPDK
	 */
	hw->max_queues = 3;
	dev->max_queues = 3;

	ret = virtio_alloc_queues(hw);
	ret = virtio_alloc_queues(dev);
	if (ret < 0)
		return ret;

	vtpci_reinit_complete(hw);
	vtpci_reinit_complete(dev);
	return 0;
}

int
virtio_dev_start(struct virtio_hw *hw)
virtio_dev_start(struct virtio_dev *vdev)
{
	struct virtnet_tx *txvq __rte_unused;

@@ -343,14 +343,14 @@ virtio_dev_start(struct virtio_hw *hw)

	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");

	hw->started = 1;
	vdev->started = 1;

	return 0;
}

static struct virtio_hw *g_pci_hw = NULL;

struct virtio_hw *
struct virtio_dev *
get_pci_virtio_hw(void)
{
	int ret;
@@ -361,11 +361,11 @@ get_pci_virtio_hw(void)
		return NULL;
	}

	ret = vtpci_init(g_pci_hw->pci_dev, g_pci_hw);
	ret = vtpci_init(g_pci_hw->pci_dev, &g_pci_hw->vdev);
	if (ret)
		return NULL;

	return g_pci_hw;
	return &g_pci_hw->vdev;
}

static int virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
@@ -374,6 +374,7 @@ static int virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct virtio_hw *hw;

	hw = calloc(1, sizeof(*hw));
	hw->vdev.is_hw = 1;
	hw->pci_dev = pci_dev;

	g_pci_hw = hw;
+14 −5
Original line number Diff line number Diff line
@@ -37,12 +37,21 @@
#include <stdint.h>
#include <sys/uio.h>

#include "virtio_pci.h"

#define VIRTIO_MAX_RX_QUEUES 128U
#define VIRTIO_MAX_TX_QUEUES 128U
#define VIRTIO_MIN_RX_BUFSIZE 64

struct virtio_dev {
	struct virtqueue **vqs;
	uint16_t	started;
	uint32_t	max_queues;
	uint8_t		port_id;
	uint64_t	req_guest_features;
	uint64_t	guest_features;
	int		is_hw;
	uint8_t		modern;
};

struct virtio_req {
	struct iovec	*iov;
	struct iovec	iov_req;
@@ -66,9 +75,9 @@ uint16_t virtio_recv_pkts(struct virtqueue *vq, struct virtio_req **reqs,

uint16_t virtio_xmit_pkts(struct virtqueue *vq, struct virtio_req *req);

int virtio_init_device(struct virtio_hw *hw, uint64_t req_features);
int virtio_dev_start(struct virtio_hw *hw);
struct virtio_hw *get_pci_virtio_hw(void);
int virtio_init_device(struct virtio_dev *hw, uint64_t req_features);
int virtio_dev_start(struct virtio_dev *hw);
struct virtio_dev *get_pci_virtio_hw(void);

void virtio_interrupt_handler(void *param);

+108 −81
Original line number Diff line number Diff line
@@ -43,7 +43,7 @@
#include "virtio_logs.h"
#include "virtio_queue.h"

struct virtio_hw_internal virtio_hw_internal[128];
struct vtpci_internal virtio_hw_internal[128];

/*
 * Following macros are derived from linux/pci_regs.h, however,
@@ -60,6 +60,9 @@ struct virtio_hw_internal virtio_hw_internal[128];
 */
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)

#define virtio_dev_get_hw(hw) \
	((struct virtio_hw *)((uintptr_t)(hw) - offsetof(struct virtio_hw, vdev)))

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
@@ -88,27 +91,28 @@ check_vq_phys_addr_ok(struct virtqueue *vq)
 * enforces this for the virtio-net stuff.
 */
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
legacy_read_dev_config(struct virtio_dev *dev, size_t offset,
		       void *dst, int length)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);
#ifdef RTE_ARCH_PPC_64
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			rte_pci_ioport_read(VTPCI_IO(dev), dst, size,
				VIRTIO_PCI_CONFIG(dev) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			rte_pci_ioport_read(VTPCI_IO(dev), dst, size,
				VIRTIO_PCI_CONFIG(dev) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			size = 1;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			rte_pci_ioport_read(VTPCI_IO(dev), dst, size,
				VIRTIO_PCI_CONFIG(dev) + offset);
		}

		dst = (char *)dst + size;
@@ -116,15 +120,16 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		length -= size;
	}
#else
	rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
	rte_pci_ioport_read(VTPCI_IO(dev), dst, length,
		VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
legacy_write_dev_config(struct virtio_dev *dev, size_t offset,
			const void *src, int length)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);
#ifdef RTE_ARCH_PPC_64
	union {
		uint32_t u32;
@@ -154,125 +159,125 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
		length -= size;
	}
#else
	rte_pci_ioport_write(VTPCI_IO(hw), src, length,
	rte_pci_ioport_write(VTPCI_IO(dev), src, length,
		VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static uint64_t
legacy_get_features(struct virtio_hw *hw)
legacy_get_features(struct virtio_dev *dev)
{
	uint32_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
	rte_pci_ioport_read(VTPCI_IO(dev), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
	return dst;
}

static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
legacy_set_features(struct virtio_dev *dev, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_DRV_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
	rte_pci_ioport_write(VTPCI_IO(dev), &features, 4,
		VIRTIO_PCI_GUEST_FEATURES);
}

static uint8_t
legacy_get_status(struct virtio_hw *hw)
legacy_get_status(struct virtio_dev *dev)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
	rte_pci_ioport_read(VTPCI_IO(dev), &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}

static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
legacy_set_status(struct virtio_dev *dev, uint8_t status)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
	rte_pci_ioport_write(VTPCI_IO(dev), &status, 1, VIRTIO_PCI_STATUS);
}

static void
legacy_reset(struct virtio_hw *hw)
legacy_reset(struct virtio_dev *dev)
{
	legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	legacy_set_status(dev, VIRTIO_CONFIG_STATUS_RESET);
}

static uint8_t
legacy_get_isr(struct virtio_hw *hw)
legacy_get_isr(struct virtio_dev *dev)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
	rte_pci_ioport_read(VTPCI_IO(dev), &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}

/* Enable one vector (0) for Link State Intrerrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
legacy_set_config_irq(struct virtio_dev *dev, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_pci_ioport_write(VTPCI_IO(dev), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(dev), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}

static uint16_t
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
legacy_set_queue_irq(struct virtio_dev *dev, struct virtqueue *vq, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
	rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
	rte_pci_ioport_write(VTPCI_IO(dev), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(dev), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
	return dst;
}

static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
legacy_get_queue_num(struct virtio_dev *dev, uint16_t queue_id)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	rte_pci_ioport_write(VTPCI_IO(dev), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_read(VTPCI_IO(dev), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}

static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
legacy_setup_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	uint32_t src;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
	rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
	rte_pci_ioport_write(VTPCI_IO(dev), &src, 4, VIRTIO_PCI_QUEUE_PFN);

	return 0;
}

static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
legacy_del_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
	rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
	rte_pci_ioport_write(VTPCI_IO(dev), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
legacy_notify_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
	rte_pci_ioport_write(VTPCI_IO(dev), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_NOTIFY);
}

@@ -301,9 +306,10 @@ io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
}

static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
modern_read_dev_config(struct virtio_dev *dev, size_t offset,
		       void *dst, int length)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;
@@ -320,9 +326,10 @@ modern_read_dev_config(struct virtio_hw *hw, size_t offset,
}

static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
modern_write_dev_config(struct virtio_dev *dev, size_t offset,
			const void *src, int length)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);
	int i;
	const uint8_t *p = src;

@@ -331,8 +338,9 @@ modern_write_dev_config(struct virtio_hw *hw, size_t offset,
}

static uint64_t
modern_get_features(struct virtio_hw *hw)
modern_get_features(struct virtio_dev *dev)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
@@ -345,8 +353,10 @@ modern_get_features(struct virtio_hw *hw)
}

static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
modern_set_features(struct virtio_dev *dev, uint64_t features)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);

	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &hw->common_cfg->guest_feature);
@@ -357,55 +367,68 @@ modern_set_features(struct virtio_hw *hw, uint64_t features)
}

static uint8_t
modern_get_status(struct virtio_hw *hw)
modern_get_status(struct virtio_dev *dev)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);

	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
modern_set_status(struct virtio_dev *dev, uint8_t status)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);

	rte_write8(status, &hw->common_cfg->device_status);
}

static void
modern_reset(struct virtio_hw *hw)
modern_reset(struct virtio_dev *dev)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
	modern_set_status(dev, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(dev);
}

static uint8_t
modern_get_isr(struct virtio_hw *hw)
modern_get_isr(struct virtio_dev *dev)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);

	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
modern_set_config_irq(struct virtio_dev *dev, uint16_t vec)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);

	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
modern_set_queue_irq(struct virtio_dev *dev, struct virtqueue *vq, uint16_t vec)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
modern_get_queue_num(struct virtio_dev *dev, uint16_t queue_id)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);

	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}

static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
modern_setup_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

@@ -444,8 +467,10 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
}

static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
modern_del_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	struct virtio_hw *hw = virtio_dev_get_hw(dev);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
@@ -459,7 +484,7 @@ modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
}

static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
modern_notify_queue(struct virtio_dev *dev __rte_unused, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, vq->notify_addr);
}
@@ -483,21 +508,21 @@ const struct virtio_pci_ops modern_ops = {


void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
vtpci_read_dev_config(struct virtio_dev *dev, size_t offset,
		      void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
	VTPCI_OPS(dev)->read_dev_cfg(dev, offset, dst, length);
}

void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
vtpci_write_dev_config(struct virtio_dev *dev, size_t offset,
		       const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
	VTPCI_OPS(dev)->write_dev_cfg(dev, offset, src, length);
}

uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
vtpci_negotiate_features(struct virtio_dev *dev, uint64_t host_features)
{
	uint64_t features;

@@ -505,45 +530,45 @@ vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);
	features = host_features & dev->guest_features;
	VTPCI_OPS(dev)->set_features(dev, features);

	return features;
}

void
vtpci_reset(struct virtio_hw *hw)
vtpci_reset(struct virtio_dev *dev)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	VTPCI_OPS(dev)->set_status(dev, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
	VTPCI_OPS(dev)->get_status(dev);
}

void
vtpci_reinit_complete(struct virtio_hw *hw)
vtpci_reinit_complete(struct virtio_dev *dev)
{
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
	vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
vtpci_set_status(struct virtio_dev *dev, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);
		status |= VTPCI_OPS(dev)->get_status(dev);

	VTPCI_OPS(hw)->set_status(hw, status);
	VTPCI_OPS(dev)->set_status(dev, status);
}

uint8_t
vtpci_get_status(struct virtio_hw *hw)
vtpci_get_status(struct virtio_dev *dev)
{
	return VTPCI_OPS(hw)->get_status(hw);
	return VTPCI_OPS(dev)->get_status(dev);
}

uint8_t
vtpci_isr(struct virtio_hw *hw)
vtpci_isr(struct virtio_dev *dev)
{
	return VTPCI_OPS(hw)->get_isr(hw);
	return VTPCI_OPS(dev)->get_isr(dev);
}

static void *
@@ -668,8 +693,10 @@ next:
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
vtpci_init(struct rte_pci_device *dev, struct virtio_dev *vdev)
{
	struct virtio_hw *hw = virtio_dev_get_hw(vdev);

	/*
	 * Try if we can succeed reading virtio pci caps, which exists
	 * only on modern pci device. If failed, we fallback to legacy
@@ -677,8 +704,8 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
		hw->modern = 1;
		virtio_hw_internal[vdev->port_id].vtpci_ops = &modern_ops;
		vdev->modern = 1;
		return 0;
	}

@@ -697,8 +724,8 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
	}
#endif

	virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
	hw->modern   = 0;
	virtio_hw_internal[vdev->port_id].vtpci_ops = &legacy_ops;
	vdev->modern   = 0;

	return 0;
}
+32 −41

File changed.

Preview size limit exceeded, changes collapsed.

Loading