Commit ba9528c9 authored by Dariusz Stojaczyk's avatar Dariusz Stojaczyk Committed by Jim Harris
Browse files

rte_virtio: bind virtqueues to spdk_threads instead of lcores



This will allow us to send a message
to a virtqueue via spdk_thread_send_msg.

Change-Id: I8502f27e74de107bc5e5ccf2939448896579acc2
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/388834


Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent 5e8ec497
Loading
Loading
Loading
Loading
+8 −8
Original line number Diff line number Diff line
@@ -170,7 +170,7 @@ virtio_init_queue(struct virtio_dev *dev, uint16_t vtpci_queue_idx)

	vq->mz = mz;

	vq->owner_lcore = SPDK_VIRTIO_QUEUE_LCORE_ID_UNUSED;
	vq->owner_thread = NULL;
	vq->poller = NULL;

	if (virtio_dev_backend_ops(dev)->setup_queue(dev, vq) < 0) {
@@ -570,13 +570,13 @@ virtio_dev_acquire_queue(struct virtio_dev *vdev, uint16_t index)

	pthread_mutex_lock(&vdev->mutex);
	vq = vdev->vqs[index];
	if (vq == NULL || vq->owner_lcore != SPDK_VIRTIO_QUEUE_LCORE_ID_UNUSED) {
	if (vq == NULL || vq->owner_thread != NULL) {
		pthread_mutex_unlock(&vdev->mutex);
		return -1;
	}

	assert(vq->poller == NULL);
	vq->owner_lcore = spdk_env_get_current_core();
	vq->owner_thread = spdk_get_thread();
	pthread_mutex_unlock(&vdev->mutex);
	return 0;
}
@@ -590,7 +590,7 @@ virtio_dev_find_and_acquire_queue(struct virtio_dev *vdev, uint16_t start_index)
	pthread_mutex_lock(&vdev->mutex);
	for (i = start_index; i < vdev->max_queues; ++i) {
		vq = vdev->vqs[i];
		if (vq != NULL && vq->owner_lcore == SPDK_VIRTIO_QUEUE_LCORE_ID_UNUSED) {
		if (vq != NULL && vq->owner_thread == NULL) {
			break;
		}
	}
@@ -602,7 +602,7 @@ virtio_dev_find_and_acquire_queue(struct virtio_dev *vdev, uint16_t start_index)
	}

	assert(vq->poller == NULL);
	vq->owner_lcore = spdk_env_get_current_core();
	vq->owner_thread = spdk_get_thread();
	pthread_mutex_unlock(&vdev->mutex);
	return i;
}
@@ -627,7 +627,7 @@ virtio_dev_queue_is_acquired(struct virtio_dev *vdev, uint16_t index)
		return false;
	}

	rc = (vq->owner_lcore != SPDK_VIRTIO_QUEUE_LCORE_ID_UNUSED);
	rc = (vq->owner_thread != NULL);
	pthread_mutex_unlock(&vdev->mutex);

	return rc;
@@ -653,8 +653,8 @@ virtio_dev_release_queue(struct virtio_dev *vdev, uint16_t index)
	}

	assert(vq->poller == NULL);
	assert(vq->owner_lcore == spdk_env_get_current_core());
	vq->owner_lcore = SPDK_VIRTIO_QUEUE_LCORE_ID_UNUSED;
	assert(vq->owner_thread == spdk_get_thread());
	vq->owner_thread = NULL;
	pthread_mutex_unlock(&vdev->mutex);
}

+5 −9
Original line number Diff line number Diff line
@@ -48,6 +48,7 @@
#include "spdk/likely.h"
#include "spdk/queue.h"
#include "spdk/json.h"
#include "spdk/io_channel.h"

/*
 * Per virtio_config.h in Linux.
@@ -70,11 +71,6 @@
 */
#define VQ_RING_DESC_CHAIN_END 32768

/* This is a work-around for fio-plugin bug, where each
 * fio job thread returns local lcore id = -1
 */
#define SPDK_VIRTIO_QUEUE_LCORE_ID_UNUSED (UINT32_MAX - 1)

/* Number of non-request queues - eventq and controlq */
#define SPDK_VIRTIO_SCSI_QUEUE_NUM_FIXED 2

@@ -188,8 +184,8 @@ struct virtqueue {
	uint16_t  vq_queue_index;   /**< PCI queue index */
	uint16_t  *notify_addr;

	/** Logical CPU ID that's polling this queue. */
	uint32_t owner_lcore;
	/** Thread that's polling this queue. */
	struct spdk_thread *owner_thread;

	/** Response poller. */
	struct spdk_bdev_poller	*poller;
@@ -312,7 +308,7 @@ virtqueue_kick_prepare(struct virtqueue *vq)
}

/**
 * Bind a virtqueue with given index to the current CPU core.
 * Bind a virtqueue with given index to the current thread.
 *
 * This function is thread-safe.
 *
@@ -324,7 +320,7 @@ virtqueue_kick_prepare(struct virtqueue *vq)
int virtio_dev_acquire_queue(struct virtio_dev *vdev, uint16_t index);

/**
 * Look for unused queue and bind it to the current CPU core.  This will
 * Look for unused queue and bind it to the current thread.  This will
 * scan the queues in range from *start_index* (inclusive) up to
 * vdev->max_queues (exclusive).
 *