Commit 34162e72 authored by Dariusz Stojaczyk's avatar Dariusz Stojaczyk Committed by Jim Harris
Browse files

vhost: call new/destroy_device entirely on vhost reactor



DPDK is now fully synchronized with the rest of vhost.

This patch also removes timed_event API from vhost_internal.h.
It should only be used internally in vhost.c under g_spdk_vhost_mutex.

Also, this patch temporarily removes the return codes from the
vhost_scsi/blk DPDK callbacks: -1 will be returned if the
call times out, 0 otherwise. A patch re-enabling this
functionality is on the way.

Change-Id: Ifd9566ceb55a3c6dbe4bac013cc3b7600c834d17
Signed-off-by: Dariusz Stojaczyk <dariuszx.stojaczyk@intel.com>
Reviewed-on: https://review.gerrithub.io/372687


Tested-by: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
Reviewed-by: Daniel Verkamp <daniel.verkamp@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent ef1437b3
Loading
Loading
Loading
Loading
+14 −13
Original line number Diff line number Diff line
@@ -61,9 +61,6 @@ struct spdk_vhost_dev_event_ctx {

	/** Semaphore used to signal that event is done. */
	sem_t sem;

	/** Response to be written by enqueued event. */
	int response;
};

static int new_connection(int vid);
@@ -544,8 +541,7 @@ spdk_vhost_event_cb(void *arg1, void *arg2)
{
	struct spdk_vhost_dev_event_ctx *ctx = arg1;

	ctx->response = ctx->cb_fn(ctx->vdev, arg2);
	sem_post(&ctx->sem);
	ctx->cb_fn(ctx->vdev, &ctx->sem);
}

static void
@@ -568,8 +564,8 @@ spdk_vhost_event_async_fn(void *arg1, void *arg2)
	free(ctx);
}

int
spdk_vhost_event_send(struct spdk_vhost_dev *vdev, spdk_vhost_event_fn cb_fn, void *arg,
static int
spdk_vhost_event_send(struct spdk_vhost_dev *vdev, spdk_vhost_event_fn cb_fn,
		      unsigned timeout_sec, const char *errmsg)
{
	struct spdk_vhost_dev_event_ctx ev_ctx = {0};
@@ -586,13 +582,14 @@ spdk_vhost_event_send(struct spdk_vhost_dev *vdev, spdk_vhost_event_fn cb_fn, vo
	ev_ctx.vdev = vdev;
	ev_ctx.ctrlr_name = strdup(vdev->name);
	ev_ctx.cb_fn = cb_fn;
	clock_gettime(CLOCK_REALTIME, &timeout);
	timeout.tv_sec += timeout_sec;

	ev = spdk_event_allocate(vdev->lcore, spdk_vhost_event_cb, &ev_ctx, arg);
	ev = spdk_event_allocate(vdev->lcore, spdk_vhost_event_cb, &ev_ctx, NULL);
	assert(ev);
	spdk_event_call(ev);

	clock_gettime(CLOCK_REALTIME, &timeout);
	timeout.tv_sec += timeout_sec;

	rc = sem_timedwait(&ev_ctx.sem, &timeout);
	if (rc != 0) {
		SPDK_ERRLOG("Timout waiting for event: %s.\n", errmsg);
@@ -602,7 +599,7 @@ spdk_vhost_event_send(struct spdk_vhost_dev *vdev, spdk_vhost_event_fn cb_fn, vo
	sem_destroy(&ev_ctx.sem);
	free(ev_ctx.ctrlr_name);

	return ev_ctx.response;
	return rc;
}

static int
@@ -633,6 +630,7 @@ destroy_device(int vid)
{
	struct spdk_vhost_dev *vdev;
	struct rte_vhost_vring *q;
	int rc;
	uint16_t i;

	pthread_mutex_lock(&g_spdk_vhost_mutex);
@@ -649,7 +647,8 @@ destroy_device(int vid)
		return;
	}

	if (vdev->backend->destroy_device(vdev) != 0) {
	rc = spdk_vhost_event_send(vdev, vdev->backend->destroy_device, 3, "destroy device");
	if (rc != 0) {
		SPDK_ERRLOG("Couldn't stop device with vid %d.\n", vid);
		pthread_mutex_unlock(&g_spdk_vhost_mutex);
		return;
@@ -721,7 +720,7 @@ new_device(int vid)
	}

	vdev->lcore = spdk_vhost_allocate_reactor(vdev->cpumask);
	rc = vdev->backend->new_device(vdev);
	rc = spdk_vhost_event_send(vdev, vdev->backend->new_device, 3, "new device");
	if (rc != 0) {
		free(vdev->mem);
		spdk_vhost_free_reactor(vdev->lcore);
@@ -837,6 +836,7 @@ new_connection(int vid)
		return -1;
	}

	/* since pollers are not running it safe not to use spdk_event here */
	if (vdev->vid != -1) {
		SPDK_ERRLOG("Device with vid %d is already connected.\n", vid);
		pthread_mutex_unlock(&g_spdk_vhost_mutex);
@@ -861,6 +861,7 @@ destroy_connection(int vid)
		return;
	}

	/* since pollers are not running it safe not to use spdk_event here */
	vdev->vid = -1;
	pthread_mutex_unlock(&g_spdk_vhost_mutex);
}
+72 −68
Original line number Diff line number Diff line
@@ -359,52 +359,6 @@ no_bdev_vdev_worker(void *arg)
	}
}

static int
add_vdev_cb(struct spdk_vhost_dev *vdev, void *arg)
{
	struct spdk_vhost_blk_dev *bvdev = arg;

	spdk_vhost_dev_mem_register(&bvdev->vdev);

	if (bvdev->bdev) {
		bvdev->bdev_io_channel = spdk_bdev_get_io_channel(bvdev->bdev_desc);
		if (!bvdev->bdev_io_channel) {
			SPDK_ERRLOG("Controller %s: IO channel allocation failed\n", vdev->name);
			abort();
		}
	}

	spdk_poller_register(&bvdev->requestq_poller, bvdev->bdev ? vdev_worker : no_bdev_vdev_worker,
			     bvdev, vdev->lcore, 0);
	SPDK_NOTICELOG("Started poller for vhost controller %s on lcore %d\n", vdev->name, vdev->lcore);
	return 0;
}

static int
remove_vdev_cb(struct spdk_vhost_dev *vdev, void *arg)
{
	struct spdk_vhost_blk_dev *bvdev = arg;

	SPDK_NOTICELOG("Stopping poller for vhost controller %s\n", bvdev->vdev.name);

	if (bvdev->bdev_io_channel) {
		spdk_put_io_channel(bvdev->bdev_io_channel);
		bvdev->bdev_io_channel = NULL;
	}

	spdk_vhost_dev_mem_unregister(&bvdev->vdev);
	return 0;
}

static int
unregister_vdev_cb(struct spdk_vhost_dev *vdev, void *arg)
{
	struct spdk_vhost_blk_dev *bvdev = arg;

	spdk_poller_unregister(&bvdev->requestq_poller, NULL);
	return 0;
}

static struct spdk_vhost_blk_dev *
to_blk_dev(struct spdk_vhost_dev *vdev)
{
@@ -530,58 +484,108 @@ alloc_task_pool(struct spdk_vhost_blk_dev *bvdev)
 *
 */
static int
new_device(struct spdk_vhost_dev *vdev)
new_device(struct spdk_vhost_dev *vdev, void *arg)
{
	struct spdk_vhost_blk_dev *bvdev;
	int rc = -1;
	sem_t *sem = arg;
	int rc = 0;

	bvdev = to_blk_dev(vdev);
	if (bvdev == NULL) {
		SPDK_ERRLOG("Trying to start non-blk controller as a blk one.\n");
		return -1;
		rc = -1;
		goto out;
	} else if (bvdev == NULL) {
		SPDK_ERRLOG("Trying to start non-blk controller as blk one.\n");
		return -1;
		rc = -1;
		goto out;
	}

	rc = alloc_task_pool(bvdev);
	if (rc != 0) {
		SPDK_ERRLOG("%s: failed to alloc task pool.\n", bvdev->vdev.name);
		return -1;
		goto out;
	}

	spdk_vhost_event_send(vdev, add_vdev_cb, bvdev, 1, "add blk vdev");
	return 0;
	spdk_vhost_dev_mem_register(&bvdev->vdev);

	if (bvdev->bdev) {
		bvdev->bdev_io_channel = spdk_bdev_get_io_channel(bvdev->bdev_desc);
		if (!bvdev->bdev_io_channel) {
			SPDK_ERRLOG("Controller %s: IO channel allocation failed\n", vdev->name);
			rc = -1;
			goto out;
		}
	}

	spdk_poller_register(&bvdev->requestq_poller, bvdev->bdev ? vdev_worker : no_bdev_vdev_worker,
			     bvdev, vdev->lcore, 0);
	SPDK_NOTICELOG("Started poller for vhost controller %s on lcore %d\n", vdev->name, vdev->lcore);
out:
	sem_post(sem);
	return rc;
}

struct spdk_vhost_dev_destroy_ctx {
	struct spdk_vhost_blk_dev *bvdev;
	struct spdk_poller *poller;
	sem_t *sem;
};

static void
destroy_device_poller_cb(void *arg)
{
	struct spdk_vhost_dev_destroy_ctx *ctx = arg;
	struct spdk_vhost_blk_dev *bvdev = ctx->bvdev;

	if (bvdev->vdev.task_cnt > 0) {
		return;
	}

	SPDK_NOTICELOG("Stopping poller for vhost controller %s\n", bvdev->vdev.name);

	if (bvdev->bdev_io_channel) {
		spdk_put_io_channel(bvdev->bdev_io_channel);
		bvdev->bdev_io_channel = NULL;
	}

	free_task_pool(bvdev);
	spdk_vhost_dev_mem_unregister(&bvdev->vdev);

	spdk_poller_unregister(&ctx->poller, NULL);
	sem_post(ctx->sem);
}

static int
destroy_device(struct spdk_vhost_dev *vdev)
destroy_device(struct spdk_vhost_dev *vdev, void *arg)
{
	struct spdk_vhost_blk_dev *bvdev;
	uint32_t i;
	struct spdk_vhost_dev_destroy_ctx *destroy_ctx;
	sem_t *sem = arg;

	bvdev = to_blk_dev(vdev);
	if (bvdev == NULL) {
		SPDK_ERRLOG("Trying to stop non-blk controller as a blk one.\n");
		return -1;
	}

	spdk_vhost_event_send(vdev, unregister_vdev_cb, bvdev, 1, "unregister vdev");

	/* Wait for all tasks to finish */
	for (i = 1000; i && vdev->task_cnt > 0; i--) {
		usleep(1000);
		goto err;
	}

	if (vdev->task_cnt > 0) {
		SPDK_ERRLOG("%s: pending tasks did not finish in 1s.\n", vdev->name);
		abort();
	destroy_ctx = spdk_dma_zmalloc(sizeof(*destroy_ctx), SPDK_CACHE_LINE_SIZE, NULL);
	if (destroy_ctx == NULL) {
		SPDK_ERRLOG("Failed to alloc memory for destroying device.\n");
		goto err;
	}

	spdk_vhost_event_send(vdev, remove_vdev_cb, bvdev, 1, "remove vdev");
	destroy_ctx->bvdev = bvdev;
	destroy_ctx->sem = arg;

	free_task_pool(bvdev);
	spdk_poller_unregister(&bvdev->requestq_poller, NULL);
	spdk_poller_register(&destroy_ctx->poller, destroy_device_poller_cb, destroy_ctx, vdev->lcore,
			     1000);
	return 0;

err:
	sem_post(sem);
	return -1;
}

static void
+12 −5
Original line number Diff line number Diff line
@@ -85,8 +85,18 @@ enum spdk_vhost_dev_type {
struct spdk_vhost_dev_backend {
	uint64_t virtio_features;
	uint64_t disabled_features;
	int (*new_device)(struct spdk_vhost_dev *);
	int (*destroy_device)(struct spdk_vhost_dev *);

	/**
	 * Callbacks for starting and pausing the device.
	 * The first param is struct spdk_vhost_dev *,
	 * The second one is sem_t* passed as a void*.
	 * The callback must call sem_post with given sem.
	 * If sem_post won't be called within an arbitrary
	 * limit of 3 seconds, this will time out.
	 */
	spdk_vhost_event_fn new_device;
	spdk_vhost_event_fn destroy_device;

	void (*dump_config_json)(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);
};

@@ -135,9 +145,6 @@ int spdk_vhost_dev_construct(struct spdk_vhost_dev *vdev, const char *name, cons
			     enum spdk_vhost_dev_type type, const struct spdk_vhost_dev_backend *backend);
int spdk_vhost_dev_remove(struct spdk_vhost_dev *vdev);

int spdk_vhost_event_send(struct spdk_vhost_dev *vdev, spdk_vhost_event_fn cb_fn, void *arg,
			  unsigned timeout_sec, const char *errmsg);

int spdk_vhost_blk_controller_construct(void);
void spdk_vhost_dump_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);

+82 −76
Original line number Diff line number Diff line
@@ -109,8 +109,8 @@ struct spdk_vhost_scsi_event {
	struct spdk_scsi_lun *lun;
};

static int new_device(struct spdk_vhost_dev *);
static int destroy_device(struct spdk_vhost_dev *);
static int new_device(struct spdk_vhost_dev *, void *);
static int destroy_device(struct spdk_vhost_dev *, void *);
static void spdk_vhost_scsi_config_json(struct spdk_vhost_dev *vdev, struct spdk_json_write_ctx *w);

const struct spdk_vhost_dev_backend spdk_vhost_scsi_device_backend = {
@@ -688,56 +688,6 @@ vdev_worker(void *arg)
	}
}

static int
add_vdev_cb(struct spdk_vhost_dev *vdev, void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	uint32_t i;

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
		if (svdev->scsi_dev[i] == NULL) {
			continue;
		}
		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[i]);
	}
	SPDK_NOTICELOG("Started poller for vhost controller %s on lcore %d\n", vdev->name, vdev->lcore);

	spdk_vhost_dev_mem_register(vdev);

	spdk_poller_register(&svdev->requestq_poller, vdev_worker, svdev, vdev->lcore, 0);
	spdk_poller_register(&svdev->mgmt_poller, vdev_mgmt_worker, svdev, vdev->lcore,
			     MGMT_POLL_PERIOD_US);
	return 0;
}

static int
remove_vdev_cb(struct spdk_vhost_dev *vdev, void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;
	uint32_t i;

	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
		if (svdev->scsi_dev[i] == NULL) {
			continue;
		}
		spdk_scsi_dev_free_io_channels(svdev->scsi_dev[i]);
	}

	SPDK_NOTICELOG("Stopping poller for vhost controller %s\n", svdev->vdev.name);
	spdk_vhost_dev_mem_unregister(&svdev->vdev);
	return 0;
}

static int
unregister_vdev_cb(struct spdk_vhost_dev *vdev, void *arg)
{
	struct spdk_vhost_scsi_dev *svdev = arg;

	spdk_poller_unregister(&svdev->requestq_poller, NULL);
	spdk_poller_unregister(&svdev->mgmt_poller, NULL);
	return 0;
}

static struct spdk_vhost_scsi_dev *
to_scsi_dev(struct spdk_vhost_dev *ctrlr)
{
@@ -1102,59 +1052,79 @@ alloc_task_pool(struct spdk_vhost_scsi_dev *svdev)
 * and then allocated to a specific data core.
 */
static int
new_device(struct spdk_vhost_dev *vdev)
new_device(struct spdk_vhost_dev *vdev, void *arg)
{
	struct spdk_vhost_scsi_dev *svdev;
	int rc;
	sem_t *sem = arg;
	uint32_t i;
	int rc = 0;

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		SPDK_ERRLOG("Trying to start non-scsi controller as a scsi one.\n");
		return -1;
		rc = -1;
		goto out;
	}

	rc = alloc_task_pool(svdev);
	if (rc != 0) {
		SPDK_ERRLOG("%s: failed to alloc task pool.\n", vdev->name);
		return -1;
		goto out;
	}

	svdev->vhost_events = spdk_ring_create(SPDK_RING_TYPE_MP_SC, 16,
					       spdk_env_get_socket_id(vdev->lcore));
	if (svdev->vhost_events == NULL) {
		SPDK_ERRLOG("%s: failed to alloc event pool.\n", vdev->name);
		return -1;
		rc = -1;
		goto out;
	}

	spdk_vhost_event_send(vdev, add_vdev_cb, svdev, 1, "add scsi vdev");
	return 0;
	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
		if (svdev->scsi_dev[i] == NULL) {
			continue;
		}
		spdk_scsi_dev_allocate_io_channels(svdev->scsi_dev[i]);
	}
	SPDK_NOTICELOG("Started poller for vhost controller %s on lcore %d\n", vdev->name, vdev->lcore);

static int
destroy_device(struct spdk_vhost_dev *vdev)
{
	spdk_vhost_dev_mem_register(vdev);

	spdk_poller_register(&svdev->requestq_poller, vdev_worker, svdev, vdev->lcore, 0);
	spdk_poller_register(&svdev->mgmt_poller, vdev_mgmt_worker, svdev, vdev->lcore,
			     MGMT_POLL_PERIOD_US);
out:
	sem_post(sem);
	return rc;
}

struct spdk_vhost_dev_destroy_ctx {
	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_poller *poller;
	sem_t *sem;
};

static void
destroy_device_poller_cb(void *arg)
{
	struct spdk_vhost_dev_destroy_ctx *ctx = arg;
	struct spdk_vhost_scsi_dev *svdev = ctx->svdev;
	void *ev;
	uint32_t i;

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		SPDK_ERRLOG("Trying to stop non-scsi controller as a scsi one.\n");
		return -1;
	if (svdev->vdev.task_cnt > 0) {
		return;
	}

	spdk_vhost_event_send(vdev, unregister_vdev_cb, svdev, 1, "unregister scsi vdev");

	/* Wait for all tasks to finish */
	for (i = 1000; i && vdev->task_cnt > 0; i--) {
		usleep(1000);
	for (i = 0; i < SPDK_VHOST_SCSI_CTRLR_MAX_DEVS; i++) {
		if (svdev->scsi_dev[i] == NULL) {
			continue;
		}

	if (vdev->task_cnt > 0) {
		SPDK_ERRLOG("%s: pending tasks did not finish in 1s.\n", vdev->name);
		spdk_scsi_dev_free_io_channels(svdev->scsi_dev[i]);
	}

	spdk_vhost_event_send(vdev, remove_vdev_cb, svdev, 1, "remove scsi vdev");
	SPDK_NOTICELOG("Stopping poller for vhost controller %s\n", svdev->vdev.name);
	spdk_vhost_dev_mem_unregister(&svdev->vdev);

	/* Flush not sent events */
	while (spdk_ring_dequeue(svdev->vhost_events, &ev, 1) == 1) {
@@ -1166,7 +1136,43 @@ destroy_device(struct spdk_vhost_dev *vdev)
	spdk_ring_free(svdev->vhost_events);

	free_task_pool(svdev);

	spdk_poller_unregister(&ctx->poller, NULL);
	sem_post(ctx->sem);
}

static int
destroy_device(struct spdk_vhost_dev *vdev, void *arg)
{
	struct spdk_vhost_scsi_dev *svdev;
	struct spdk_vhost_dev_destroy_ctx *destroy_ctx;
	sem_t *sem = arg;

	svdev = to_scsi_dev(vdev);
	if (svdev == NULL) {
		SPDK_ERRLOG("Trying to stop non-scsi controller as a scsi one.\n");
		goto err;
	}

	destroy_ctx = spdk_dma_zmalloc(sizeof(*destroy_ctx), SPDK_CACHE_LINE_SIZE, NULL);
	if (destroy_ctx == NULL) {
		SPDK_ERRLOG("Failed to alloc memory for destroying device.\n");
		goto err;
	}

	destroy_ctx->svdev = svdev;
	destroy_ctx->sem = arg;

	spdk_poller_unregister(&svdev->requestq_poller, NULL);
	spdk_poller_unregister(&svdev->mgmt_poller, NULL);
	spdk_poller_register(&destroy_ctx->poller, destroy_device_poller_cb, destroy_ctx, vdev->lcore,
			     1000);

	return 0;

err:
	sem_post(sem);
	return -1;
}

int