Commit 61c9017c authored by Ben Walker, committed by Tomasz Zawadzki
Browse files

idxd: Eliminate spdk_idxd_configure_chan



We can do all of the configuration in spdk_idxd_get_channel, and the
configuration step was always done immediately after getting the channel
anyway.

Change-Id: I9fef342e393261f0db6308cd5be4f49720420aa0
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/10349


Community-CI: Broadcom CI <spdk-ci.pdl@broadcom.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Changpeng Liu <changpeng.liu@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Paul Luse <paul.e.luse@intel.com>
Reviewed-by: Monica Kenguva <monica.kenguva@intel.com>
parent 2f10d280
Loading
Loading
Loading
Loading
+0 −8
Original line number Diff line number Diff line
@@ -64,14 +64,6 @@ struct spdk_idxd_device;
 */
uint32_t spdk_idxd_get_socket(struct spdk_idxd_device *idxd);

/**
 * Signature for configuring a channel
 *
 * \param chan IDXD channel to be configured
 * \return 0 on success, negative errno on failure.
 */
int spdk_idxd_configure_chan(struct spdk_idxd_io_channel *chan);

/**
 * Signature for callback function invoked when a request is completed.
 *
+84 −107
Original line number Diff line number Diff line
@@ -91,91 +91,6 @@ _submit_to_hw(struct spdk_idxd_io_channel *chan, struct idxd_ops *op)
			      PORTAL_MASK;
}

struct spdk_idxd_io_channel *
spdk_idxd_get_channel(struct spdk_idxd_device *idxd)
{
	struct spdk_idxd_io_channel *chan;
	struct idxd_batch *batch;
	int i, num_batches;

	assert(idxd != NULL);

	chan = calloc(1, sizeof(struct spdk_idxd_io_channel));
	if (chan == NULL) {
		SPDK_ERRLOG("Failed to allocate idxd chan\n");
		return NULL;
	}

	num_batches = idxd->queues[idxd->wq_id].wqcfg.wq_size / idxd->chan_per_device;

	chan->batch_base = calloc(num_batches, sizeof(struct idxd_batch));
	if (chan->batch_base == NULL) {
		SPDK_ERRLOG("Failed to allocate batch pool\n");
		free(chan);
		return NULL;
	}

	pthread_mutex_lock(&idxd->num_channels_lock);
	if (idxd->num_channels == idxd->chan_per_device) {
		/* too many channels sharing this device */
		pthread_mutex_unlock(&idxd->num_channels_lock);
		free(chan->batch_base);
		free(chan);
		return NULL;
	}

	/* Have each channel start at a different offset. */
	chan->portal_offset = (idxd->num_channels * PORTAL_STRIDE) & PORTAL_MASK;

	idxd->num_channels++;
	pthread_mutex_unlock(&idxd->num_channels_lock);

	chan->idxd = idxd;
	TAILQ_INIT(&chan->ops_pool);
	TAILQ_INIT(&chan->batch_pool);
	TAILQ_INIT(&chan->ops_outstanding);

	batch = chan->batch_base;
	for (i = 0 ; i < num_batches ; i++) {
		TAILQ_INSERT_TAIL(&chan->batch_pool, batch, link);
		batch++;
	}

	return chan;
}

void
spdk_idxd_put_channel(struct spdk_idxd_io_channel *chan)
{
	struct idxd_batch *batch;

	assert(chan != NULL);

	pthread_mutex_lock(&chan->idxd->num_channels_lock);
	assert(chan->idxd->num_channels > 0);
	chan->idxd->num_channels--;
	pthread_mutex_unlock(&chan->idxd->num_channels_lock);

	spdk_free(chan->ops_base);
	spdk_free(chan->desc_base);
	while ((batch = TAILQ_FIRST(&chan->batch_pool))) {
		TAILQ_REMOVE(&chan->batch_pool, batch, link);
		spdk_free(batch->user_ops);
		spdk_free(batch->user_desc);
	}
	free(chan->batch_base);
	free(chan);
}

/* returns the total max operations for channel. */
int
spdk_idxd_chan_get_max_operations(struct spdk_idxd_io_channel *chan)
{
	assert(chan != NULL);

	return chan->idxd->total_wq_size / chan->idxd->chan_per_device;
}

inline static int
_vtophys(const void *buf, uint64_t *buf_addr, uint64_t size)
{
@@ -196,30 +111,57 @@ _vtophys(const void *buf, uint64_t *buf_addr, uint64_t size)
	return 0;
}

int
spdk_idxd_configure_chan(struct spdk_idxd_io_channel *chan)
struct spdk_idxd_io_channel *
spdk_idxd_get_channel(struct spdk_idxd_device *idxd)
{
	struct spdk_idxd_io_channel *chan;
	struct idxd_batch *batch;
	int rc, num_descriptors, i;
	struct idxd_hw_desc *desc;
	struct idxd_ops *op;
	int i, j, num_batches, num_descriptors, rc;

	assert(chan != NULL);
	assert(idxd != NULL);

	chan = calloc(1, sizeof(struct spdk_idxd_io_channel));
	if (chan == NULL) {
		SPDK_ERRLOG("Failed to allocate idxd chan\n");
		return NULL;
	}

	chan->idxd = idxd;
	TAILQ_INIT(&chan->ops_pool);
	TAILQ_INIT(&chan->batch_pool);
	TAILQ_INIT(&chan->ops_outstanding);

	/* Assign WQ, portal */
	pthread_mutex_lock(&idxd->num_channels_lock);
	if (idxd->num_channels == idxd->chan_per_device) {
		/* too many channels sharing this device */
		pthread_mutex_unlock(&idxd->num_channels_lock);
		goto err_chan;
	}

	/* Have each channel start at a different offset. */
	chan->portal = idxd->impl->portal_get_addr(idxd);
	chan->portal_offset = (idxd->num_channels * PORTAL_STRIDE) & PORTAL_MASK;
	idxd->num_channels++;

	/* Round robin the WQ selection for the chan on this IDXD device. */
	chan->idxd->wq_id++;
	if (chan->idxd->wq_id == g_dev_cfg->total_wqs) {
		chan->idxd->wq_id = 0;
	idxd->wq_id++;
	if (idxd->wq_id == g_dev_cfg->total_wqs) {
		idxd->wq_id = 0;
	}

	num_descriptors = chan->idxd->queues[chan->idxd->wq_id].wqcfg.wq_size / chan->idxd->chan_per_device;
	pthread_mutex_unlock(&idxd->num_channels_lock);

	/* Allocate descriptors and completions */
	num_descriptors = idxd->queues[idxd->wq_id].wqcfg.wq_size / idxd->chan_per_device;
	chan->desc_base = desc = spdk_zmalloc(num_descriptors * sizeof(struct idxd_hw_desc),
					      0x40, NULL,
					      SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (chan->desc_base == NULL) {
		SPDK_ERRLOG("Failed to allocate descriptor memory\n");
		return -ENOMEM;
		goto err_chan;
	}

	chan->ops_base = op = spdk_zmalloc(num_descriptors * sizeof(struct idxd_ops),
@@ -227,7 +169,6 @@ spdk_idxd_configure_chan(struct spdk_idxd_io_channel *chan)
					   SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (chan->ops_base == NULL) {
		SPDK_ERRLOG("Failed to allocate completion memory\n");
		rc = -ENOMEM;
		goto err_op;
	}

@@ -237,21 +178,26 @@ spdk_idxd_configure_chan(struct spdk_idxd_io_channel *chan)
		rc = _vtophys(&op->hw, &desc->completion_addr, sizeof(struct idxd_hw_comp_record));
		if (rc) {
			SPDK_ERRLOG("Failed to translate completion memory\n");
			rc = -ENOMEM;
			goto err_op;
		}
		op++;
		desc++;
	}

	/* Populate the batches */
	TAILQ_FOREACH(batch, &chan->batch_pool, link) {
	/* Allocate batches */
	num_batches = idxd->queues[idxd->wq_id].wqcfg.wq_size / idxd->chan_per_device;
	chan->batch_base = calloc(num_batches, sizeof(struct idxd_batch));
	if (chan->batch_base == NULL) {
		SPDK_ERRLOG("Failed to allocate batch pool\n");
		goto err_op;
	}
	batch = chan->batch_base;
	for (i = 0 ; i < num_batches ; i++) {
		batch->user_desc = desc = spdk_zmalloc(DESC_PER_BATCH * sizeof(struct idxd_hw_desc),
						       0x40, NULL,
						       SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		if (batch->user_desc == NULL) {
			SPDK_ERRLOG("Failed to allocate batch descriptor memory\n");
			rc = -ENOMEM;
			goto err_user_desc_or_op;
		}

@@ -259,7 +205,6 @@ spdk_idxd_configure_chan(struct spdk_idxd_io_channel *chan)
			      DESC_PER_BATCH * sizeof(struct idxd_hw_desc));
		if (rc) {
			SPDK_ERRLOG("Failed to translate batch descriptor memory\n");
			rc = -ENOMEM;
			goto err_user_desc_or_op;
		}

@@ -268,25 +213,24 @@ spdk_idxd_configure_chan(struct spdk_idxd_io_channel *chan)
						    SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
		if (batch->user_ops == NULL) {
			SPDK_ERRLOG("Failed to allocate user completion memory\n");
			rc = -ENOMEM;
			goto err_user_desc_or_op;
		}

		for (i = 0; i < DESC_PER_BATCH; i++) {
		for (j = 0; j < DESC_PER_BATCH; j++) {
			rc = _vtophys(&op->hw, &desc->completion_addr, sizeof(struct idxd_hw_comp_record));
			if (rc) {
				SPDK_ERRLOG("Failed to translate batch entry completion memory\n");
				rc = -ENOMEM;
				goto err_user_desc_or_op;
			}
			op++;
			desc++;
		}
	}

	chan->portal = chan->idxd->impl->portal_get_addr(chan->idxd);
		TAILQ_INSERT_TAIL(&chan->batch_pool, batch, link);
		batch++;
	}

	return 0;
	return chan;

err_user_desc_or_op:
	TAILQ_FOREACH(batch, &chan->batch_pool, link) {
@@ -300,8 +244,41 @@ err_user_desc_or_op:
err_op:
	spdk_free(chan->desc_base);
	chan->desc_base = NULL;
err_chan:
	free(chan);
	return NULL;
}

	return rc;
void
spdk_idxd_put_channel(struct spdk_idxd_io_channel *chan)
{
	struct idxd_batch *batch;

	assert(chan != NULL);

	pthread_mutex_lock(&chan->idxd->num_channels_lock);
	assert(chan->idxd->num_channels > 0);
	chan->idxd->num_channels--;
	pthread_mutex_unlock(&chan->idxd->num_channels_lock);

	spdk_free(chan->ops_base);
	spdk_free(chan->desc_base);
	while ((batch = TAILQ_FIRST(&chan->batch_pool))) {
		TAILQ_REMOVE(&chan->batch_pool, batch, link);
		spdk_free(batch->user_ops);
		spdk_free(batch->user_desc);
	}
	free(chan->batch_base);
	free(chan);
}

/* returns the total max operations for channel. */
int
spdk_idxd_chan_get_max_operations(struct spdk_idxd_io_channel *chan)
{
	assert(chan != NULL);

	return chan->idxd->total_wq_size / chan->idxd->chan_per_device;
}

static inline struct spdk_idxd_impl *
+0 −1
Original line number Diff line number Diff line
@@ -3,7 +3,6 @@

	# public functions
	spdk_idxd_chan_get_max_operations;
	spdk_idxd_configure_chan;
	spdk_idxd_probe;
	spdk_idxd_detach;
	spdk_idxd_batch_prep_copy;
+0 −10
Original line number Diff line number Diff line
@@ -327,7 +327,6 @@ idxd_create_cb(void *io_device, void *ctx_buf)
{
	struct idxd_io_channel *chan = ctx_buf;
	struct idxd_device *dev;
	int rc;

	dev = idxd_select_device(chan);
	if (dev == NULL) {
@@ -338,15 +337,6 @@ idxd_create_cb(void *io_device, void *ctx_buf)
	chan->dev = dev;
	chan->poller = SPDK_POLLER_REGISTER(idxd_poll, chan, 0);
	TAILQ_INIT(&chan->queued_tasks);

	rc = spdk_idxd_configure_chan(chan->chan);
	if (rc) {
		SPDK_ERRLOG("Failed to configure new channel rc = %d\n", rc);
		chan->state = IDXD_CHANNEL_ERROR;
		spdk_poller_unregister(&chan->poller);
		return rc;
	}

	chan->num_outstanding = 0;
	chan->state = IDXD_CHANNEL_ACTIVE;