Commit a68e0930 authored by Jacek Kalwas, committed by Tomasz Zawadzki

nvmf: fix iobuf module and channel usage

The iobuf module can be left unregistered only if neither shared buffers
nor a buffer cache is required. Previously only shared buffers were
considered, but having shared buffers set to 0 and the cache set to a
non-zero value is a valid configuration.
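
A minimal sketch of such a configuration, assuming the public
spdk_nvmf_transport_opts API; the configure_transport_opts() wrapper and
the concrete values are illustrative, not part of this change:

    #include "spdk/nvmf.h"

    /* Sketch only: shared buffers disabled, per-poll-group cache enabled.
     * Before this fix the iobuf module was not registered for such a
     * transport, even though its poll groups still opened an iobuf
     * channel. */
    static int
    configure_transport_opts(struct spdk_nvmf_transport_opts *opts)
    {
            if (!spdk_nvmf_transport_opts_init("TCP", opts, sizeof(*opts))) {
                    return -EINVAL;
            }
            opts->num_shared_buffers = 0;   /* no shared data buffers */
            opts->buf_cache_size = 32;      /* non-zero buffer cache */
            return 0;
    }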

Since a transport can use iobuf without a cache configured, the
conditions in transport poll group create/destroy were adjusted to
reflect that.
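
The adjusted pairing can be sketched as below, reusing the
nvmf_transport_use_iobuf() helper introduced in the diff; the
poll_group_iobuf_setup()/poll_group_iobuf_teardown() names are
hypothetical illustrations, not the actual SPDK functions:

    /* Hypothetical helpers, assuming the headers used by lib/nvmf:
     * poll group channel setup/teardown must be gated on the same
     * predicate as module registration, otherwise
     * spdk_iobuf_channel_init() would run against a module that was
     * never registered. */
    static int
    poll_group_iobuf_setup(struct spdk_nvmf_transport *transport,
                           struct spdk_iobuf_channel *ch)
    {
            if (!nvmf_transport_use_iobuf(transport)) {
                    return 0;       /* neither shared buffers nor cache in use */
            }
            /* cache sizes shown here are illustrative */
            return spdk_iobuf_channel_init(ch, transport->iobuf_name,
                                           transport->opts.buf_cache_size,
                                           transport->opts.buf_cache_size);
    }

    static void
    poll_group_iobuf_teardown(struct spdk_nvmf_transport *transport,
                              struct spdk_iobuf_channel *ch)
    {
            if (nvmf_transport_use_iobuf(transport)) {
                    spdk_iobuf_channel_fini(ch);    /* mirrors the setup condition */
            }
    }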

Change-Id: I6a7bbe529368a654709dc57cf2a876de5e062baa
Signed-off-by: Jacek Kalwas <jacek.kalwas@intel.com>
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/19998
Reviewed-by: Jim Harris <jim.harris@samsung.com>
Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
parent c2b5b28c
+14 −6
@@ -165,6 +165,12 @@ struct nvmf_transport_create_ctx {
 	spdk_nvmf_transport_create_done_cb cb_fn;
 };
 
+static bool
+nvmf_transport_use_iobuf(struct spdk_nvmf_transport *transport)
+{
+	return transport->opts.num_shared_buffers || transport->opts.buf_cache_size;
+}
+
 static void
 nvmf_transport_create_async_done(void *cb_arg, struct spdk_nvmf_transport *transport)
 {
@@ -187,7 +193,7 @@ nvmf_transport_create_async_done(void *cb_arg, struct spdk_nvmf_transport *trans
 		goto err;
 	}
 
-	if (ctx->opts.num_shared_buffers) {
+	if (nvmf_transport_use_iobuf(transport)) {
 		spdk_iobuf_register_module(transport->iobuf_name);
 	}
 
@@ -358,7 +364,7 @@ spdk_nvmf_transport_destroy(struct spdk_nvmf_transport *transport,
 		free(listener);
 	}
 
-	if (transport->opts.num_shared_buffers) {
+	if (nvmf_transport_use_iobuf(transport)) {
 		spdk_iobuf_unregister_module(transport->iobuf_name);
 	}
 
@@ -569,8 +575,8 @@ nvmf_transport_poll_group_create(struct spdk_nvmf_transport *transport,

 	STAILQ_INIT(&tgroup->pending_buf_queue);
 
-	if (transport->opts.buf_cache_size == 0) {
-		/* We aren't going to allocate any buffers for the cache, so just return now. */
+	if (!nvmf_transport_use_iobuf(transport)) {
+		/* We aren't going to allocate any shared buffers or cache, so just return now. */
 		return tgroup;
 	}
 
@@ -648,7 +654,7 @@ nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 		SPDK_ERRLOG("Pending I/O list wasn't empty on poll group destruction\n");
 	}
 
-	if (transport->opts.buf_cache_size) {
+	if (nvmf_transport_use_iobuf(transport)) {
 		/* The call to poll_group_destroy both frees the group memory,
 		 * but also releases any remaining buffers. Make a copy of
 		 * the channel onto the stack so we can still release the
@@ -660,7 +666,7 @@ nvmf_transport_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
 	transport->ops->poll_group_destroy(group);
 	pthread_mutex_unlock(&transport->mutex);
 
-	if (transport->opts.buf_cache_size) {
+	if (nvmf_transport_use_iobuf(transport)) {
 		spdk_iobuf_channel_fini(&ch);
 	}
 }
@@ -862,6 +868,8 @@ spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
 {
 	int rc;
 
+	assert(nvmf_transport_use_iobuf(transport));
+
 	req->iovcnt = 0;
 	rc = nvmf_request_get_buffers(req, group, transport, length,
 				      transport->opts.io_unit_size,