Commit 62266a72 authored by Seth Howell, committed by Jim Harris

rdma: allocate protection domains for devices up front.



We were only using one pd per device anyway, and allocating them up front is
necessary for shared receive queue support.

Change-Id: I86668d5b7256277fe50836863408af2215b5adf9
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/447385


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
parent bbf7627c
+34 −62
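Before the diff, a brief illustration of the pattern this commit adopts. The sketch below is not SPDK code: the names my_rdma_device and allocate_device_pds are invented for illustration, and error handling is reduced to the essentials. It shows one protection domain being allocated per RDMA device at transport creation time, which is what the new code in spdk_nvmf_rdma_create() does instead of allocating lazily inside spdk_nvmf_rdma_listen().

/* Illustrative sketch only -- not SPDK code.  Allocate one ibv_pd per RDMA
 * device up front, when the transport is created. */
#include <rdma/rdma_cma.h>
#include <infiniband/verbs.h>
#include <stdio.h>

struct my_rdma_device {
	struct ibv_context	*context;
	struct ibv_pd		*pd;	/* one PD per device, created up front */
};

static int
allocate_device_pds(struct my_rdma_device *devices, int max_devices, int *num_out)
{
	struct ibv_context **contexts;
	int num_devices, i;

	contexts = rdma_get_devices(&num_devices);
	if (contexts == NULL) {
		return -1;
	}
	if (num_devices > max_devices) {
		rdma_free_devices(contexts);
		return -1;
	}

	for (i = 0; i < num_devices; i++) {
		devices[i].context = contexts[i];
		/* SPDK first consults g_nvmf_hooks.get_ibv_pd() so an application
		 * can supply its own PD; this sketch always allocates one directly.
		 * A real implementation would also ibv_dealloc_pd() any PDs already
		 * created before bailing out here. */
		devices[i].pd = ibv_alloc_pd(contexts[i]);
		if (devices[i].pd == NULL) {
			fprintf(stderr, "Unable to allocate protection domain.\n");
			rdma_free_devices(contexts);
			return -1;
		}
	}

	*num_out = num_devices;
	rdma_free_devices(contexts);	/* frees only the array; the contexts stay valid */
	return 0;
}

With every device carrying its own pd (and memory map) from the start, the listen path no longer needs to create them on demand, which is what the removed hunks below reflect.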
@@ -1651,6 +1651,11 @@ spdk_nvmf_rdma_opts_init(struct spdk_nvmf_transport_opts *opts)
 	opts->buf_cache_size =		SPDK_NVMF_RDMA_DEFAULT_BUFFER_CACHE_SIZE;
 }
 
+const struct spdk_mem_map_ops g_nvmf_rdma_map_ops = {
+	.notify_cb = spdk_nvmf_rdma_mem_notify,
+	.are_contiguous = spdk_nvmf_rdma_check_contiguous_entries
+};
+
 static int spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport);
 
 static struct spdk_nvmf_transport *
@@ -1659,6 +1664,7 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)
 	int rc;
 	struct spdk_nvmf_rdma_transport *rtransport;
 	struct spdk_nvmf_rdma_device	*device, *tmp;
+	struct ibv_pd			*pd;
 	struct ibv_context		**contexts;
 	uint32_t			i;
 	int				flag;
@@ -1809,6 +1815,34 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_transport_opts *opts)

 		TAILQ_INSERT_TAIL(&rtransport->devices, device, link);
 		i++;
+
+		pd = NULL;
+		if (g_nvmf_hooks.get_ibv_pd) {
+			pd = g_nvmf_hooks.get_ibv_pd(NULL, device->context);
+		}
+
+		if (!g_nvmf_hooks.get_ibv_pd) {
+			device->pd = ibv_alloc_pd(device->context);
+			if (!device->pd) {
+				SPDK_ERRLOG("Unable to allocate protection domain.\n");
+				spdk_nvmf_rdma_destroy(&rtransport->transport);
+				return NULL;
+			}
+		} else {
+			device->pd = pd;
+		}
+
+		assert(device->map == NULL);
+
+		device->map = spdk_mem_map_alloc(0, &g_nvmf_rdma_map_ops, device->pd);
+		if (!device->map) {
+			SPDK_ERRLOG("Unable to allocate memory map for listen address\n");
+			spdk_nvmf_rdma_destroy(&rtransport->transport);
+			return NULL;
+		}
+
+		assert(device->map != NULL);
+		assert(device->pd != NULL);
 	}
 	rdma_free_devices(contexts);

@@ -1909,11 +1943,6 @@ spdk_nvmf_rdma_trid_from_cm_id(struct rdma_cm_id *id,
 			       struct spdk_nvme_transport_id *trid,
 			       bool peer);
 
-const struct spdk_mem_map_ops g_nvmf_rdma_map_ops = {
-	.notify_cb = spdk_nvmf_rdma_mem_notify,
-	.are_contiguous = spdk_nvmf_rdma_check_contiguous_entries
-};
-
 static int
 spdk_nvmf_rdma_listen(struct spdk_nvmf_transport *transport,
 		      const struct spdk_nvme_transport_id *trid)
@@ -1921,7 +1950,6 @@ spdk_nvmf_rdma_listen(struct spdk_nvmf_transport *transport,
 	struct spdk_nvmf_rdma_transport	*rtransport;
 	struct spdk_nvmf_rdma_device	*device;
 	struct spdk_nvmf_rdma_port	*port_tmp, *port;
-	struct ibv_pd			*pd;
 	struct addrinfo			*res;
 	struct addrinfo			hints;
 	int				family;
@@ -2033,62 +2061,6 @@ spdk_nvmf_rdma_listen(struct spdk_nvmf_transport *transport,
 		return -EINVAL;
 	}
 
-	pd = NULL;
-	if (g_nvmf_hooks.get_ibv_pd) {
-		if (spdk_nvmf_rdma_trid_from_cm_id(port->id, &port->trid, 1) < 0) {
-			rdma_destroy_id(port->id);
-			free(port);
-			pthread_mutex_unlock(&rtransport->lock);
-			return -EINVAL;
-		}
-
-		pd = g_nvmf_hooks.get_ibv_pd(&port->trid, port->id->verbs);
-	}
-
-	if (device->pd == NULL) {
-		/* Haven't created a protection domain yet. */
-
-		if (!g_nvmf_hooks.get_ibv_pd) {
-			device->pd = ibv_alloc_pd(device->context);
-			if (!device->pd) {
-				SPDK_ERRLOG("Unable to allocate protection domain.\n");
-				rdma_destroy_id(port->id);
-				free(port);
-				pthread_mutex_unlock(&rtransport->lock);
-				return -ENOMEM;
-			}
-		} else {
-			device->pd = pd;
-		}
-
-		assert(device->map == NULL);
-
-		device->map = spdk_mem_map_alloc(0, &g_nvmf_rdma_map_ops, device->pd);
-		if (!device->map) {
-			SPDK_ERRLOG("Unable to allocate memory map for listen address\n");
-			if (!g_nvmf_hooks.get_ibv_pd) {
-				ibv_dealloc_pd(device->pd);
-			}
-			rdma_destroy_id(port->id);
-			free(port);
-			pthread_mutex_unlock(&rtransport->lock);
-			return -ENOMEM;
-		}
-	} else if (g_nvmf_hooks.get_ibv_pd) {
-		/* A protection domain exists for this device, but the user has
-		 * enabled hooks. Verify that they only supply one pd per device. */
-		if (device->pd != pd) {
-			SPDK_ERRLOG("The NVMe-oF target only supports one protection domain per device.\n");
-			rdma_destroy_id(port->id);
-			free(port);
-			pthread_mutex_unlock(&rtransport->lock);
-			return -EINVAL;
-		}
-	}
-
-	assert(device->map != NULL);
-	assert(device->pd != NULL);
-
 	SPDK_INFOLOG(SPDK_LOG_RDMA, "*** NVMf Target Listening on %s port %d ***\n",
 		     port->trid.traddr, ntohs(rdma_get_src_port(port->id)));
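A closing note on the motivation stated in the commit message: a shared receive queue is bound to a single protection domain when it is created, so the PD has to exist before any queue pairs or listeners are set up. The fragment below is a generic libibverbs sketch, not SPDK code; create_device_srq, max_wr and max_sge are placeholder names for illustration.

/* Sketch: an SRQ is tied to one protection domain at creation time, which is
 * why the per-device PD must exist up front.  Not SPDK code. */
#include <infiniband/verbs.h>

static struct ibv_srq *
create_device_srq(struct ibv_pd *pd, uint32_t max_wr, uint32_t max_sge)
{
	struct ibv_srq_init_attr attr = {
		.attr = {
			.max_wr  = max_wr,	/* receive WRs shared by all QPs on the SRQ */
			.max_sge = max_sge,	/* scatter/gather entries per receive WR */
		},
	};

	/* ibv_create_srq() takes the PD; the queue pairs that share this SRQ
	 * are created on the same protection domain. */
	return ibv_create_srq(pd, &attr);
}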