Commit 20f13426 authored by Ben Walker, committed by Jim Harris
Browse files

nvmf/rdma: Create pd and memory map at transport initialization



Instead of waiting until the first listen address is added,
create a protection domain and a memory map for every RDMA
device in the system. This consumes more resources when there
are RDMA devices that aren't used by the target, but it
will simplify some order of operations issues when listen
addresses and poll groups are added and removed at run
time.

Change-Id: Idfe6f8307decbf19e02765dbf67f03c2510a328f
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/422602


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Seth Howell <seth.howell5141@gmail.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
parent d9b3149e
Loading
Loading
Loading
Loading
+20 −29
Original line number Diff line number Diff line
@@ -566,7 +566,7 @@ spdk_nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
	rqpair->ibv_init_attr.cap.max_send_sge	= rqpair->max_sge;
	rqpair->ibv_init_attr.cap.max_recv_sge	= NVMF_DEFAULT_RX_SGE;

	rc = rdma_create_qp(rqpair->cm_id, NULL, &rqpair->ibv_init_attr);
	rc = rdma_create_qp(rqpair->cm_id, rqpair->port->device->pd, &rqpair->ibv_init_attr);
	if (rc) {
		SPDK_ERRLOG("rdma_create_qp failed: errno %d: %s\n", errno, spdk_strerror(errno));
		rdma_destroy_id(rqpair->cm_id);
@@ -1547,8 +1547,22 @@ spdk_nvmf_rdma_create(struct spdk_nvmf_tgt *tgt)
			break;
		}

		device->pd = NULL;
		device->map = NULL;
		device->pd = ibv_alloc_pd(device->context);
		if (!device->pd) {
			SPDK_ERRLOG("Unable to allocate protection domain.\n");
			free(device);
			rc = -1;
			break;
		}

		device->map = spdk_mem_map_alloc(0, spdk_nvmf_rdma_mem_notify, device);
		if (!device->map) {
			SPDK_ERRLOG("Unable to allocate memory map for new poll group\n");
			ibv_dealloc_pd(device->pd);
			free(device);
			rc = -1;
			break;
		}

		TAILQ_INSERT_TAIL(&rtransport->devices, device, link);
		i++;
@@ -1611,6 +1625,9 @@ spdk_nvmf_rdma_destroy(struct spdk_nvmf_transport *transport)
		if (device->map) {
			spdk_mem_map_free(&device->map);
		}
		if (device->pd) {
			ibv_dealloc_pd(device->pd);
		}
		free(device);
	}

@@ -1745,17 +1762,6 @@ spdk_nvmf_rdma_listen(struct spdk_nvmf_transport *transport,
		return -EINVAL;
	}

	if (!device->map) {
		device->pd = port->id->pd;
		device->map = spdk_mem_map_alloc(0, spdk_nvmf_rdma_mem_notify, device);
		if (!device->map) {
			SPDK_ERRLOG("Unable to allocate memory map for new poll group\n");
			return -1;
		}
	} else {
		assert(device->pd == port->id->pd);
	}

	SPDK_INFOLOG(SPDK_LOG_RDMA, "*** NVMf Target Listening on %s port %d ***\n",
		     port->trid.traddr, ntohs(rdma_get_src_port(port->id)));

@@ -2215,16 +2221,6 @@ spdk_nvmf_rdma_poll_group_create(struct spdk_nvmf_transport *transport)

	pthread_mutex_lock(&rtransport->lock);
	TAILQ_FOREACH(device, &rtransport->devices, link) {
		if (device->map == NULL) {
			/*
			 * The device is not in use (no listeners),
			 * so no protection domain has been constructed.
			 * Skip it.
			 */
			SPDK_NOTICELOG("Skipping unused RDMA device when creating poll group.\n");
			continue;
		}

		poller = calloc(1, sizeof(*poller));
		if (!poller) {
			SPDK_ERRLOG("Unable to allocate memory for new RDMA poller\n");
@@ -2301,11 +2297,6 @@ spdk_nvmf_rdma_poll_group_add(struct spdk_nvmf_transport_poll_group *group,

	device = rqpair->port->device;

	if (device->pd != rqpair->cm_id->pd) {
		SPDK_ERRLOG("Mismatched protection domains\n");
		return -1;
	}

	TAILQ_FOREACH(poller, &rgroup->pollers, link) {
		if (poller->device == device) {
			break;