Commit d75daea5 authored by Shuhei Matsumoto's avatar Shuhei Matsumoto Committed by Tomasz Zawadzki
Browse files

nvme_rdma: Use persistent protection domain for qpair



Get a PD for the device from the PD pool managed by the RDMA provider
when creating a QP, and put the PD when destroying the QP.

By this change, PD is managed completely by the RDMA provider or the hooks.
nvme_rdma_ctrlr::pd was added a long time ago but is not referenced
anywhere. Remove nvme_rdma_ctrlr::pd for cleanup and clarification.

Signed-off-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
Change-Id: If8dc8ad011eed70149012128bd1b33f1a8b7b90b
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/13770


Community-CI: Mellanox Build Bot
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Aleksey Marchuk <alexeymar@nvidia.com>
parent b5f360c4
Loading
Loading
Loading
Loading
+3 −7
Original line number Diff line number Diff line
@@ -113,8 +113,6 @@ struct nvme_rdma_cm_event_entry {
struct nvme_rdma_ctrlr {
	struct spdk_nvme_ctrlr			ctrlr;

	struct ibv_pd				*pd;

	uint16_t				max_sge;

	struct rdma_event_channel		*cm_channel;
@@ -764,12 +762,11 @@ nvme_rdma_qpair_init(struct nvme_rdma_qpair *rqpair)

	rctrlr = nvme_rdma_ctrlr(rqpair->qpair.ctrlr);
	if (g_nvme_hooks.get_ibv_pd) {
		rctrlr->pd = g_nvme_hooks.get_ibv_pd(&rctrlr->ctrlr.trid, rqpair->cm_id->verbs);
		attr.pd = g_nvme_hooks.get_ibv_pd(&rctrlr->ctrlr.trid, rqpair->cm_id->verbs);
	} else {
		rctrlr->pd = NULL;
		attr.pd = spdk_rdma_get_pd(rqpair->cm_id->verbs);
	}

	attr.pd =		rctrlr->pd;
	attr.stats =		rqpair->poller ? &rqpair->poller->stats.rdma_stats : NULL;
	attr.send_cq		= rqpair->cq;
	attr.recv_cq		= rqpair->cq;
@@ -796,8 +793,6 @@ nvme_rdma_qpair_init(struct nvme_rdma_qpair *rqpair)
	rqpair->current_num_recvs = 0;
	rqpair->current_num_sends = 0;

	rctrlr->pd = rqpair->rdma_qp->qp->pd;

	rqpair->cm_id->context = rqpair;

	return 0;
@@ -1963,6 +1958,7 @@ nvme_rdma_qpair_destroy(struct nvme_rdma_qpair *rqpair)

	if (rqpair->cm_id) {
		if (rqpair->rdma_qp) {
			spdk_rdma_put_pd(rqpair->rdma_qp->qp->pd);
			spdk_rdma_qp_destroy(rqpair->rdma_qp);
			rqpair->rdma_qp = NULL;
		}
+10 −0
Original line number Diff line number Diff line
@@ -65,3 +65,13 @@ spdk_rdma_get_translation(struct spdk_rdma_mem_map *map, void *address,

	return 0;
}

/*
 * Unit-test mock for spdk_rdma_get_pd(): tests override the returned
 * protection domain via MOCK_SET(spdk_rdma_get_pd, pd); when no mock
 * value is set, HANDLE_RETURN_MOCK falls through and NULL is returned.
 */
DEFINE_RETURN_MOCK(spdk_rdma_get_pd, struct ibv_pd *);
struct ibv_pd *
spdk_rdma_get_pd(struct ibv_context *context)
{
	HANDLE_RETURN_MOCK(spdk_rdma_get_pd);
	return NULL;
}

/* No-op stub for spdk_rdma_put_pd(): releasing a PD has no observable
 * effect the tests need to model. */
DEFINE_STUB_V(spdk_rdma_put_pd, (struct ibv_pd *pd));
+3 −0
Original line number Diff line number Diff line
@@ -1076,6 +1076,7 @@ test_nvme_rdma_qpair_init(void)
	rqpair.qpair.poll_group = NULL;
	rqpair.qpair.ctrlr = &rctrlr.ctrlr;
	g_spdk_rdma_qp.qp = &qp;
	MOCK_SET(spdk_rdma_get_pd, pd);

	rc = nvme_rdma_qpair_init(&rqpair);
	CU_ASSERT(rc == 0);
@@ -1087,6 +1088,8 @@ test_nvme_rdma_qpair_init(void)
	CU_ASSERT(rqpair.current_num_recvs == 0);
	CU_ASSERT(rqpair.cq == (struct ibv_cq *)0xFEEDBEEF);
	CU_ASSERT(rqpair.memory_domain != NULL);

	MOCK_CLEAR(spdk_rdma_get_pd);
}

static void