Commit 54c394c4 authored by Seth Howell's avatar Seth Howell Committed by Jim Harris
Browse files

nvmf/rdma: cleanup qpairs and reqs on poll group deletion.



Change-Id: I6dedf295b80148f37f75ebd5553f18dae76b2ab8
Signed-off-by: Seth Howell <seth.howell@intel.com>
Signed-off-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-on: https://review.gerrithub.io/421166


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
parent ed60507d
Loading
Loading
Loading
Loading
+25 −13
Original line number Diff line number Diff line
@@ -2005,22 +2005,11 @@ error:
}

static void
_spdk_nvmf_rdma_qp_error(void *arg)
_spdk_nvmf_rdma_qp_cleanup_all_states(struct spdk_nvmf_rdma_qpair *rqpair)
{
	struct spdk_nvmf_rdma_qpair	*rqpair = arg;
	struct spdk_nvmf_rdma_request	*rdma_req, *req_tmp;
	enum ibv_qp_state		state;

	state = spdk_nvmf_rdma_update_ibv_state(rqpair);
	if (state != IBV_QPS_ERR) {
		/* Error was already recovered */
		return;
	}

	if (spdk_nvmf_qpair_is_admin_queue(&rqpair->qpair)) {
		spdk_nvmf_ctrlr_abort_aer(rqpair->qpair.ctrlr);
	}

	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEW);
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_DATA_TRANSFER_PENDING);

	/* First wipe the requests waiting for buffer from the global list */
@@ -2030,10 +2019,28 @@ _spdk_nvmf_rdma_qp_error(void *arg)
	/* Then drain the requests through the rdma queue */
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_NEED_BUFFER);

	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_EXECUTING);
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	spdk_nvmf_rdma_drain_state_queue(rqpair, RDMA_REQUEST_STATE_COMPLETING);
}

/*
 * Handle a qpair that has transitioned to the IBV error state:
 * drain all outstanding requests and attempt to recover the qpair.
 * 'arg' is a struct spdk_nvmf_rdma_qpair * (void * because this is
 * presumably invoked as a deferred/event callback — TODO confirm caller).
 */
static void
_spdk_nvmf_rdma_qp_error(void *arg)
{
	struct spdk_nvmf_rdma_qpair	*rqpair = arg;
	enum ibv_qp_state		state;

	/* Re-read the current IBV state; another path may have already
	 * recovered the qpair before this callback ran. */
	state = spdk_nvmf_rdma_update_ibv_state(rqpair);
	if (state != IBV_QPS_ERR) {
		/* Error was already recovered */
		return;
	}

	/* Admin queues may have an outstanding Asynchronous Event Request;
	 * abort it so it does not linger while the queue is drained. */
	if (spdk_nvmf_qpair_is_admin_queue(&rqpair->qpair)) {
		spdk_nvmf_ctrlr_abort_aer(rqpair->qpair.ctrlr);
	}
	/* Drain requests from every request state, then try to recover. */
	_spdk_nvmf_rdma_qp_cleanup_all_states(rqpair);
	spdk_nvmf_rdma_qpair_recover(rqpair);
}

@@ -2216,6 +2223,7 @@ spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_rdma_poll_group	*rgroup;
	struct spdk_nvmf_rdma_poller		*poller, *tmp;
	struct spdk_nvmf_rdma_qpair		*qpair, *tmp_qpair;

	rgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_rdma_poll_group, group);

@@ -2229,6 +2237,10 @@ spdk_nvmf_rdma_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
		if (poller->cq) {
			ibv_destroy_cq(poller->cq);
		}
		TAILQ_FOREACH_SAFE(qpair, &poller->qpairs, link, tmp_qpair) {
			_spdk_nvmf_rdma_qp_cleanup_all_states(qpair);
			spdk_nvmf_rdma_qpair_destroy(qpair);
		}

		free(poller);
	}