Commit cf151d60 authored by Alexey Marchuk, committed by Tomasz Zawadzki

lib/rdma: Rename lib to rdma_provider



The new name better reflects the purpose of this library.
The next patch moves some of the functions to a dedicated
lib, and the new name helps to avoid confusion.

Signed-off-by: Alexey Marchuk <alexeymar@nvidia.com>
Change-Id: If7296ed77a07f7084bce66971d6937d7671b3a91
Reviewed-on: https://review.spdk.io/gerrit/c/spdk/spdk/+/23071


Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Community-CI: Mellanox Build Bot
Reviewed-by: Ben Walker <ben@nvidia.com>
Reviewed-by: Shuhei Matsumoto <smatsumoto@nvidia.com>
parent 5b333e40
+40 −32
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (c) Mellanox Technologies LTD. All rights reserved.
- *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *   Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#ifndef SPDK_RDMA_H
@@ -17,44 +17,44 @@
#define SPDK_RDMA_RXE_VENDOR_ID_OLD 0
#define SPDK_RDMA_RXE_VENDOR_ID_NEW 0XFFFFFF

-struct spdk_rdma_wr_stats {
+struct spdk_rdma_provider_wr_stats {
	/* Total number of submitted requests */
	uint64_t num_submitted_wrs;
	/* Total number of doorbell updates */
	uint64_t doorbell_updates;
};

-struct spdk_rdma_qp_stats {
-	struct spdk_rdma_wr_stats send;
-	struct spdk_rdma_wr_stats recv;
+struct spdk_rdma_provider_qp_stats {
+	struct spdk_rdma_provider_wr_stats send;
+	struct spdk_rdma_provider_wr_stats recv;
};

-struct spdk_rdma_qp_init_attr {
+struct spdk_rdma_provider_qp_init_attr {
	void		       *qp_context;
	struct ibv_cq	       *send_cq;
	struct ibv_cq	       *recv_cq;
	struct ibv_srq	       *srq;
	struct ibv_qp_cap	cap;
	struct ibv_pd	       *pd;
-	struct spdk_rdma_qp_stats *stats;
+	struct spdk_rdma_provider_qp_stats *stats;
};

-struct spdk_rdma_send_wr_list {
+struct spdk_rdma_provider_send_wr_list {
	struct ibv_send_wr	*first;
	struct ibv_send_wr	*last;
};

-struct spdk_rdma_recv_wr_list {
+struct spdk_rdma_provider_recv_wr_list {
	struct ibv_recv_wr	*first;
	struct ibv_recv_wr	*last;
};

-struct spdk_rdma_qp {
+struct spdk_rdma_provider_qp {
	struct ibv_qp *qp;
	struct rdma_cm_id *cm_id;
-	struct spdk_rdma_send_wr_list send_wrs;
-	struct spdk_rdma_recv_wr_list recv_wrs;
-	struct spdk_rdma_qp_stats *stats;
+	struct spdk_rdma_provider_send_wr_list send_wrs;
+	struct spdk_rdma_provider_recv_wr_list recv_wrs;
+	struct spdk_rdma_provider_qp_stats *stats;
	bool shared_stats;
};

@@ -74,16 +74,16 @@ struct spdk_rdma_memory_translation {
	union spdk_rdma_mr mr_or_key;
	uint8_t translation_type;
};
-struct spdk_rdma_srq_init_attr {
+struct spdk_rdma_provider_srq_init_attr {
	struct ibv_pd *pd;
-	struct spdk_rdma_wr_stats *stats;
+	struct spdk_rdma_provider_wr_stats *stats;
	struct ibv_srq_init_attr srq_init_attr;
};

-struct spdk_rdma_srq {
+struct spdk_rdma_provider_srq {
	struct ibv_srq *srq;
-	struct spdk_rdma_recv_wr_list recv_wrs;
-	struct spdk_rdma_wr_stats *stats;
+	struct spdk_rdma_provider_recv_wr_list recv_wrs;
+	struct spdk_rdma_provider_wr_stats *stats;
	bool shared_stats;
};

@@ -98,7 +98,8 @@ enum spdk_rdma_memory_map_role {
 * \param init_attr Pointer to SRQ init attr
 * \return Pointer to SRQ on success or NULL on failure; errno is updated on failure.
 */
-struct spdk_rdma_srq *spdk_rdma_srq_create(struct spdk_rdma_srq_init_attr *init_attr);
+struct spdk_rdma_provider_srq *spdk_rdma_provider_srq_create(
+	struct spdk_rdma_provider_srq_init_attr *init_attr);

/**
 * Destroy RDMA SRQ
@@ -106,7 +107,7 @@ struct spdk_rdma_srq *spdk_rdma_srq_create(struct spdk_rdma_srq_init_attr *init_
 * \param rdma_srq Pointer to SRQ
 * \return 0 on success, errno on failure
 */
-int spdk_rdma_srq_destroy(struct spdk_rdma_srq *rdma_srq);
+int spdk_rdma_provider_srq_destroy(struct spdk_rdma_provider_srq *rdma_srq);

/**
 * Append the given recv wr structure to the SRQ's outstanding recv list.
@@ -116,7 +117,8 @@ int spdk_rdma_srq_destroy(struct spdk_rdma_srq *rdma_srq);
 * \param first Pointer to the first Work Request
 * \return true if there were no outstanding WRs before, false otherwise
 */
-bool spdk_rdma_srq_queue_recv_wrs(struct spdk_rdma_srq *rdma_srq, struct ibv_recv_wr *first);
+bool spdk_rdma_provider_srq_queue_recv_wrs(struct spdk_rdma_provider_srq *rdma_srq,
+		struct ibv_recv_wr *first);

/**
 * Submit all queued receive Work Requests
@@ -125,7 +127,8 @@ bool spdk_rdma_srq_queue_recv_wrs(struct spdk_rdma_srq *rdma_srq, struct ibv_rec
 * \param bad_wr Stores a pointer to the first failed WR if this function returns a nonzero value
 * \return 0 on success, errno on failure
 */
-int spdk_rdma_srq_flush_recv_wrs(struct spdk_rdma_srq *rdma_srq, struct ibv_recv_wr **bad_wr);
+int spdk_rdma_provider_srq_flush_recv_wrs(struct spdk_rdma_provider_srq *rdma_srq,
+		struct ibv_recv_wr **bad_wr);

/**
 * Create RDMA provider specific qpair
@@ -134,8 +137,8 @@ int spdk_rdma_srq_flush_recv_wrs(struct spdk_rdma_srq *rdma_srq, struct ibv_recv
 * \param qp_attr Pointer to qpair init attributes
 * \return Pointer to a newly created qpair on success or NULL on failure
 */
-struct spdk_rdma_qp *spdk_rdma_qp_create(struct rdma_cm_id *cm_id,
-		struct spdk_rdma_qp_init_attr *qp_attr);
+struct spdk_rdma_provider_qp *spdk_rdma_provider_qp_create(struct rdma_cm_id *cm_id,
+		struct spdk_rdma_provider_qp_init_attr *qp_attr);

/**
 * Accept a connection request. Called by the passive side (NVMEoF target)
@@ -144,7 +147,8 @@ struct spdk_rdma_qp *spdk_rdma_qp_create(struct rdma_cm_id *cm_id,
 * \param conn_param Optional information needed to establish the connection
 * \return 0 on success, errno on failure
 */
-int spdk_rdma_qp_accept(struct spdk_rdma_qp *spdk_rdma_qp, struct rdma_conn_param *conn_param);
+int spdk_rdma_provider_qp_accept(struct spdk_rdma_provider_qp *spdk_rdma_qp,
+				 struct rdma_conn_param *conn_param);

/**
 * Complete the connection process, must be called by the active
@@ -153,14 +157,14 @@ int spdk_rdma_qp_accept(struct spdk_rdma_qp *spdk_rdma_qp, struct rdma_conn_para
 * \param spdk_rdma_qp Pointer to SPDK RDMA qpair
 * \return 0 on success, errno on failure
 */
-int spdk_rdma_qp_complete_connect(struct spdk_rdma_qp *spdk_rdma_qp);
+int spdk_rdma_provider_qp_complete_connect(struct spdk_rdma_provider_qp *spdk_rdma_qp);

/**
 * Destroy RDMA provider specific qpair
 *
 * \param spdk_rdma_qp Pointer to SPDK RDMA qpair to be destroyed
 */
-void spdk_rdma_qp_destroy(struct spdk_rdma_qp *spdk_rdma_qp);
+void spdk_rdma_provider_qp_destroy(struct spdk_rdma_provider_qp *spdk_rdma_qp);

/**
 * Disconnect a connection and transition associated qpair to error state.
@@ -168,7 +172,7 @@ void spdk_rdma_qp_destroy(struct spdk_rdma_qp *spdk_rdma_qp);
 *
 * \param spdk_rdma_qp Pointer to qpair to be disconnected
 */
-int spdk_rdma_qp_disconnect(struct spdk_rdma_qp *spdk_rdma_qp);
+int spdk_rdma_provider_qp_disconnect(struct spdk_rdma_provider_qp *spdk_rdma_qp);

/**
 * Append the given send wr structure to the qpair's outstanding sends list.
@@ -178,7 +182,8 @@ int spdk_rdma_qp_disconnect(struct spdk_rdma_qp *spdk_rdma_qp);
 * \param first Pointer to the first Work Request
 * \return true if there were no outstanding WRs before, false otherwise
 */
-bool spdk_rdma_qp_queue_send_wrs(struct spdk_rdma_qp *spdk_rdma_qp, struct ibv_send_wr *first);
+bool spdk_rdma_provider_qp_queue_send_wrs(struct spdk_rdma_provider_qp *spdk_rdma_qp,
+		struct ibv_send_wr *first);

/**
 * Submit all queued send Work Requests
@@ -187,7 +192,8 @@ bool spdk_rdma_qp_queue_send_wrs(struct spdk_rdma_qp *spdk_rdma_qp, struct ibv_s
 * \param bad_wr Stores a pointer to the first failed WR if this function returns a nonzero value
 * \return 0 on success, errno on failure
 */
-int spdk_rdma_qp_flush_send_wrs(struct spdk_rdma_qp *spdk_rdma_qp, struct ibv_send_wr **bad_wr);
+int spdk_rdma_provider_qp_flush_send_wrs(struct spdk_rdma_provider_qp *spdk_rdma_qp,
+		struct ibv_send_wr **bad_wr);

/**
 * Append the given recv wr structure to the qpair's outstanding recv list.
@@ -197,7 +203,8 @@ int spdk_rdma_qp_flush_send_wrs(struct spdk_rdma_qp *spdk_rdma_qp, struct ibv_se
 * \param first Pointer to the first Work Request
 * \return true if there were no outstanding WRs before, false otherwise
 */
-bool spdk_rdma_qp_queue_recv_wrs(struct spdk_rdma_qp *spdk_rdma_qp, struct ibv_recv_wr *first);
+bool spdk_rdma_provider_qp_queue_recv_wrs(struct spdk_rdma_provider_qp *spdk_rdma_qp,
+		struct ibv_recv_wr *first);

/**
 * Submit all queued recv Work Requests
@@ -205,7 +212,8 @@ bool spdk_rdma_qp_queue_recv_wrs(struct spdk_rdma_qp *spdk_rdma_qp, struct ibv_r
 * \param bad_wr Stores a pointer to the first failed WR if this function returns a nonzero value
 * \return 0 on success, errno on failure
 */
-int spdk_rdma_qp_flush_recv_wrs(struct spdk_rdma_qp *spdk_rdma_qp, struct ibv_recv_wr **bad_wr);
+int spdk_rdma_provider_qp_flush_recv_wrs(struct spdk_rdma_provider_qp *spdk_rdma_qp,
+		struct ibv_recv_wr **bad_wr);

/**
 * Create a memory map which is used to register Memory Regions and perform address -> memory
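The declarations above preserve the old library's queue-then-flush contract; only the spdk_rdma_provider_ prefix changes. As a minimal sketch of the renamed receive path, assuming only the prototypes shown in this header (the helper name, error handling, and WR chain are illustrative, not part of this patch):

#include "spdk_internal/rdma_provider.h"

/* Hypothetical helper: post a pre-built chain of recv WRs to an SRQ.
 * queue_recv_wrs() only links the chain onto the provider's pending
 * list; nothing is submitted until flush_recv_wrs() is called. */
static int
post_srq_recvs(struct spdk_rdma_provider_srq *srq, struct ibv_recv_wr *first)
{
	struct ibv_recv_wr *bad_wr = NULL;

	/* Return value is true if the pending list was empty before this
	 * call; callers that batch across qpairs use it (see the sketch
	 * at the end of this page). */
	spdk_rdma_provider_srq_queue_recv_wrs(srq, first);

	/* Submit everything queued so far. On failure, bad_wr points to
	 * the first WR that was not posted. */
	return spdk_rdma_provider_srq_flush_recv_wrs(srq, &bad_wr);
}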
+2 −1
#  SPDX-License-Identifier: BSD-3-Clause
#  Copyright (C) 2015 Intel Corporation.
+#  Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
#  All rights reserved.
#

@@ -28,7 +29,7 @@ DIRS-$(CONFIG_IDXD) += idxd
DIRS-$(CONFIG_VHOST) += vhost
DIRS-$(CONFIG_VIRTIO) += virtio
DIRS-$(CONFIG_VBDEV_COMPRESS) += reduce
-DIRS-$(CONFIG_RDMA) += rdma
+DIRS-$(CONFIG_RDMA) += rdma_provider
DIRS-$(CONFIG_VFIO_USER) += vfu_tgt

ifeq ($(CONFIG_RDMA_PROV),mlx5_dv)
+2 −2
/*   SPDX-License-Identifier: BSD-3-Clause
- *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *   Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include <rdma/rdma_cma.h>
@@ -12,7 +12,7 @@
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk_internal/mlx5.h"
#include "spdk_internal/rdma.h"
#include "spdk_internal/rdma_provider.h"

#define MLX5_VENDOR_ID_MELLANOX 0x2c9

+25 −25
@@ -23,7 +23,7 @@
#include "spdk/config.h"

#include "nvme_internal.h"
#include "spdk_internal/rdma.h"
#include "spdk_internal/rdma_provider.h"

#define NVME_RDMA_TIME_OUT_IN_MS 2000
#define NVME_RDMA_RW_BUFFER_SIZE 131072
@@ -126,7 +126,7 @@ struct nvme_rdma_poller_stats {
	uint64_t idle_polls;
	uint64_t queued_requests;
	uint64_t completions;
-	struct spdk_rdma_qp_stats rdma_stats;
+	struct spdk_rdma_provider_qp_stats rdma_stats;
};

struct nvme_rdma_poll_group;
@@ -135,7 +135,7 @@ struct nvme_rdma_rsps;
struct nvme_rdma_poller {
	struct ibv_context		*device;
	struct ibv_cq			*cq;
-	struct spdk_rdma_srq		*srq;
+	struct spdk_rdma_provider_srq	*srq;
	struct nvme_rdma_rsps		*rsps;
	struct ibv_pd			*pd;
	struct spdk_rdma_mem_map	*mr_map;
@@ -174,7 +174,7 @@ typedef int (*nvme_rdma_cm_event_cb)(struct nvme_rdma_qpair *rqpair, int ret);
struct nvme_rdma_rsp_opts {
	uint16_t				num_entries;
	struct nvme_rdma_qpair			*rqpair;
-	struct spdk_rdma_srq			*srq;
+	struct spdk_rdma_provider_srq		*srq;
	struct spdk_rdma_mem_map		*mr_map;
};

@@ -195,10 +195,10 @@ struct nvme_rdma_rsps {
struct nvme_rdma_qpair {
	struct spdk_nvme_qpair			qpair;

-	struct spdk_rdma_qp			*rdma_qp;
+	struct spdk_rdma_provider_qp		*rdma_qp;
	struct rdma_cm_id			*cm_id;
	struct ibv_cq				*cq;
-	struct spdk_rdma_srq			*srq;
+	struct spdk_rdma_provider_srq		*srq;

	struct	spdk_nvme_rdma_req		*rdma_reqs;

@@ -500,7 +500,7 @@ nvme_rdma_qpair_process_cm_event(struct nvme_rdma_qpair *rqpair)
		case RDMA_CM_EVENT_REJECTED:
			break;
		case RDMA_CM_EVENT_CONNECT_RESPONSE:
-			rc = spdk_rdma_qp_complete_connect(rqpair->rdma_qp);
+			rc = spdk_rdma_provider_qp_complete_connect(rqpair->rdma_qp);
		/* fall through */
		case RDMA_CM_EVENT_ESTABLISHED:
			accept_data = (struct spdk_nvmf_rdma_accept_private_data *)event->param.conn.private_data;
@@ -748,7 +748,7 @@ static int
nvme_rdma_qpair_init(struct nvme_rdma_qpair *rqpair)
{
	int			rc;
-	struct spdk_rdma_qp_init_attr	attr = {};
+	struct spdk_rdma_provider_qp_init_attr	attr = {};
	struct ibv_device_attr	dev_attr;
	struct nvme_rdma_ctrlr	*rctrlr;
	uint32_t num_cqe, max_num_cqe;
@@ -799,7 +799,7 @@ nvme_rdma_qpair_init(struct nvme_rdma_qpair *rqpair)
	attr.cap.max_send_sge	= spdk_min(NVME_RDMA_DEFAULT_TX_SGE, dev_attr.max_sge);
	attr.cap.max_recv_sge	= spdk_min(NVME_RDMA_DEFAULT_RX_SGE, dev_attr.max_sge);

-	rqpair->rdma_qp = spdk_rdma_qp_create(rqpair->cm_id, &attr);
+	rqpair->rdma_qp = spdk_rdma_provider_qp_create(rqpair->cm_id, &attr);

	if (!rqpair->rdma_qp) {
		return -1;
@@ -853,7 +853,7 @@ nvme_rdma_qpair_submit_sends(struct nvme_rdma_qpair *rqpair)
	struct ibv_send_wr *bad_send_wr = NULL;
	int rc;

-	rc = spdk_rdma_qp_flush_send_wrs(rqpair->rdma_qp, &bad_send_wr);
+	rc = spdk_rdma_provider_qp_flush_send_wrs(rqpair->rdma_qp, &bad_send_wr);

	if (spdk_unlikely(rc)) {
		nvme_rdma_reset_failed_sends(rqpair, bad_send_wr, rc);
@@ -868,7 +868,7 @@ nvme_rdma_qpair_submit_recvs(struct nvme_rdma_qpair *rqpair)
	struct ibv_recv_wr *bad_recv_wr;
	int rc = 0;

-	rc = spdk_rdma_qp_flush_recv_wrs(rqpair->rdma_qp, &bad_recv_wr);
+	rc = spdk_rdma_provider_qp_flush_recv_wrs(rqpair->rdma_qp, &bad_recv_wr);
	if (spdk_unlikely(rc)) {
		nvme_rdma_reset_failed_recvs(rqpair->rsps, bad_recv_wr, rc);
	}
@@ -882,7 +882,7 @@ nvme_rdma_poller_submit_recvs(struct nvme_rdma_poller *poller)
	struct ibv_recv_wr *bad_recv_wr;
	int rc;

-	rc = spdk_rdma_srq_flush_recv_wrs(poller->srq, &bad_recv_wr);
+	rc = spdk_rdma_provider_srq_flush_recv_wrs(poller->srq, &bad_recv_wr);
	if (spdk_unlikely(rc)) {
		nvme_rdma_reset_failed_recvs(poller->rsps, bad_recv_wr, rc);
	}
@@ -968,9 +968,9 @@ nvme_rdma_create_rsps(struct nvme_rdma_rsp_opts *opts)
		nvme_rdma_trace_ibv_sge(recv_wr->sg_list);

		if (opts->rqpair) {
-			spdk_rdma_qp_queue_recv_wrs(opts->rqpair->rdma_qp, recv_wr);
+			spdk_rdma_provider_qp_queue_recv_wrs(opts->rqpair->rdma_qp, recv_wr);
		} else {
-			spdk_rdma_srq_queue_recv_wrs(opts->srq, recv_wr);
+			spdk_rdma_provider_srq_queue_recv_wrs(opts->srq, recv_wr);
		}
	}

@@ -1884,7 +1884,7 @@ nvme_rdma_qpair_destroy(struct nvme_rdma_qpair *rqpair)
	if (rqpair->cm_id) {
		if (rqpair->rdma_qp) {
			spdk_rdma_put_pd(rqpair->rdma_qp->qp->pd);
-			spdk_rdma_qp_destroy(rqpair->rdma_qp);
+			spdk_rdma_provider_qp_destroy(rqpair->rdma_qp);
			rqpair->rdma_qp = NULL;
		}
	}
@@ -1999,7 +1999,7 @@ _nvme_rdma_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvm

	if (rqpair->cm_id) {
		if (rqpair->rdma_qp) {
-			rc = spdk_rdma_qp_disconnect(rqpair->rdma_qp);
+			rc = spdk_rdma_provider_qp_disconnect(rqpair->rdma_qp);
			if ((qpair->ctrlr != NULL) && (rc == 0)) {
				rc = nvme_rdma_process_event_start(rqpair, RDMA_CM_EVENT_DISCONNECTED,
								   disconnected_qpair_cb);
@@ -2347,7 +2347,7 @@ nvme_rdma_qpair_submit_request(struct spdk_nvme_qpair *qpair,
	wr->next = NULL;
	nvme_rdma_trace_ibv_sge(wr->sg_list);

-	spdk_rdma_qp_queue_send_wrs(rqpair->rdma_qp, wr);
+	spdk_rdma_provider_qp_queue_send_wrs(rqpair->rdma_qp, wr);

	if (!rqpair->delay_cmd_submit) {
		return nvme_rdma_qpair_submit_sends(rqpair);
@@ -2444,9 +2444,9 @@ nvme_rdma_request_ready(struct nvme_rdma_qpair *rqpair, struct spdk_nvme_rdma_re
	nvme_rdma_trace_ibv_sge(recv_wr->sg_list);

	if (!rqpair->srq) {
-		spdk_rdma_qp_queue_recv_wrs(rqpair->rdma_qp, recv_wr);
+		spdk_rdma_provider_qp_queue_recv_wrs(rqpair->rdma_qp, recv_wr);
	} else {
-		spdk_rdma_srq_queue_recv_wrs(rqpair->srq, recv_wr);
+		spdk_rdma_provider_srq_queue_recv_wrs(rqpair->srq, recv_wr);
	}
}

@@ -2525,7 +2525,7 @@ nvme_rdma_process_recv_completion(struct nvme_rdma_poller *poller, struct ibv_wc
			 * However, for the SRQ, this is not an error. Hence, just re-post the
			 * receive request to the SRQ to reuse for other QPs, and return 0.
			 */
-			spdk_rdma_srq_queue_recv_wrs(poller->srq, rdma_rsp->recv_wr);
+			spdk_rdma_provider_srq_queue_recv_wrs(poller->srq, rdma_rsp->recv_wr);
			return 0;
		}
	} else {
@@ -2579,7 +2579,7 @@ nvme_rdma_process_recv_completion(struct nvme_rdma_poller *poller, struct ibv_wc
err_wc:
	nvme_rdma_fail_qpair(&rqpair->qpair, 0);
	if (poller && poller->srq) {
-		spdk_rdma_srq_queue_recv_wrs(poller->srq, rdma_rsp->recv_wr);
+		spdk_rdma_provider_srq_queue_recv_wrs(poller->srq, rdma_rsp->recv_wr);
	}
	return -ENXIO;
}
@@ -2613,7 +2613,7 @@ nvme_rdma_process_send_completion(struct nvme_rdma_poller *poller,
		nvme_rdma_log_wc_status(rqpair, wc);
		nvme_rdma_fail_qpair(&rqpair->qpair, 0);
		if (rdma_req->rdma_rsp && poller && poller->srq) {
-			spdk_rdma_srq_queue_recv_wrs(poller->srq, rdma_req->rdma_rsp->recv_wr);
+			spdk_rdma_provider_srq_queue_recv_wrs(poller->srq, rdma_req->rdma_rsp->recv_wr);
		}
		return -ENXIO;
	}
@@ -2892,7 +2892,7 @@ nvme_rdma_poller_destroy(struct nvme_rdma_poller *poller)
		nvme_rdma_free_rsps(poller->rsps);
	}
	if (poller->srq) {
-		spdk_rdma_srq_destroy(poller->srq);
+		spdk_rdma_provider_srq_destroy(poller->srq);
	}
	if (poller->mr_map) {
		spdk_rdma_free_mem_map(&poller->mr_map);
@@ -2908,7 +2908,7 @@ nvme_rdma_poller_create(struct nvme_rdma_poll_group *group, struct ibv_context *
{
	struct nvme_rdma_poller *poller;
	struct ibv_device_attr dev_attr;
-	struct spdk_rdma_srq_init_attr srq_init_attr = {};
+	struct spdk_rdma_provider_srq_init_attr srq_init_attr = {};
	struct nvme_rdma_rsp_opts opts;
	int num_cqe, max_num_cqe;
	int rc;
@@ -2949,7 +2949,7 @@ nvme_rdma_poller_create(struct nvme_rdma_poll_group *group, struct ibv_context *
		srq_init_attr.srq_init_attr.attr.max_sge = spdk_min(dev_attr.max_sge,
				NVME_RDMA_DEFAULT_RX_SGE);

-		poller->srq = spdk_rdma_srq_create(&srq_init_attr);
+		poller->srq = spdk_rdma_provider_srq_create(&srq_init_attr);
		if (poller->srq == NULL) {
			SPDK_ERRLOG("Unable to create SRQ.\n");
			goto fail;
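For reference, the initiator code above fills a spdk_rdma_provider_srq_init_attr and hands it to the renamed constructor, exactly as with the old spdk_rdma_srq_create(). A hedged sketch of that step, with placeholder PD, stats, and sizing arguments standing in for the real poller state:

#include "spdk_internal/rdma_provider.h"

/* Illustrative only: 'pd', 'stats', 'depth', and 'max_sge' stand in for
 * the device and poller state used by nvme_rdma_poller_create(). */
static struct spdk_rdma_provider_srq *
create_srq(struct ibv_pd *pd, struct spdk_rdma_provider_wr_stats *stats,
	   uint32_t depth, uint32_t max_sge)
{
	struct spdk_rdma_provider_srq_init_attr srq_init_attr = {};

	srq_init_attr.pd = pd;
	srq_init_attr.stats = stats;
	srq_init_attr.srq_init_attr.attr.max_wr = depth;
	srq_init_attr.srq_init_attr.attr.max_sge = max_sge;

	/* Returns NULL and sets errno on failure. */
	return spdk_rdma_provider_srq_create(&srq_init_attr);
}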
+35 −35
@@ -17,7 +17,7 @@

#include "spdk_internal/assert.h"
#include "spdk/log.h"
#include "spdk_internal/rdma.h"
#include "spdk_internal/rdma_provider.h"

#include "nvmf_internal.h"
#include "transport.h"
@@ -311,9 +311,9 @@ struct spdk_nvmf_rdma_qpair {
	struct spdk_nvmf_rdma_device		*device;
	struct spdk_nvmf_rdma_poller		*poller;

-	struct spdk_rdma_qp			*rdma_qp;
+	struct spdk_rdma_provider_qp		*rdma_qp;
	struct rdma_cm_id			*cm_id;
-	struct spdk_rdma_srq			*srq;
+	struct spdk_rdma_provider_srq		*srq;
	struct rdma_cm_id			*listen_id;

	/* Cache the QP number to improve QP search by RB tree. */
@@ -397,7 +397,7 @@ struct spdk_nvmf_rdma_poller_stat {
	uint64_t				pending_rdma_read;
	uint64_t				pending_rdma_write;
	uint64_t				pending_rdma_send;
-	struct spdk_rdma_qp_stats		qp_stats;
+	struct spdk_rdma_provider_qp_stats	qp_stats;
};

struct spdk_nvmf_rdma_poller {
@@ -413,7 +413,7 @@ struct spdk_nvmf_rdma_poller {
	bool					need_destroy;

	/* Shared receive queue */
-	struct spdk_rdma_srq			*srq;
+	struct spdk_rdma_provider_srq		*srq;

	struct spdk_nvmf_rdma_resources		*resources;
	struct spdk_nvmf_rdma_poller_stat	stat;
@@ -669,8 +669,8 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
	struct spdk_nvmf_rdma_resources		*resources;
	struct spdk_nvmf_rdma_request		*rdma_req;
	struct spdk_nvmf_rdma_recv		*rdma_recv;
-	struct spdk_rdma_qp			*qp = NULL;
-	struct spdk_rdma_srq			*srq = NULL;
+	struct spdk_rdma_provider_qp		*qp = NULL;
+	struct spdk_rdma_provider_srq		*srq = NULL;
	struct ibv_recv_wr			*bad_wr = NULL;
	struct spdk_rdma_memory_translation	translation;
	uint32_t				i;
@@ -718,9 +718,9 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
	STAILQ_INIT(&resources->free_queue);

	if (opts->shared) {
-		srq = (struct spdk_rdma_srq *)opts->qp;
+		srq = (struct spdk_rdma_provider_srq *)opts->qp;
	} else {
-		qp = (struct spdk_rdma_qp *)opts->qp;
+		qp = (struct spdk_rdma_provider_qp *)opts->qp;
	}

	for (i = 0; i < opts->max_queue_depth; i++) {
@@ -759,9 +759,9 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
		rdma_recv->wr.wr_id = (uintptr_t)&rdma_recv->rdma_wr;
		rdma_recv->wr.sg_list = rdma_recv->sgl;
		if (srq) {
-			spdk_rdma_srq_queue_recv_wrs(srq, &rdma_recv->wr);
+			spdk_rdma_provider_srq_queue_recv_wrs(srq, &rdma_recv->wr);
		} else {
-			spdk_rdma_qp_queue_recv_wrs(qp, &rdma_recv->wr);
+			spdk_rdma_provider_qp_queue_recv_wrs(qp, &rdma_recv->wr);
		}
	}

@@ -811,9 +811,9 @@ nvmf_rdma_resources_create(struct spdk_nvmf_rdma_resource_opts *opts)
	}

	if (srq) {
-		rc = spdk_rdma_srq_flush_recv_wrs(srq, &bad_wr);
+		rc = spdk_rdma_provider_srq_flush_recv_wrs(srq, &bad_wr);
	} else {
-		rc = spdk_rdma_qp_flush_recv_wrs(qp, &bad_wr);
+		rc = spdk_rdma_provider_qp_flush_recv_wrs(qp, &bad_wr);
	}

	if (rc) {
@@ -885,8 +885,8 @@ nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)
			STAILQ_FOREACH_SAFE(rdma_recv, &rqpair->resources->incoming_queue, link, recv_tmp) {
				if (rqpair == rdma_recv->qpair) {
					STAILQ_REMOVE(&rqpair->resources->incoming_queue, rdma_recv, spdk_nvmf_rdma_recv, link);
-					spdk_rdma_srq_queue_recv_wrs(rqpair->srq, &rdma_recv->wr);
-					rc = spdk_rdma_srq_flush_recv_wrs(rqpair->srq, &bad_recv_wr);
+					spdk_rdma_provider_srq_queue_recv_wrs(rqpair->srq, &rdma_recv->wr);
+					rc = spdk_rdma_provider_srq_flush_recv_wrs(rqpair->srq, &bad_recv_wr);
					if (rc) {
						SPDK_ERRLOG("Unable to re-post rx descriptor\n");
					}
@@ -897,7 +897,7 @@ nvmf_rdma_qpair_destroy(struct spdk_nvmf_rdma_qpair *rqpair)

	if (rqpair->cm_id) {
		if (rqpair->rdma_qp != NULL) {
-			spdk_rdma_qp_destroy(rqpair->rdma_qp);
+			spdk_rdma_provider_qp_destroy(rqpair->rdma_qp);
			rqpair->rdma_qp = NULL;
		}

@@ -978,7 +978,7 @@ nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
	struct spdk_nvmf_transport		*transport;
	struct spdk_nvmf_rdma_resource_opts	opts;
	struct spdk_nvmf_rdma_device		*device;
-	struct spdk_rdma_qp_init_attr		qp_init_attr = {};
+	struct spdk_rdma_provider_qp_init_attr	qp_init_attr = {};

	rqpair = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_rdma_qpair, qpair);
	device = rqpair->device;
@@ -1005,7 +1005,7 @@ nvmf_rdma_qpair_initialize(struct spdk_nvmf_qpair *qpair)
		goto error;
	}

-	rqpair->rdma_qp = spdk_rdma_qp_create(rqpair->cm_id, &qp_init_attr);
+	rqpair->rdma_qp = spdk_rdma_provider_qp_create(rqpair->cm_id, &qp_init_attr);
	if (!rqpair->rdma_qp) {
		goto error;
	}
@@ -1064,9 +1064,9 @@ nvmf_rdma_qpair_queue_recv_wrs(struct spdk_nvmf_rdma_qpair *rqpair, struct ibv_r
			struct spdk_nvmf_rdma_transport, transport);

	if (rqpair->srq != NULL) {
-		spdk_rdma_srq_queue_recv_wrs(rqpair->srq, first);
+		spdk_rdma_provider_srq_queue_recv_wrs(rqpair->srq, first);
	} else {
-		if (spdk_rdma_qp_queue_recv_wrs(rqpair->rdma_qp, first)) {
+		if (spdk_rdma_provider_qp_queue_recv_wrs(rqpair->rdma_qp, first)) {
			STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_recv, rqpair, recv_link);
		}
	}
@@ -1093,7 +1093,7 @@ request_transfer_in(struct spdk_nvmf_request *req)
	assert(req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
	assert(rdma_req != NULL);

-	if (spdk_rdma_qp_queue_send_wrs(rqpair->rdma_qp, rdma_req->transfer_wr)) {
+	if (spdk_rdma_provider_qp_queue_send_wrs(rqpair->rdma_qp, rdma_req->transfer_wr)) {
		STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_send, rqpair, send_link);
	}
	if (rtransport->rdma_opts.no_wr_batching) {
@@ -1199,7 +1199,7 @@ request_transfer_out(struct spdk_nvmf_request *req, int *data_posted)
		*data_posted = 1;
		num_outstanding_data_wr = rdma_req->num_outstanding_data_wr;
	}
-	if (spdk_rdma_qp_queue_send_wrs(rqpair->rdma_qp, first)) {
+	if (spdk_rdma_provider_qp_queue_send_wrs(rqpair->rdma_qp, first)) {
		STAILQ_INSERT_TAIL(&rqpair->poller->qpairs_pending_send, rqpair, send_link);
	}
	if (rtransport->rdma_opts.no_wr_batching) {
@@ -1244,9 +1244,9 @@ nvmf_rdma_event_accept(struct rdma_cm_id *id, struct spdk_nvmf_rdma_qpair *rqpai
	ctrlr_event_data.srq = rqpair->srq ? 1 : 0;
	ctrlr_event_data.qp_num = rqpair->qp_num;

-	rc = spdk_rdma_qp_accept(rqpair->rdma_qp, &ctrlr_event_data);
+	rc = spdk_rdma_provider_qp_accept(rqpair->rdma_qp, &ctrlr_event_data);
	if (rc) {
		SPDK_ERRLOG("Error %d on spdk_rdma_qp_accept\n", errno);
		SPDK_ERRLOG("Error %d on spdk_rdma_provider_qp_accept\n", errno);
	} else {
		SPDK_DEBUGLOG(rdma, "Sent back the accept\n");
	}
@@ -3971,7 +3971,7 @@ nvmf_rdma_poller_create(struct spdk_nvmf_rdma_transport *rtransport,
			struct spdk_nvmf_rdma_poller **out_poller)
{
	struct spdk_nvmf_rdma_poller		*poller;
-	struct spdk_rdma_srq_init_attr		srq_init_attr;
+	struct spdk_rdma_provider_srq_init_attr	srq_init_attr;
	struct spdk_nvmf_rdma_resource_opts	opts;
	int					num_cqe;

@@ -4004,7 +4004,7 @@ nvmf_rdma_poller_create(struct spdk_nvmf_rdma_transport *rtransport,
		srq_init_attr.stats = &poller->stat.qp_stats.recv;
		srq_init_attr.srq_init_attr.attr.max_wr = poller->max_srq_depth;
		srq_init_attr.srq_init_attr.attr.max_sge = spdk_min(device->attr.max_sge, NVMF_DEFAULT_RX_SGE);
-		poller->srq = spdk_rdma_srq_create(&srq_init_attr);
+		poller->srq = spdk_rdma_provider_srq_create(&srq_init_attr);
		if (!poller->srq) {
			SPDK_ERRLOG("Unable to create shared receive queue, errno %d\n", errno);
			return -1;
@@ -4191,7 +4191,7 @@ nvmf_rdma_poller_destroy(struct spdk_nvmf_rdma_poller *poller)
		if (poller->resources) {
			nvmf_rdma_resources_destroy(poller->resources);
		}
-		spdk_rdma_srq_destroy(poller->srq);
+		spdk_rdma_provider_srq_destroy(poller->srq);
		SPDK_DEBUGLOG(rdma, "Destroyed RDMA shared queue %p\n", poller->srq);
	}

@@ -4354,8 +4354,8 @@ nvmf_rdma_request_free(struct spdk_nvmf_request *req)
		int rc;
		struct ibv_recv_wr *bad_recv_wr;

-		spdk_rdma_srq_queue_recv_wrs(rqpair->srq, &rdma_req->recv->wr);
-		rc = spdk_rdma_srq_flush_recv_wrs(rqpair->srq, &bad_recv_wr);
+		spdk_rdma_provider_srq_queue_recv_wrs(rqpair->srq, &rdma_req->recv->wr);
+		rc = spdk_rdma_provider_srq_flush_recv_wrs(rqpair->srq, &bad_recv_wr);
		if (rc) {
			SPDK_ERRLOG("Unable to re-post rx descriptor\n");
		}
@@ -4408,7 +4408,7 @@ nvmf_rdma_close_qpair(struct spdk_nvmf_qpair *qpair,
	}

	if (rqpair->rdma_qp) {
-		spdk_rdma_qp_disconnect(rqpair->rdma_qp);
+		spdk_rdma_provider_qp_disconnect(rqpair->rdma_qp);
	}

	nvmf_rdma_destroy_drained_qpair(rqpair);
@@ -4476,14 +4476,14 @@ _poller_submit_recvs(struct spdk_nvmf_rdma_transport *rtransport,
	int				rc;

	if (rpoller->srq) {
-		rc = spdk_rdma_srq_flush_recv_wrs(rpoller->srq, &bad_recv_wr);
+		rc = spdk_rdma_provider_srq_flush_recv_wrs(rpoller->srq, &bad_recv_wr);
		if (spdk_unlikely(rc)) {
			_poller_reset_failed_recvs(rpoller, bad_recv_wr, rc);
		}
	} else {
		while (!STAILQ_EMPTY(&rpoller->qpairs_pending_recv)) {
			rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_recv);
-			rc = spdk_rdma_qp_flush_recv_wrs(rqpair->rdma_qp, &bad_recv_wr);
+			rc = spdk_rdma_provider_qp_flush_recv_wrs(rqpair->rdma_qp, &bad_recv_wr);
			if (spdk_unlikely(rc)) {
				_qp_reset_failed_recvs(rqpair, bad_recv_wr, rc);
			}
@@ -4564,7 +4564,7 @@ _poller_submit_sends(struct spdk_nvmf_rdma_transport *rtransport,

	while (!STAILQ_EMPTY(&rpoller->qpairs_pending_send)) {
		rqpair = STAILQ_FIRST(&rpoller->qpairs_pending_send);
-		rc = spdk_rdma_qp_flush_send_wrs(rqpair->rdma_qp, &bad_wr);
+		rc = spdk_rdma_provider_qp_flush_send_wrs(rqpair->rdma_qp, &bad_wr);

		/* bad wr always points to the first wr that failed. */
		if (spdk_unlikely(rc)) {
@@ -4684,8 +4684,8 @@ nvmf_rdma_poller_poll(struct spdk_nvmf_rdma_transport *rtransport,
					struct ibv_recv_wr *bad_wr;

					rdma_recv->wr.next = NULL;
-					spdk_rdma_srq_queue_recv_wrs(rpoller->srq, &rdma_recv->wr);
-					rc = spdk_rdma_srq_flush_recv_wrs(rpoller->srq, &bad_wr);
+					spdk_rdma_provider_srq_queue_recv_wrs(rpoller->srq, &rdma_recv->wr);
+					rc = spdk_rdma_provider_srq_flush_recv_wrs(rpoller->srq, &bad_wr);
					if (rc) {
						SPDK_ERRLOG("Failed to re-post recv WR to SRQ, err %d\n", rc);
					}
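The target-side hunks above also show why the queue functions return a bool: a qpair is linked onto the poller's pending list only when its first WR is queued, and the poller later flushes each pending qpair once. A condensed sketch of that pattern, with the spdk_nvmf_rdma_* structures simplified to hypothetical stand-ins:

#include <sys/queue.h>

#include "spdk_internal/rdma_provider.h"

/* Simplified stand-in for struct spdk_nvmf_rdma_qpair. */
struct demo_qpair {
	struct spdk_rdma_provider_qp	*rdma_qp;
	STAILQ_ENTRY(demo_qpair)	send_link;
};

STAILQ_HEAD(demo_pending_list, demo_qpair);

static void
demo_queue_send(struct demo_pending_list *pending, struct demo_qpair *qpair,
		struct ibv_send_wr *first)
{
	/* True means the qpair had no queued WRs yet, so it is not on the
	 * pending list; insert it exactly once. */
	if (spdk_rdma_provider_qp_queue_send_wrs(qpair->rdma_qp, first)) {
		STAILQ_INSERT_TAIL(pending, qpair, send_link);
	}
}

static void
demo_flush_sends(struct demo_pending_list *pending)
{
	struct demo_qpair *qpair;
	struct ibv_send_wr *bad_wr;

	while (!STAILQ_EMPTY(pending)) {
		qpair = STAILQ_FIRST(pending);
		/* On a nonzero return, bad_wr points at the first WR that
		 * was not posted; recovery is elided in this sketch. */
		spdk_rdma_provider_qp_flush_send_wrs(qpair->rdma_qp, &bad_wr);
		STAILQ_REMOVE_HEAD(pending, send_link);
	}
}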