Commit 9307ff5a authored by Tomasz Kulasek's avatar Tomasz Kulasek Committed by Jim Harris
Browse files

net/vpp: move to VPP 19.04



This patch updates net/vpp implementation from version VPP 19.01 to
VPP 19.04.

1. Some binary APIs are deprecated in 19.04 and message queue is used
   to handle control events:

   - vl_api_bind_sock_reply_t_handler by SESSION_CTRL_EVT_BOUND,
   - vl_api_unbind_sock_reply_t_handler by SESSION_CTRL_EVT_UNLISTEN_REPLY,
   - vl_api_accept_session_t_handler by SESSION_CTRL_EVT_ACCEPTED,
   - vl_api_connect_session_reply_t_handler by SESSION_CTRL_EVT_CONNECTED,
   - vl_api_disconnect_session_t_handler by SESSION_CTRL_EVT_DISCONNECTED,
   - vl_api_reset_session_t_handler by SESSION_CTRL_EVT_RESET

2. Fixes for Fedora 29/30:

   - added "-Wno-address-of-packed-member" (DPDK 19.02 fails to compile
     with gcc9.1),
   - force "-maes" compile flag for gcc9.1 to compile crypto_ia32 and
     crypto_ipsecmb plugins (gcc9.1 doesn't do that for -march=silvermont)
   - some minor fixes

3. The default path for the VPP installation used by the test scripts is
   changed from /usr/local/src/vpp to /usr/local/src/vpp-19.04 to avoid a
   VPP version conflict.

Change-Id: I1d20ad7f138f5086ba7fab41d77d86f8139d038e
Signed-off-by: default avatarTomasz Kulasek <tomaszx.kulasek@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/459113


Tested-by: default avatarSPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: default avatarTomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: default avatarJim Harris <james.r.harris@intel.com>
Reviewed-by: default avatarShuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
parent bee1130c
Loading
Loading
Loading
Loading
+2 −21
Original line number Diff line number Diff line
@@ -7,7 +7,7 @@ packet processing graph (see [What is VPP?](https://wiki.fd.io/view/VPP/What_is_
Detailed instructions for **simplified steps 1-3** below, can be found on
VPP [Quick Start Guide](https://wiki.fd.io/view/VPP).

*SPDK supports VPP version 19.01.1.*
*SPDK supports VPP version 19.04.2.*

#  1. Building VPP (optional) {#vpp_build}

@@ -16,21 +16,7 @@ VPP [Quick Start Guide](https://wiki.fd.io/view/VPP).
Clone and checkout VPP
~~~
git clone https://gerrit.fd.io/r/vpp && cd vpp
git checkout v19.01.1
git cherry-pick 97dcf5bd26ca6de580943f5d39681f0144782c3d
git cherry-pick f5dc9fbf814865b31b52b20f5bf959e9ff818b25
~~~

NOTE: Cherry-picks are required for better integration with SPDK. They are
already merged to VPP 19.04.

NOTE: We have noticed that VPP tries to close connections to the non existing,
already closed applications, after timeout. It causes intermittent VPP application
segfaults when few instances of VPP clients connects and disconnects several times.
The following workaround for this issue helps to create more stable environment
for VPP v19.01.1. This issue should be solved in the next release of VPP.
~~~
git apply test/common/config/patch/vpp/workaround-dont-notify-transport-closing.patch
git checkout stable/1904
~~~

Install VPP build dependencies
@@ -93,8 +79,6 @@ DPDK section (`dpdk`):
- `num-rx-queues <num>` -- number of receive queues.
- `num-tx-queues <num>` -- number of transmit queues.
- `dev <PCI address>` -- whitelisted device.
- `num-mbufs` -- numbers of allocated buffers. For the most of our scenarios this
parameter requires to be increased over default value.

Session section (`session`):
- `evt_qs_memfd_seg` -- uses a memfd segment for event queues. This is required for SPDK.
@@ -115,9 +99,6 @@ unix {
cpu {
	main-core 1
}
dpdk {
	num-mbufs 128000
}
session {
	evt_qs_memfd_seg
}
+75 −50
Original line number Diff line number Diff line
@@ -284,13 +284,11 @@ enum spdk_vpp_create_type {
 * VPP message handlers
 */
static void
vl_api_accept_session_t_handler(vl_api_accept_session_t *mp)
session_accepted_handler(session_accepted_msg_t *mp)
{
	svm_fifo_t *rx_fifo, *tx_fifo;
	struct spdk_vpp_session *client_session, *listen_session;

	SPDK_DEBUGLOG(SPDK_SOCK_VPP, "listeners handle is %" PRIu64 "\n", mp->listener_handle);

	pthread_mutex_lock(&g_svm.session_get_lock);
	listen_session = _spdk_vpp_session_get_by_handle(mp->listener_handle, true);
	pthread_mutex_unlock(&g_svm.session_get_lock);
@@ -299,6 +297,8 @@ vl_api_accept_session_t_handler(vl_api_accept_session_t *mp)
		return;
	}

	SPDK_DEBUGLOG(SPDK_SOCK_VPP, "Listeners handle is %" PRIu64 "\n", mp->listener_handle);

	/* Allocate local session for a client and set it up */
	client_session = _spdk_vpp_session_create();
	if (client_session == NULL) {
@@ -343,7 +343,7 @@ vl_api_accept_session_t_handler(vl_api_accept_session_t *mp)
}

static void
vl_api_connect_session_reply_t_handler(vl_api_connect_session_reply_t *mp)
session_connected_handler(session_connected_msg_t *mp)
{
	struct spdk_vpp_session *session;
	svm_fifo_t *rx_fifo, *tx_fifo;
@@ -380,14 +380,15 @@ vl_api_connect_session_reply_t_handler(vl_api_connect_session_reply_t *mp)
}

static void
vl_api_disconnect_session_t_handler(vl_api_disconnect_session_t *mp)
session_disconnected_handler(session_disconnected_msg_t *mp)
{
	struct spdk_vpp_session *session = 0;

	pthread_mutex_lock(&g_svm.session_get_lock);
	session = _spdk_vpp_session_get_by_handle(mp->handle, false);
	if (session == NULL) {
		SPDK_ERRLOG("Invalid session handler (%" PRIu64 ").\n", mp->handle);
		SPDK_ERRLOG("Session with handle=%" PRIu64 " not found.\n",
			    mp->handle);
		pthread_mutex_unlock(&g_svm.session_get_lock);
		return;
	}
@@ -399,16 +400,18 @@ vl_api_disconnect_session_t_handler(vl_api_disconnect_session_t *mp)
}

static void
vl_api_reset_session_t_handler(vl_api_reset_session_t *mp)
session_reset_handler(session_reset_msg_t *mp)
{
	vl_api_reset_session_reply_t *rmp;
	int rv = 0;
	struct spdk_vpp_session *session = 0;
	struct spdk_vpp_session *session = NULL;
	app_session_evt_t app_evt;
	session_reset_reply_msg_t *rmp;

	pthread_mutex_lock(&g_svm.session_get_lock);
	session = _spdk_vpp_session_get_by_handle(mp->handle, false);
	if (session == NULL) {
		SPDK_ERRLOG("Invalid session handler (%" PRIu64 ").\n", mp->handle);
		SPDK_ERRLOG("Session with handle=%" PRIu64 " not found.\n",
			    mp->handle);
		pthread_mutex_unlock(&g_svm.session_get_lock);
		return;
	}
@@ -417,19 +420,16 @@ vl_api_reset_session_t_handler(vl_api_reset_session_t *mp)
	session->app_session.session_state = VPP_SESSION_STATE_DISCONNECT;
	pthread_mutex_unlock(&g_svm.session_get_lock);

	rmp = vl_msg_api_alloc(sizeof(*rmp));
	if (rmp == NULL) {
		return;
	}
	memset(rmp, 0, sizeof(*rmp));
	rmp->_vl_msg_id = ntohs(VL_API_RESET_SESSION_REPLY);
	app_alloc_ctrl_evt_to_vpp(session->app_session.vpp_evt_q, &app_evt,
				  SESSION_CTRL_EVT_RESET_REPLY);
	rmp = (session_reset_reply_msg_t *) app_evt.evt->data;
	rmp->retval = rv;
	rmp->handle = mp->handle;
	vl_msg_api_send_shmem(g_svm.vl_input_queue, (u8 *)&rmp);
	app_send_ctrl_evt_to_vpp(session->app_session.vpp_evt_q, &app_evt);
}

static void
vl_api_bind_sock_reply_t_handler(vl_api_bind_sock_reply_t *mp)
session_bound_handler(session_bound_msg_t *mp)
{
	struct spdk_vpp_session *session;

@@ -459,7 +459,7 @@ vl_api_bind_sock_reply_t_handler(vl_api_bind_sock_reply_t *mp)
}

static void
vl_api_unbind_sock_reply_t_handler(vl_api_unbind_sock_reply_t *mp)
session_unlisten_reply_handler(session_unlisten_reply_msg_t *mp)
{
	struct spdk_vpp_session *session;

@@ -478,6 +478,33 @@ vl_api_unbind_sock_reply_t_handler(vl_api_unbind_sock_reply_t *mp)
	session->app_session.session_state = VPP_SESSION_STATE_CLOSE;
}

/*
 * Dispatch one control event received on the application message queue
 * to the matching session handler. Events carry their payload inline in
 * e->data; unrecognized event types are logged at debug level and dropped.
 */
static void
handle_mq_event(session_event_t *e)
{
	void *payload = e->data;

	if (e->event_type == SESSION_CTRL_EVT_BOUND) {
		session_bound_handler((session_bound_msg_t *) payload);
	} else if (e->event_type == SESSION_CTRL_EVT_ACCEPTED) {
		session_accepted_handler((session_accepted_msg_t *) payload);
	} else if (e->event_type == SESSION_CTRL_EVT_CONNECTED) {
		session_connected_handler((session_connected_msg_t *) payload);
	} else if (e->event_type == SESSION_CTRL_EVT_DISCONNECTED) {
		session_disconnected_handler((session_disconnected_msg_t *) payload);
	} else if (e->event_type == SESSION_CTRL_EVT_RESET) {
		session_reset_handler((session_reset_msg_t *) payload);
	} else if (e->event_type == SESSION_CTRL_EVT_UNLISTEN_REPLY) {
		session_unlisten_reply_handler((session_unlisten_reply_msg_t *) payload);
	} else {
		SPDK_DEBUGLOG(SPDK_SOCK_VPP, "Unhandled event %u\n", e->event_type);
	}
}

static int
vpp_queue_poller(void *ctx)
{
@@ -494,12 +521,15 @@ vpp_queue_poller(void *ctx)
/*
 * Poller draining the VPP application event queue: consumes at most one
 * control message per invocation, hands it to handle_mq_event(), then
 * releases the queue slot. Always returns 0 (poller continues).
 */
static int
app_queue_poller(void *ctx)
{
	svm_msg_q_msg_t mq_msg;
	session_event_t *evt;

	if (svm_msg_q_is_empty(g_svm.app_event_queue)) {
		return 0;
	}

	svm_msg_q_sub(g_svm.app_event_queue, &mq_msg, SVM_Q_WAIT, 0);
	evt = svm_msg_q_msg_data(g_svm.app_event_queue, &mq_msg);
	handle_mq_event(evt);
	svm_msg_q_free_msg(g_svm.app_event_queue, &mq_msg);

	return 0;
}

@@ -580,22 +610,21 @@ _spdk_vpp_session_disconnect(struct spdk_vpp_session *session)
{
	int rv = 0;
	vl_api_disconnect_session_t *dmp;
	vl_api_disconnect_session_reply_t *rmp;
	session_disconnected_reply_msg_t *rmp;
	app_session_evt_t app_evt;

	if (session->app_session.session_state == VPP_SESSION_STATE_DISCONNECT) {
		SPDK_DEBUGLOG(SPDK_SOCK_VPP, "Session is already in disconnecting state %p (%d)\n",
			      session, session->id);

		rmp = vl_msg_api_alloc(sizeof(*rmp));
		if (rmp == NULL) {
			return -ENOMEM;
		}
		memset(rmp, 0, sizeof(*rmp));

		rmp->_vl_msg_id = ntohs(VL_API_DISCONNECT_SESSION_REPLY);
		app_alloc_ctrl_evt_to_vpp(session->app_session.vpp_evt_q, &app_evt,
					  SESSION_CTRL_EVT_DISCONNECTED_REPLY);
		rmp = (session_disconnected_reply_msg_t *) app_evt.evt->data;
		rmp->retval = rv;
		rmp->handle = session->handle;
		vl_msg_api_send_shmem(g_svm.vl_input_queue, (u8 *)&rmp);
		rmp->context = session->context;
		app_send_ctrl_evt_to_vpp(session->app_session.vpp_evt_q, &app_evt);

		return 0;
	}
	SPDK_DEBUGLOG(SPDK_SOCK_VPP, "Disconnect session %p (%d)\n", session, session->id);
@@ -747,8 +776,8 @@ spdk_vpp_sock_accept(struct spdk_sock *_sock)
	struct spdk_vpp_session *client_session = NULL;
	u32 client_session_index = ~0;
	uword elts = 0;
	int rv = 0;
	vl_api_accept_session_reply_t *rmp;
	app_session_evt_t app_evt;
	session_accepted_reply_msg_t *rmp;

	assert(listen_session != NULL);
	assert(g_svm.vpp_initialized);
@@ -790,16 +819,12 @@ spdk_vpp_sock_accept(struct spdk_sock *_sock)
	/*
	 * Send accept session reply
	 */
	rmp = vl_msg_api_alloc(sizeof(*rmp));
	if (rmp == NULL) {
		return NULL;
	}
	memset(rmp, 0, sizeof(*rmp));
	rmp->_vl_msg_id = ntohs(VL_API_ACCEPT_SESSION_REPLY);
	rmp->retval = htonl(rv);
	rmp->context = client_session->context;
	app_alloc_ctrl_evt_to_vpp(client_session->app_session.vpp_evt_q, &app_evt,
				  SESSION_CTRL_EVT_ACCEPTED_REPLY);
	rmp = (session_accepted_reply_msg_t *) app_evt.evt->data;
	rmp->handle = client_session->handle;
	vl_msg_api_send_shmem(g_svm.vl_input_queue, (u8 *)&rmp);
	rmp->context = client_session->context;
	app_send_ctrl_evt_to_vpp(client_session->app_session.vpp_evt_q, &app_evt);

	return &client_session->base;
}
@@ -843,6 +868,8 @@ spdk_vpp_sock_recv(struct spdk_sock *_sock, void *buf, size_t len)
	if (bytes == 0) {
		if (session->app_session.session_state == VPP_SESSION_STATE_DISCONNECT) {
			/* Socket is disconnected */
			SPDK_DEBUGLOG(SPDK_SOCK_VPP, "Client %p(%" PRIu32 ") is disconnected.\n",
				      session, session->id);
			errno = 0;
			return 0;
		}
@@ -850,7 +877,7 @@ spdk_vpp_sock_recv(struct spdk_sock *_sock, void *buf, size_t len)
		return -1;
	}

	rc = svm_fifo_dequeue_nowait(rx_fifo, bytes, buf);
	rc = app_recv_stream_raw(rx_fifo, buf, bytes, 0, 0);
	if (rc < 0) {
		errno = -rc;
		return rc;
@@ -901,7 +928,7 @@ spdk_vpp_sock_writev(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
	assert(g_svm.vpp_initialized);

	tx_fifo = session->app_session.tx_fifo;
	et = FIFO_EVENT_APP_TX;
	et = SESSION_IO_EVT_TX;

	for (i = 0; i < iovcnt; ++i) {
		if (svm_fifo_is_full(tx_fifo)) {
@@ -911,7 +938,9 @@ spdk_vpp_sock_writev(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)

		/* We use only stream connection for now */
		rc = app_send_stream_raw(tx_fifo, session->app_session.vpp_evt_q,
					 iov[i].iov_base, iov[i].iov_len, et, SVM_Q_WAIT);
					 iov[i].iov_base, iov[i].iov_len, et,
					 1, SVM_Q_WAIT);

		if (rc < 0) {
			if (total > 0) {
				break;
@@ -1102,7 +1131,9 @@ _spdk_vpp_app_attach(void)
	bmp->client_index = g_svm.my_client_index;
	bmp->context = ntohl(0xfeedface);

	bmp->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_ADD_SEGMENT;
	bmp->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_ACCEPT_REDIRECT;
	bmp->options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_ADD_SEGMENT;

	bmp->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 16;
	bmp->options[APP_OPTIONS_RX_FIFO_SIZE] = fifo_size;
	bmp->options[APP_OPTIONS_TX_FIFO_SIZE] = fifo_size;
@@ -1311,13 +1342,7 @@ spdk_vpp_net_framework_set_handlers(void)
			vl_api_##n##_t_print,		\
			sizeof(vl_api_##n##_t), 1);
	_(SESSION_ENABLE_DISABLE_REPLY, session_enable_disable_reply)   \
	_(BIND_SOCK_REPLY, bind_sock_reply)                     \
	_(UNBIND_SOCK_REPLY, unbind_sock_reply)                 \
	_(ACCEPT_SESSION, accept_session)                       \
	_(CONNECT_SESSION_REPLY, connect_session_reply)         \
	_(DISCONNECT_SESSION, disconnect_session)               \
	_(DISCONNECT_SESSION_REPLY, disconnect_session_reply)   \
	_(RESET_SESSION, reset_session)                         \
	_(APPLICATION_ATTACH_REPLY, application_attach_reply)   \
	_(APPLICATION_DETACH_REPLY, application_detach_reply)	\
	_(MAP_ANOTHER_SEGMENT, map_another_segment)
+1 −1
Original line number Diff line number Diff line
@@ -179,7 +179,7 @@ if [ -d /usr/include/rbd ] && [ -d /usr/include/rados ] && [ $SPDK_TEST_RBD -eq
fi

if [ $SPDK_TEST_VPP -eq 1 ]; then
	VPP_PATH="/usr/local/src/vpp/build-root/install-vpp_debug-native/vpp/"
	VPP_PATH="/usr/local/src/vpp-19.04/build-root/install-vpp_debug-native/vpp/"
	export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${VPP_PATH}/lib/
	export PATH=${PATH}:${VPP_PATH}/bin/
	config_params+=" --with-vpp=${VPP_PATH}"
+7 −6
Original line number Diff line number Diff line
diff --git a/Makefile b/Makefile
index 900c1efb4..4889eefbe 100644
index 8c7f3523f..b6a79529c 100644
--- a/Makefile
+++ b/Makefile
@@ -92,9 +92,11 @@ RPM_DEPENDS += ninja-build
 RPM_DEPENDS += libuuid-devel
@@ -90,10 +90,12 @@ RPM_DEPENDS += libuuid-devel
 RPM_DEPENDS += mbedtls-devel
 
 ifeq ($(OS_ID),fedora)
-	RPM_DEPENDS += dnf-utils
@@ -12,8 +12,9 @@ index 900c1efb4..4889eefbe 100644
+	endif
 	RPM_DEPENDS += subunit subunit-devel
-	RPM_DEPENDS += compat-openssl10-devel
-	RPM_DEPENDS += python2-devel python34-ply
+	RPM_DEPENDS += openssl-devel
 	RPM_DEPENDS += python2-devel python2-ply
+	RPM_DEPENDS += python2-devel
 	RPM_DEPENDS += python2-virtualenv
 	RPM_DEPENDS += mbedtls-devel
 	RPM_DEPENDS += cmake
 	RPM_DEPENDS_GROUPS = 'C Development Tools and Libraries'
+24 −52
Original line number Diff line number Diff line
diff --git a/Makefile b/Makefile
index 900c1efb4..4a2aa231e 100644
index 8c7f3523f..20814ee8d 100644
--- a/Makefile
+++ b/Makefile
@@ -94,7 +94,7 @@ RPM_DEPENDS += libuuid-devel
@@ -92,8 +92,8 @@ RPM_DEPENDS += mbedtls-devel
 ifeq ($(OS_ID),fedora)
 	RPM_DEPENDS += dnf-utils
 	RPM_DEPENDS += subunit subunit-devel
-	RPM_DEPENDS += compat-openssl10-devel
-	RPM_DEPENDS += python2-devel python34-ply
+	RPM_DEPENDS += openssl-devel
 	RPM_DEPENDS += python2-devel python2-ply
+	RPM_DEPENDS += python2-devel
 	RPM_DEPENDS += python2-virtualenv
 	RPM_DEPENDS += mbedtls-devel
 	RPM_DEPENDS += cmake
 	RPM_DEPENDS_GROUPS = 'C Development Tools and Libraries'
diff --git a/build/external/packages/dpdk.mk b/build/external/packages/dpdk.mk
index 6c46ac298..227a0772d 100644
index a551151bb..b0258017a 100644
--- a/build/external/packages/dpdk.mk
+++ b/build/external/packages/dpdk.mk
@@ -148,7 +148,7 @@ endif
@@ -147,7 +147,7 @@ endif
 endif
 endif
 
@@ -24,51 +26,21 @@ index 6c46ac298..227a0772d 100644
 
 # assemble DPDK make arguments
 DPDK_MAKE_ARGS := -C $(DPDK_SOURCE) -j $(JOBS) \
diff --git a/src/cmake/memfd.cmake b/src/cmake/memfd.cmake
index ca499c459..f7eec2c10 100644
--- a/src/cmake/memfd.cmake
+++ b/src/cmake/memfd.cmake
@@ -24,3 +24,12 @@ if (HAVE_MEMFD_CREATE)
     add_definitions(-DHAVE_MEMFD_CREATE)
 endif()
 
+check_c_source_compiles("
+  #define _GNU_SOURCE
+  #include <sched.h>
+  int main() { return getcpu (0, 0); }
+" HAVE_GETCPU)
+
+if (HAVE_GETCPU)
+  add_definitions(-DHAVE_GETCPU)
+endif()
diff --git a/src/vppinfra/linux/syscall.h b/src/vppinfra/linux/syscall.h
index 1ae029d58..99d1a3ab6 100644
--- a/src/vppinfra/linux/syscall.h
+++ b/src/vppinfra/linux/syscall.h
@@ -19,11 +19,13 @@
 #include <unistd.h>
 #include <sys/syscall.h>
diff --git a/src/plugins/crypto_ia32/CMakeLists.txt b/src/plugins/crypto_ia32/CMakeLists.txt
index a100cdbb6..92e408098 100644
--- a/src/plugins/crypto_ia32/CMakeLists.txt
+++ b/src/plugins/crypto_ia32/CMakeLists.txt
@@ -22,3 +22,4 @@ add_vpp_plugin(crypto_ia32
 )
 
+#ifndef HAVE_GETCPU
 static inline int
 getcpu (unsigned *cpu, unsigned *node, void *tcache)
 {
   return syscall (__NR_getcpu, cpu, node, tcache);
 }
+#endif
 target_compile_options(crypto_ia32_plugin PRIVATE "-march=silvermont")
+target_compile_options(crypto_ia32_plugin PRIVATE "-maes")
diff --git a/src/plugins/crypto_ipsecmb/CMakeLists.txt b/src/plugins/crypto_ipsecmb/CMakeLists.txt
index 0d08032c0..6a7eb148f 100644
--- a/src/plugins/crypto_ipsecmb/CMakeLists.txt
+++ b/src/plugins/crypto_ipsecmb/CMakeLists.txt
@@ -39,3 +39,4 @@ else()
 endif()
 
 static inline long
 set_mempolicy (int mode, const unsigned long *nodemask, unsigned long maxnode)
diff --git a/src/vppinfra/pmalloc.c b/src/vppinfra/pmalloc.c
index 365ee0443..ed1c0329f 100644
--- a/src/vppinfra/pmalloc.c
+++ b/src/vppinfra/pmalloc.c
@@ -53,7 +53,7 @@ pmalloc_validate_numa_node (u32 * numa_node)
   if (*numa_node == CLIB_PMALLOC_NUMA_LOCAL)
     {
       u32 cpu;
-      if (getcpu (&cpu, numa_node, 0) != 0)
+      if (getcpu (&cpu, numa_node) != 0)
 	return 1;
     }
   return 0;
 target_compile_options(crypto_ipsecmb_plugin PRIVATE "-march=silvermont")
+target_compile_options(crypto_ipsecmb_plugin PRIVATE "-maes")
Loading