2
0
mirror of https://github.com/openvswitch/ovs synced 2025-08-31 06:15:47 +00:00

acinclude: Add separate checks for AVX512 ISA.

Checking for each of the required AVX512 ISA separately will allow the
compiler to generate some AVX512 code where there is some support in the
compiler rather than only generating all AVX512 code when all of it is
supported or no AVX512 code at all.

For example, in GCC 4.9 where there is just support for AVX512F, this
patch will allow building the AVX512 DPIF.

Another example, in GCC 5 and 6, most AVX512 code can be generated, just
without AVX512VPOPCNTDQ support.

Signed-off-by: Cian Ferriter <cian.ferriter@intel.com>
Acked-by: Sunil Pai G <sunil.pai.g@intel.com>
Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
This commit is contained in:
Cian Ferriter
2022-05-17 10:08:18 +00:00
committed by Ilya Maximets
parent fb85ae4340
commit cb1c640077
7 changed files with 129 additions and 52 deletions

View File

@@ -73,16 +73,13 @@ AC_DEFUN([OVS_CHECK_DPIF_AVX512_DEFAULT], [
dnl OVS_CHECK_AVX512
dnl
dnl Checks if compiler and binutils supports AVX512.
dnl Checks if compiler and binutils supports various AVX512 ISA.
AC_DEFUN([OVS_CHECK_AVX512], [
OVS_CHECK_BINUTILS_AVX512
OVS_CHECK_CC_OPTION(
[-mavx512f -mavx512vpopcntdq], [ovs_have_cc_mavx512f=yes], [ovs_have_cc_mavx512f=no])
AM_CONDITIONAL([HAVE_AVX512F], [test $ovs_have_cc_mavx512f = yes])
if test "$ovs_have_cc_mavx512f" = yes; then
AC_DEFINE([HAVE_AVX512F], [1],
[Define to 1 if compiler supports AVX512.])
fi
OVS_CONDITIONAL_CC_OPTION_DEFINE([-mavx512f], [HAVE_AVX512F])
OVS_CONDITIONAL_CC_OPTION_DEFINE([-mavx512bw], [HAVE_AVX512BW])
OVS_CONDITIONAL_CC_OPTION_DEFINE([-mavx512vbmi], [HAVE_AVX512VBMI])
OVS_CONDITIONAL_CC_OPTION_DEFINE([-mavx512vpopcntdq], [HAVE_AVX512VPOPCNTDQ])
])
dnl OVS_ENABLE_WERROR
@@ -1360,6 +1357,19 @@ AC_DEFUN([OVS_CONDITIONAL_CC_OPTION],
AM_CONDITIONAL([$2], [test $ovs_have_cc_option = yes])])
dnl ----------------------------------------------------------------------
dnl OVS_CONDITIONAL_CC_OPTION_DEFINE([OPTION], [CONDITIONAL])
dnl Check whether the given C compiler OPTION is accepted.
dnl If so, enable the given Automake CONDITIONAL and define it.
dnl Example: OVS_CONDITIONAL_CC_OPTION_DEFINE([-mavx512f], [HAVE_AVX512F])
dnl
dnl Unlike OVS_CONDITIONAL_CC_OPTION, this also AC_DEFINEs CONDITIONAL as a
dnl C preprocessor macro (set to 1), so the support check is visible both to
dnl Automake (conditional sources/flags) and to the C code via config.h.
dnl Note: ovs_have_cc_option is reused across invocations; the AM_CONDITIONAL
dnl and the following test both read the value set by the check just above.
AC_DEFUN([OVS_CONDITIONAL_CC_OPTION_DEFINE],
[OVS_CHECK_CC_OPTION(
[$1], [ovs_have_cc_option=yes], [ovs_have_cc_option=no])
AM_CONDITIONAL([$2], [test $ovs_have_cc_option = yes])
if test "$ovs_have_cc_option" = yes; then
AC_DEFINE([$2], [1],
[Define to 1 if compiler supports the '$1' option.])
fi])
dnl Check for too-old XenServer.
AC_DEFUN([OVS_CHECK_XENSERVER_VERSION],
[AC_CACHE_CHECK([XenServer release], [ovs_cv_xsversion],

View File

@@ -31,7 +31,6 @@ lib_LTLIBRARIES += lib/libopenvswitchavx512.la
lib_libopenvswitch_la_LIBADD += lib/libopenvswitchavx512.la
lib_libopenvswitchavx512_la_CFLAGS = \
-mavx512f \
-mavx512bw \
-mbmi \
-mbmi2 \
-fPIC \
@@ -39,13 +38,18 @@ lib_libopenvswitchavx512_la_CFLAGS = \
lib_libopenvswitchavx512_la_SOURCES = \
lib/cpu.c \
lib/cpu.h \
lib/dpif-netdev-lookup-avx512-gather.c \
lib/dpif-netdev-extract-avx512.c \
lib/dpif-netdev-avx512.c
if HAVE_AVX512BW
lib_libopenvswitchavx512_la_CFLAGS += \
-mavx512bw
lib_libopenvswitchavx512_la_SOURCES += \
lib/dpif-netdev-extract-avx512.c \
lib/dpif-netdev-lookup-avx512-gather.c
endif # HAVE_AVX512BW
lib_libopenvswitchavx512_la_LDFLAGS = \
-static
endif
endif
endif # HAVE_LD_AVX512_GOOD
endif # HAVE_AVX512F
# Build core vswitch libraries as before
lib_libopenvswitch_la_SOURCES = \

View File

@@ -108,13 +108,42 @@ _mm512_maskz_permutex2var_epi8_skx(__mmask64 k_mask,
return v_result_kmskd;
}
/* Wrapper function required to enable ISA. */
/* Wrapper function to enable VBMI ISA required by the
* _mm512_maskz_permutexvar_epi8 intrinsic. */
#if HAVE_AVX512VBMI
/* VBMI is enabled only for this one function via the target attribute, so
 * the rest of the translation unit can still be built at a lower AVX512
 * ISA level. Only compiled when the configure-time HAVE_AVX512VBMI check
 * found compiler support for -mavx512vbmi. */
static inline __m512i
__attribute__((__target__("avx512vbmi")))
_mm512_maskz_permutexvar_epi8_wrap(__mmask64 kmask, __m512i idx, __m512i a)
{
return _mm512_maskz_permutexvar_epi8(kmask, idx, a);
}
#endif
/* Select a byte-permute implementation: the VBMI intrinsic wrapper when the
 * compiler supports VBMI and 'use_vbmi' is a nonzero compile-time constant,
 * otherwise the SKX-level two-source emulation. Behavior of both paths is
 * intended to be equivalent for this use (zero second source); see the
 * _skx helper defined earlier in this file. */
static inline __m512i
_mm512_maskz_permutexvar_epi8_selector(__mmask64 k_shuf, __m512i v_shuf,
__m512i v_pkt0,
const uint32_t use_vbmi OVS_UNUSED)
{
/* Permute the packet layout into miniflow blocks shape. */
__m512i v512_zeros = _mm512_setzero_si512();
__m512i v_blk0;
#if HAVE_AVX512VBMI
/* __builtin_constant_p guards the branch so each specialized caller
 * (which passes a literal 0 or 1) folds this to a single code path at
 * compile time. */
if (__builtin_constant_p(use_vbmi) && use_vbmi) {
/* As different AVX512 ISA levels have different implementations,
* this specializes on the use_vbmi attribute passed in.
*/
v_blk0 = _mm512_maskz_permutexvar_epi8_wrap(k_shuf, v_shuf, v_pkt0);
} else {
v_blk0 = _mm512_maskz_permutex2var_epi8_skx(k_shuf, v_pkt0, v_shuf,
v512_zeros);
}
#else
/* Compiler lacks VBMI support entirely: always use the emulation. */
v_blk0 = _mm512_maskz_permutex2var_epi8_skx(k_shuf, v_pkt0, v_shuf,
v512_zeros);
#endif
return v_blk0;
}
/* This file contains optimized implementations of miniflow_extract()
@@ -481,7 +510,7 @@ mfex_avx512_process(struct dp_packet_batch *packets,
odp_port_t in_port,
void *pmd_handle OVS_UNUSED,
const enum MFEX_PROFILES profile_id,
const uint32_t use_vbmi)
const uint32_t use_vbmi OVS_UNUSED)
{
uint32_t hitmask = 0;
struct dp_packet *packet;
@@ -538,19 +567,9 @@ mfex_avx512_process(struct dp_packet_batch *packets,
_mm_storeu_si128((void *) bits, v_bits);
_mm_storeu_si128((void *) blocks, v_blocks01);
/* Permute the packet layout into miniflow blocks shape.
* As different AVX512 ISA levels have different implementations,
* this specializes on the "use_vbmi" attribute passed in.
*/
__m512i v512_zeros = _mm512_setzero_si512();
__m512i v_blk0;
if (__builtin_constant_p(use_vbmi) && use_vbmi) {
v_blk0 = _mm512_maskz_permutexvar_epi8_wrap(k_shuf, v_shuf,
v_pkt0);
} else {
v_blk0 = _mm512_maskz_permutex2var_epi8_skx(k_shuf, v_pkt0,
v_shuf, v512_zeros);
}
__m512i v_blk0 = _mm512_maskz_permutexvar_epi8_selector(k_shuf, v_shuf,
v_pkt0,
use_vbmi);
__m512i v_blk0_strip = _mm512_and_si512(v_blk0, v_strp);
_mm512_storeu_si512(&blocks[2], v_blk0_strip);
@@ -629,7 +648,8 @@ mfex_avx512_process(struct dp_packet_batch *packets,
}
#define DECLARE_MFEX_FUNC(name, profile) \
#if HAVE_AVX512VBMI
#define VBMI_MFEX_FUNC(name, profile) \
uint32_t \
__attribute__((__target__("avx512vbmi"))) \
mfex_avx512_vbmi_##name(struct dp_packet_batch *packets, \
@@ -639,8 +659,12 @@ mfex_avx512_vbmi_##name(struct dp_packet_batch *packets, \
{ \
return mfex_avx512_process(packets, keys, keys_size, in_port, \
pmd_handle, profile, 1); \
} \
\
}
#else
#define VBMI_MFEX_FUNC(name, profile)
#endif
#define BASIC_MFEX_FUNC(name, profile) \
uint32_t \
mfex_avx512_##name(struct dp_packet_batch *packets, \
struct netdev_flow_key *keys, uint32_t keys_size, \
@@ -651,6 +675,10 @@ mfex_avx512_##name(struct dp_packet_batch *packets, \
pmd_handle, profile, 0); \
}
#define DECLARE_MFEX_FUNC(name, profile) \
VBMI_MFEX_FUNC(name, profile) \
BASIC_MFEX_FUNC(name, profile) \
/* Each profile gets a single declare here, which specializes the function
* as required.
*/

View File

@@ -78,22 +78,26 @@ _mm512_popcnt_epi64_manual(__m512i v_in)
return _mm512_sad_epu8(v_u8_pop, _mm512_setzero_si512());
}
/* Wrapper function required to enable ISA. First enable the ISA via the
* attribute target for this function, then check if the compiler actually
* #defines the ISA itself. If the ISA is not #define-ed by the compiler it
* indicates the compiler is too old or is not capable of compiling the
* requested ISA level, so fallback to the integer manual implementation.
/* Wrapper function required to enable ISA. First check if the compiler
* supports the ISA itself. If the ISA is supported, enable it via the
* attribute target. If the ISA is not supported by the compiler it indicates
* the compiler is too old or is not capable of compiling the requested ISA
level, so fall back to the integer manual implementation.
*/
#if HAVE_AVX512VPOPCNTDQ
/* Compiler supports -mavx512vpopcntdq (configure-time check): enable the
 * ISA for just this function via the target attribute and use the native
 * per-qword popcount intrinsic. */
static inline __m512i
__attribute__((__target__("avx512vpopcntdq")))
_mm512_popcnt_epi64_wrapper(__m512i v_in)
{
return _mm512_popcnt_epi64(v_in);
}
#else
/* Compiler is too old for VPOPCNTDQ: same signature, but fall back to the
 * manual integer popcount emulation defined earlier in this file. */
static inline __m512i
_mm512_popcnt_epi64_wrapper(__m512i v_in)
{
return _mm512_popcnt_epi64_manual(v_in);
}
#endif
static inline uint64_t
netdev_rule_matches_key(const struct dpcls_rule *rule,
@@ -334,6 +338,19 @@ avx512_lookup_impl(struct dpcls_subtable *subtable,
return found_map;
}
/* Use a different pattern to conditionally use the VPOPCNTDQ target attribute
* here.
* The usual pattern using a '#if HAVE_AVX512VPOPCNTDQ' type check won't work
* inside a macro.
* Define VPOPCNTDQ_TARGET which will either be the "avx512vpopcntdq" target
* attribute or nothing depending on AVX512VPOPCNTDQ support in the compiler.
*/
#if HAVE_AVX512VPOPCNTDQ
#define VPOPCNTDQ_TARGET __attribute__((__target__("avx512vpopcntdq")))
#else
#define VPOPCNTDQ_TARGET
#endif
/* Expand out specialized functions with U0 and U1 bit attributes. As the
* AVX512 vpopcnt instruction is not supported on all AVX512 capable CPUs,
* create two functions for each miniflow signature. This allows the runtime
@@ -351,7 +368,7 @@ avx512_lookup_impl(struct dpcls_subtable *subtable,
U0, U1, use_vpop); \
} \
\
static uint32_t __attribute__((__target__("avx512vpopcntdq"))) \
static uint32_t VPOPCNTDQ_TARGET \
dpcls_avx512_gather_mf_##U0##_##U1##_vpop(struct dpcls_subtable *subtable,\
uint32_t keys_map, \
const struct netdev_flow_key *keys[], \

View File

@@ -45,7 +45,8 @@ static struct dpcls_subtable_lookup_info_t subtable_lookups[] = {
.name = "generic",
.usage_cnt = ATOMIC_COUNT_INIT(0), },
#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && HAVE_AVX512BW \
&& __SSE4_2__)
/* Only available on x86_64 bit builds with SSE 4.2 used for OVS core. */
{ .prio = 0,
.probe = dpcls_subtable_avx512_gather_probe,

View File

@@ -54,42 +54,44 @@ static struct dpif_miniflow_extract_impl mfex_impls[] = {
.name = "study", },
/* Compile in implementations only if the compiler ISA checks pass. */
#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && HAVE_AVX512BW \
&& __SSE4_2__)
#if HAVE_AVX512VBMI
[MFEX_IMPL_VBMI_IPv4_UDP] = {
.probe = mfex_avx512_vbmi_probe,
.extract_func = mfex_avx512_vbmi_ip_udp,
.name = "avx512_vbmi_ipv4_udp", },
#endif
[MFEX_IMPL_IPv4_UDP] = {
.probe = mfex_avx512_probe,
.extract_func = mfex_avx512_ip_udp,
.name = "avx512_ipv4_udp", },
#if HAVE_AVX512VBMI
[MFEX_IMPL_VBMI_IPv4_TCP] = {
.probe = mfex_avx512_vbmi_probe,
.extract_func = mfex_avx512_vbmi_ip_tcp,
.name = "avx512_vbmi_ipv4_tcp", },
#endif
[MFEX_IMPL_IPv4_TCP] = {
.probe = mfex_avx512_probe,
.extract_func = mfex_avx512_ip_tcp,
.name = "avx512_ipv4_tcp", },
#if HAVE_AVX512VBMI
[MFEX_IMPL_VBMI_DOT1Q_IPv4_UDP] = {
.probe = mfex_avx512_vbmi_probe,
.extract_func = mfex_avx512_vbmi_dot1q_ip_udp,
.name = "avx512_vbmi_dot1q_ipv4_udp", },
#endif
[MFEX_IMPL_DOT1Q_IPv4_UDP] = {
.probe = mfex_avx512_probe,
.extract_func = mfex_avx512_dot1q_ip_udp,
.name = "avx512_dot1q_ipv4_udp", },
#if HAVE_AVX512VBMI
[MFEX_IMPL_VBMI_DOT1Q_IPv4_TCP] = {
.probe = mfex_avx512_vbmi_probe,
.extract_func = mfex_avx512_vbmi_dot1q_ip_tcp,
.name = "avx512_vbmi_dot1q_ipv4_tcp", },
#endif
[MFEX_IMPL_DOT1Q_IPv4_TCP] = {
.probe = mfex_avx512_probe,
.extract_func = mfex_avx512_dot1q_ip_tcp,

View File

@@ -81,14 +81,23 @@ enum dpif_miniflow_extract_impl_idx {
MFEX_IMPL_AUTOVALIDATOR,
MFEX_IMPL_SCALAR,
MFEX_IMPL_STUDY,
#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && HAVE_AVX512BW \
&& __SSE4_2__)
#if HAVE_AVX512VBMI
MFEX_IMPL_VBMI_IPv4_UDP,
#endif
MFEX_IMPL_IPv4_UDP,
#if HAVE_AVX512VBMI
MFEX_IMPL_VBMI_IPv4_TCP,
#endif
MFEX_IMPL_IPv4_TCP,
#if HAVE_AVX512VBMI
MFEX_IMPL_VBMI_DOT1Q_IPv4_UDP,
#endif
MFEX_IMPL_DOT1Q_IPv4_UDP,
#if HAVE_AVX512VBMI
MFEX_IMPL_VBMI_DOT1Q_IPv4_TCP,
#endif
MFEX_IMPL_DOT1Q_IPv4_TCP,
#endif
MFEX_IMPL_MAX
@@ -99,9 +108,15 @@ extern struct ovs_mutex dp_netdev_mutex;
/* Define a index which points to the first traffic optimized MFEX
* option from the enum list else holds max value.
*/
#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && __SSE4_2__)
#if (__x86_64__ && HAVE_AVX512F && HAVE_LD_AVX512_GOOD && HAVE_AVX512BW \
&& __SSE4_2__)
#if HAVE_AVX512VBMI
#define MFEX_IMPL_START_IDX MFEX_IMPL_VBMI_IPv4_UDP
#else
#define MFEX_IMPL_START_IDX MFEX_IMPL_IPv4_UDP
#endif
#else
#define MFEX_IMPL_START_IDX MFEX_IMPL_MAX