/* ovs/lib/dpif-netdev-lookup.c (mirror of https://github.com/openvswitch/ovs) */
/*
* Copyright (c) 2020 Intel Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <config.h>
#include <errno.h>
#include "dpif-netdev-lookup.h"
/* NOTE: cpu.c is built into the normal libopenvswitch rather than the
 * AVX512-enabled static library, so cpu_has_isa() and the probe helpers in
 * this file are safe to call at runtime on CPUs without AVX512 support. */
#include "cpu.h"
#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(dpif_netdev_lookup);
#define DPCLS_IMPL_AVX512_CHECK (__x86_64__ && HAVE_AVX512F \
&& HAVE_LD_AVX512_GOOD && HAVE_AVX512BW && __SSE4_2__)
#if DPCLS_IMPL_AVX512_CHECK
/* Runtime ISA probing must stay out of AVX512-compiled code; only the
 * inner dpcls_subtable_avx512_gather_probe__() lives in the AVX512 lib. */
static dpcls_subtable_lookup_func
dpcls_subtable_avx512_gather_probe(uint32_t u0_bits, uint32_t u1_bits)
{
if (!cpu_has_isa(OVS_CPU_ISA_X86_AVX512F)
|| !cpu_has_isa(OVS_CPU_ISA_X86_BMI2)) {
return NULL;
}
return dpcls_subtable_avx512_gather_probe__(u0_bits, u1_bits,
cpu_has_isa(OVS_CPU_ISA_X86_VPOPCNTDQ));
}
#endif
/* Actual list of implementations goes here */
static struct dpcls_subtable_lookup_info_t subtable_lookups[] = {
/* The autovalidator implementation will not be used by default, it must
* be enabled at compile time to be the default lookup implementation. The
* user may enable it at runtime using the normal "prio-set" command if
* desired. The compile time default switch is here to enable all unit
* tests to transparently run with the autovalidator.
*/
#ifdef DPCLS_AUTOVALIDATOR_DEFAULT
{ .prio = 255,
#else
{ .prio = 0,
#endif
.probe = dpcls_subtable_autovalidator_probe,
.name = "autovalidator",
.usage_cnt = ATOMIC_COUNT_INIT(0), },
/* The default scalar C code implementation. */
{ .prio = 1,
.probe = dpcls_subtable_generic_probe,
.name = "generic",
.usage_cnt = ATOMIC_COUNT_INIT(0), },
dpif-lookup: add avx512 gather implementation. This commit adds an AVX-512 dpcls lookup implementation. It uses the AVX-512 SIMD ISA to perform multiple miniflow operations in parallel. To run this implementation, the "avx512f" and "bmi2" ISAs are required. These ISA checks are performed at runtime while probing the subtable implementation. If a CPU does not provide both "avx512f" and "bmi2", then this code does not execute. The avx512 code is built as a separate static library, with added CFLAGS to enable the required ISA features. By building only this static library with avx512 enabled, it is ensured that the main OVS core library is *not* using avx512, and that OVS continues to run as before on CPUs that do not support avx512. The approach taken in this implementation is to use the gather instruction to access the packet miniflow, allowing any miniflow blocks to be loaded into an AVX-512 register. This maximizes the usefulness of the register, and hence this implementation handles any subtable with up to miniflow 8 bits. Note that specialization of these avx512 lookup routines still provides performance value, as the hashing of the resulting data is performed in scalar code, and compile-time loop unrolling occurs when specialized to miniflow bits. This commit checks at configure time if the assembling in use has a known bug in assembling AVX512 code. If this bug is present, all AVX512 code is disabled. Checking the version string of the binutils or assembler is not a good method to detect the issue, as back ported fixes would not be reflected. Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com> Acked-by: William Tu <u9012063@gmail.com> Signed-off-by: Ian Stokes <ian.stokes@intel.com>
2020-07-13 13:42:14 +01:00
#if DPCLS_IMPL_AVX512_CHECK
dpif-lookup: add avx512 gather implementation. This commit adds an AVX-512 dpcls lookup implementation. It uses the AVX-512 SIMD ISA to perform multiple miniflow operations in parallel. To run this implementation, the "avx512f" and "bmi2" ISAs are required. These ISA checks are performed at runtime while probing the subtable implementation. If a CPU does not provide both "avx512f" and "bmi2", then this code does not execute. The avx512 code is built as a separate static library, with added CFLAGS to enable the required ISA features. By building only this static library with avx512 enabled, it is ensured that the main OVS core library is *not* using avx512, and that OVS continues to run as before on CPUs that do not support avx512. The approach taken in this implementation is to use the gather instruction to access the packet miniflow, allowing any miniflow blocks to be loaded into an AVX-512 register. This maximizes the usefulness of the register, and hence this implementation handles any subtable with up to miniflow 8 bits. Note that specialization of these avx512 lookup routines still provides performance value, as the hashing of the resulting data is performed in scalar code, and compile-time loop unrolling occurs when specialized to miniflow bits. This commit checks at configure time if the assembling in use has a known bug in assembling AVX512 code. If this bug is present, all AVX512 code is disabled. Checking the version string of the binutils or assembler is not a good method to detect the issue, as back ported fixes would not be reflected. Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com> Acked-by: William Tu <u9012063@gmail.com> Signed-off-by: Ian Stokes <ian.stokes@intel.com>
2020-07-13 13:42:14 +01:00
/* Only available on x86_64 bit builds with SSE 4.2 used for OVS core. */
{ .prio = 0,
.probe = dpcls_subtable_avx512_gather_probe,
.name = "avx512_gather",
.usage_cnt = ATOMIC_COUNT_INIT(0), },
dpif-lookup: add avx512 gather implementation. This commit adds an AVX-512 dpcls lookup implementation. It uses the AVX-512 SIMD ISA to perform multiple miniflow operations in parallel. To run this implementation, the "avx512f" and "bmi2" ISAs are required. These ISA checks are performed at runtime while probing the subtable implementation. If a CPU does not provide both "avx512f" and "bmi2", then this code does not execute. The avx512 code is built as a separate static library, with added CFLAGS to enable the required ISA features. By building only this static library with avx512 enabled, it is ensured that the main OVS core library is *not* using avx512, and that OVS continues to run as before on CPUs that do not support avx512. The approach taken in this implementation is to use the gather instruction to access the packet miniflow, allowing any miniflow blocks to be loaded into an AVX-512 register. This maximizes the usefulness of the register, and hence this implementation handles any subtable with up to miniflow 8 bits. Note that specialization of these avx512 lookup routines still provides performance value, as the hashing of the resulting data is performed in scalar code, and compile-time loop unrolling occurs when specialized to miniflow bits. This commit checks at configure time if the assembling in use has a known bug in assembling AVX512 code. If this bug is present, all AVX512 code is disabled. Checking the version string of the binutils or assembler is not a good method to detect the issue, as back ported fixes would not be reflected. Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com> Acked-by: William Tu <u9012063@gmail.com> Signed-off-by: Ian Stokes <ian.stokes@intel.com>
2020-07-13 13:42:14 +01:00
#else
/* Disabling AVX512 at compile time, as compile time requirements not met.
* This could be due to a number of reasons:
* 1) core OVS is not compiled with SSE4.2 instruction set.
* The SSE42 instructions are required to use CRC32 ISA for high-
* performance hashing. Consider ./configure of OVS with -msse42 (or
* newer) to enable CRC32 hashing and higher performance.
* 2) The assembler in binutils versions 2.30 and 2.31 has bugs in AVX512
* assembly. Compile time probes check for this assembler issue, and
* disable the HAVE_LD_AVX512_GOOD check if an issue is detected.
* Please upgrade binutils, or backport this binutils fix commit:
* 2069ccaf8dc28ea699bd901fdd35d90613e4402a
*/
#endif
};
int
dpcls_subtable_lookup_info_get(struct dpcls_subtable_lookup_info_t **out_ptr)
{
if (out_ptr == NULL) {
return -1;
}
*out_ptr = subtable_lookups;
return ARRAY_SIZE(subtable_lookups);
}
/* sets the priority of the lookup function with "name". */
int
dpcls_subtable_set_prio(const char *name, uint8_t priority)
{
for (int i = 0; i < ARRAY_SIZE(subtable_lookups); i++) {
if (strcmp(name, subtable_lookups[i].name) == 0) {
subtable_lookups[i].prio = priority;
VLOG_INFO("Subtable function '%s' set priority to %d\n",
name, priority);
return 0;
}
}
VLOG_WARN("Subtable function '%s' not found, failed to set priority\n",
name);
return -EINVAL;
}
dpcls_subtable_lookup_func
dpcls_subtable_get_best_impl(uint32_t u0_bit_count, uint32_t u1_bit_count,
struct dpcls_subtable_lookup_info_t **info)
{
struct dpcls_subtable_lookup_info_t *best_info = NULL;
dpcls_subtable_lookup_func best_func = NULL;
int prio = -1;
/* Iter over each subtable impl, and get highest priority one. */
for (int i = 0; i < ARRAY_SIZE(subtable_lookups); i++) {
struct dpcls_subtable_lookup_info_t *impl_info = &subtable_lookups[i];
dpcls_subtable_lookup_func probed_func;
if (impl_info->prio <= prio) {
continue;
}
probed_func = subtable_lookups[i].probe(u0_bit_count,
u1_bit_count);
if (!probed_func) {
continue;
}
best_func = probed_func;
best_info = impl_info;
prio = impl_info->prio;
}
/* Programming error - we must always return a valid func ptr. */
ovs_assert(best_func != NULL && best_info != NULL);
VLOG_DBG("Subtable lookup function '%s' with units (%d,%d), priority %d\n",
best_info->name, u0_bit_count, u1_bit_count, prio);
if (info) {
*info = best_info;
}
return best_func;
}
void
dpcls_info_inc_usage(struct dpcls_subtable_lookup_info_t *info)
{
if (info) {
atomic_count_inc(&info->usage_cnt);
}
}
void
dpcls_info_dec_usage(struct dpcls_subtable_lookup_info_t *info)
{
if (info) {
atomic_count_dec(&info->usage_cnt);
}
}
void
dpcls_impl_print_stats(struct ds *reply)
{
struct dpcls_subtable_lookup_info_t *lookup_funcs = NULL;
int count = dpcls_subtable_lookup_info_get(&lookup_funcs);
/* Add all DPCLS functions to reply string. */
ds_put_cstr(reply, "Available dpcls implementations:\n");
for (int i = 0; i < count; i++) {
ds_put_format(reply, " %s (Use count: %d, Priority: %d",
lookup_funcs[i].name,
atomic_count_get(&lookup_funcs[i].usage_cnt),
lookup_funcs[i].prio);
if (ds_last(reply) == ' ') {
ds_put_cstr(reply, "none");
}
ds_put_cstr(reply, ")\n");
}
}