openvswitch/datapath/loop_counter.c
Jesse Gross 7eaa983051 datapath: Add loop detection for RT kernels.
Our normal loop detection requires disabling preemption while
packet processing takes place.  On RT kernels this isn't acceptable
and interacts badly with spinlocks, so we can't use it.  This
takes advantage of some extra space that is added to struct
task_struct on RT kernels (and the knowledge that we will always
have a valid task_struct) to store the loop counter for a given
thread.  Since we can't make these assumptions on non-RT kernels,
we continue to use the previous method of loop detection there.

Signed-off-by: Jesse Gross <jesse@nicira.com>
Acked-by: Ben Pfaff <blp@nicira.com>
2010-10-25 13:40:51 -07:00
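
The header this file includes, loop_counter.h, is not shown on this page. As context for the listing below, here is a minimal sketch of what it plausibly declares, inferred from how loop_counter.c uses it; the field names and the MAX_LOOPS value are assumptions, and struct loop_counter is kept no larger than an unsigned int so the CONFIG_PREEMPT_RT extra_flags trick in the file can work:

/* Hypothetical reconstruction of loop_counter.h (not the actual header).
 * struct loop_counter must fit inside task_struct's unsigned int
 * extra_flags for the RT-kernel path in loop_counter.c to be valid. */
#ifndef LOOP_COUNTER_H
#define LOOP_COUNTER_H 1

#include <linux/types.h>

#define MAX_LOOPS 5			/* assumed limit */

struct datapath;
struct sw_flow_actions;

struct loop_counter {
	u8 count;			/* current recursion depth */
	bool looping;			/* assumed "loop detected" flag */
};

struct loop_counter *loop_get_counter(void);
void loop_put_counter(void);
void loop_suppress(struct datapath *dp, struct sw_flow_actions *actions);

#endif /* LOOP_COUNTER_H */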

/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2010 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched.h>

#include "loop_counter.h"

void loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
	if (net_ratelimit())
		pr_warn("%s: flow looped %d times, dropping\n",
			dp_name(dp), MAX_LOOPS);
	actions->n_actions = 0;
}

#ifndef CONFIG_PREEMPT_RT

/* We use a separate counter for each CPU for both interrupt and non-interrupt
 * context in order to keep the limit deterministic for a given packet. */
struct percpu_loop_counters {
	struct loop_counter counters[2];
};

static DEFINE_PER_CPU(struct percpu_loop_counters, loop_counters);

struct loop_counter *loop_get_counter(void)
{
	/* Index 0 is process context, index 1 is interrupt context;
	 * get_cpu_var() disables preemption until loop_put_counter(). */
	return &get_cpu_var(loop_counters).counters[!!in_interrupt()];
}

void loop_put_counter(void)
{
	put_cpu_var(loop_counters);
}

#else /* !CONFIG_PREEMPT_RT */

struct loop_counter *loop_get_counter(void)
{
	WARN_ON(in_interrupt());

	/* Only two bits of the extra_flags field in struct task_struct are
	 * used and it's an unsigned int.  We hijack the most significant
	 * bits to be our counter structure.  On RT kernels softirqs always
	 * run in process context so we are guaranteed to have a valid
	 * task_struct. */
#ifdef __LITTLE_ENDIAN
	return (void *)(&current->extra_flags + 1) -
		sizeof(struct loop_counter);
#elif __BIG_ENDIAN
	return (struct loop_counter *)&current->extra_flags;
#else
#error "Please fix <asm/byteorder.h>."
#endif
}

void loop_put_counter(void) { }

#endif /* CONFIG_PREEMPT_RT */
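
Neither implementation is useful without a caller; the real user is the datapath's action-execution path, which is not shown here. Below is a hedged sketch of the calling convention both variants are designed around. execute_actions_guarded() and do_execute_actions() are hypothetical names, and the actual datapath code differs in its details:

/* Hypothetical caller, for illustration only.  With the non-RT
 * implementation, loop_get_counter() disables preemption via
 * get_cpu_var(), so the counter must be released on every return
 * path; with the RT implementation it merely derives a pointer from
 * current->extra_flags. */
static int execute_actions_guarded(struct datapath *dp, struct sk_buff *skb,
				   struct sw_flow_actions *acts)
{
	struct loop_counter *loop = loop_get_counter();
	int error;

	if (unlikely(++loop->count > MAX_LOOPS)) {
		/* Too deep: zero the action list and drop the packet. */
		loop_suppress(dp, acts);
		error = -ELOOP;
		goto out;
	}

	error = do_execute_actions(dp, skb, acts);	/* may recurse */

out:
	loop->count--;
	loop_put_counter();
	return error;
}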