mirror of
https://github.com/openvswitch/ovs
synced 2025-09-05 08:45:23 +00:00
coverage: Synchronize per-thread counters less aggressively
When profiling CPU usage in situations involving high numbers of ports, coverage_clear() was highlighted as a commonly called function. It appears that it can be quite expensive to access all of the per-thread coverage counters when threads are constantly waking up. This patch makes each thread run the coverage_clear() logic roughly once per second by introducing per-thread timers. Upcall handler counters may become less accurate, because these threads may go to sleep without synchronizing and not wake up again for some time. With this patch, the main thread's CPU usage under load drops from ~90% to ~85%, and upcall handler threads drop from ~2.5% to ~1.5%. Signed-off-by: Joe Stringer <joestringer@nicira.com> Signed-off-by: Ben Pfaff <blp@nicira.com>
This commit is contained in:
@@ -63,6 +63,7 @@ struct coverage_counter *coverage_counters[] = {
|
||||
|
||||
static struct ovs_mutex coverage_mutex = OVS_MUTEX_INITIALIZER;
|
||||
|
||||
DEFINE_STATIC_PER_THREAD_DATA(long long int, coverage_clear_time, LLONG_MIN);
|
||||
static long long int coverage_run_time = LLONG_MIN;
|
||||
|
||||
/* Index counter used to compute the moving average array's index. */
|
||||
@@ -258,17 +259,33 @@ coverage_read(struct svec *lines)
|
||||
free(totals);
|
||||
}
|
||||
|
||||
/* Runs approximately every COVERAGE_CLEAR_INTERVAL amount of time to
|
||||
* synchronize per-thread counters with global counters. Every thread maintains
|
||||
* a separate timer to ensure all counters are periodically aggregated. */
|
||||
void
|
||||
coverage_clear(void)
|
||||
{
|
||||
size_t i;
|
||||
long long int now, *thread_time;
|
||||
|
||||
ovs_mutex_lock(&coverage_mutex);
|
||||
for (i = 0; i < n_coverage_counters; i++) {
|
||||
struct coverage_counter *c = coverage_counters[i];
|
||||
c->total += c->count();
|
||||
now = time_msec();
|
||||
thread_time = coverage_clear_time_get();
|
||||
|
||||
/* Initialize the coverage_clear_time. */
|
||||
if (*thread_time == LLONG_MIN) {
|
||||
*thread_time = now + COVERAGE_CLEAR_INTERVAL;
|
||||
}
|
||||
|
||||
if (now >= *thread_time) {
|
||||
size_t i;
|
||||
|
||||
ovs_mutex_lock(&coverage_mutex);
|
||||
for (i = 0; i < n_coverage_counters; i++) {
|
||||
struct coverage_counter *c = coverage_counters[i];
|
||||
c->total += c->count();
|
||||
}
|
||||
ovs_mutex_unlock(&coverage_mutex);
|
||||
*thread_time = now + COVERAGE_CLEAR_INTERVAL;
|
||||
}
|
||||
ovs_mutex_unlock(&coverage_mutex);
|
||||
}
|
||||
|
||||
/* Runs approximately every COVERAGE_RUN_INTERVAL amount of time to update the
|
||||
|
Reference in New Issue
Block a user