/*
 * Copyright (c) 2008, 2009, 2010, 2012, 2013, 2015, 2019 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "openvswitch/hmap.h"
#include <stdint.h>
#include <string.h>
#include "coverage.h"
#include "random.h"
#include "util.h"
#include "openvswitch/vlog.h"

VLOG_DEFINE_THIS_MODULE(hmap);
COVERAGE_DEFINE(hmap_pathological);
COVERAGE_DEFINE(hmap_expand);
COVERAGE_DEFINE(hmap_shrink);
COVERAGE_DEFINE(hmap_reserve);

/* Initializes 'hmap' as an empty hash table. */
void
hmap_init(struct hmap *hmap)
{
    hmap->buckets = &hmap->one;
    hmap->one = NULL;
    hmap->mask = 0;
    hmap->n = 0;
}

/* Frees memory reserved by 'hmap'.  It is the client's responsibility to free
 * the nodes themselves, if necessary. */
void
hmap_destroy(struct hmap *hmap)
{
    if (hmap && hmap->buckets != &hmap->one) {
        free(hmap->buckets);
    }
}
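
/* Example (illustrative sketch, not part of the library): a typical lifecycle
 * for a map whose elements embed a 'struct hmap_node'.  The 'struct element',
 * the choice of hash_int() as the hash function, and the teardown loop are
 * assumptions chosen for illustration; hmap_insert(), hmap_remove(),
 * HMAP_FOR_EACH_SAFE, and HMAP_INITIALIZER come from the public hmap header.
 *
 *     struct element {
 *         struct hmap_node node;     (embedded hook owned by the map)
 *         int key;
 *     };
 *
 *     struct hmap map = HMAP_INITIALIZER(&map);
 *
 *     struct element *e = xmalloc(sizeof *e);
 *     e->key = 42;
 *     hmap_insert(&map, &e->node, hash_int(e->key, 0));
 *
 *     struct element *iter, *next;
 *     HMAP_FOR_EACH_SAFE (iter, next, node, &map) {
 *         hmap_remove(&map, &iter->node);
 *         free(iter);
 *     }
 *     hmap_destroy(&map);
 */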

/* Removes all nodes from 'hmap', leaving it ready to accept more nodes.  Does
 * not free memory allocated for 'hmap'.
 *
 * This function is appropriate when 'hmap' will soon have about as many
 * elements as it did before.  If 'hmap' will likely have fewer elements than
 * before, use hmap_destroy() followed by hmap_init() to save memory and
 * iteration time. */
void
hmap_clear(struct hmap *hmap)
{
    if (hmap->n > 0) {
        hmap->n = 0;
        memset(hmap->buckets, 0, (hmap->mask + 1) * sizeof *hmap->buckets);
    }
}

/* Exchanges hash maps 'a' and 'b'. */
void
hmap_swap(struct hmap *a, struct hmap *b)
{
    struct hmap tmp = *a;
    *a = *b;
    *b = tmp;
    hmap_moved(a);
    hmap_moved(b);
}

/* Adjusts 'hmap' to compensate for having moved position in memory (e.g. due
 * to realloc()). */
void
hmap_moved(struct hmap *hmap)
{
    if (!hmap->mask) {
        hmap->buckets = &hmap->one;
    }
}
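
/* Example (illustrative sketch): hmap_moved() matters when the 'struct hmap'
 * itself lives in storage that may be reallocated, e.g. a growable array of
 * hypothetical 'struct table's that each embed a map:
 *
 *     struct table {
 *         struct hmap map;
 *     };
 *
 *     tables = xrealloc(tables, new_allocated * sizeof *tables);
 *     for (size_t i = 0; i < n_tables; i++) {
 *         hmap_moved(&tables[i].map);
 *     }
 */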

static void
resize(struct hmap *hmap, size_t new_mask, const char *where)
{
    struct hmap tmp;
    size_t i;

    ovs_assert(is_pow2(new_mask + 1));

    hmap_init(&tmp);
    if (new_mask) {
        tmp.buckets = xmalloc(sizeof *tmp.buckets * (new_mask + 1));
        tmp.mask = new_mask;
        for (i = 0; i <= tmp.mask; i++) {
            tmp.buckets[i] = NULL;
        }
    }
    int n_big_buckets = 0;
    int biggest_count = 0;
    int n_biggest_buckets = 0;
    for (i = 0; i <= hmap->mask; i++) {
        struct hmap_node *node, *next;
        int count = 0;
        for (node = hmap->buckets[i]; node; node = next) {
            next = node->next;
            hmap_insert_fast(&tmp, node, node->hash);
            count++;
        }
        if (count > 5) {
            n_big_buckets++;
            if (count > biggest_count) {
                biggest_count = count;
                n_biggest_buckets = 1;
            } else if (count == biggest_count) {
                n_biggest_buckets++;
            }
        }
    }
    hmap_swap(hmap, &tmp);
    hmap_destroy(&tmp);

    if (n_big_buckets) {
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
        COVERAGE_INC(hmap_pathological);
        VLOG_DBG_RL(&rl, "%s: %d bucket%s with 6+ nodes, "
                    "including %d bucket%s with %d nodes "
                    "(%"PRIuSIZE" nodes total across %"PRIuSIZE" buckets)",
                    where,
                    n_big_buckets, n_big_buckets > 1 ? "s" : "",
                    n_biggest_buckets, n_biggest_buckets > 1 ? "s" : "",
                    biggest_count,
                    hmap->n, hmap->mask + 1);
    }
}

static size_t
calc_mask(size_t capacity)
{
    size_t mask = capacity / 2;
    mask |= mask >> 1;
    mask |= mask >> 2;
    mask |= mask >> 4;
    mask |= mask >> 8;
    mask |= mask >> 16;
#if SIZE_MAX > UINT32_MAX
    mask |= mask >> 32;
#endif

    /* If we need to dynamically allocate buckets we might as well allocate at
     * least 4 of them. */
    mask |= (mask & 1) << 1;

    return mask;
}
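
/* Worked example for calc_mask() (derived from the code above): the shifts
 * smear the high bit of 'capacity / 2' downward so the result is all 1-bits,
 * and the final step rounds any nonzero mask up to at least 0x3, so a
 * dynamically allocated table never has fewer than 4 buckets:
 *
 *     calc_mask(0)  == 0x0    (1 bucket: the inline 'one' bucket)
 *     calc_mask(2)  == 0x3    (4 buckets)
 *     calc_mask(10) == 0x7    (8 buckets)
 *     calc_mask(33) == 0x1f   (32 buckets)
 *
 * That is, a table sized for 'capacity' elements averages between one and two
 * elements per bucket. */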

/* Expands 'hmap', if necessary, to optimize the performance of searches.
 *
 * ('where' is used in debug logging.  Commonly one would use hmap_expand() to
 * automatically provide the caller's source file and line number for
 * 'where'.) */
void
hmap_expand_at(struct hmap *hmap, const char *where)
{
    size_t new_mask = calc_mask(hmap->n);
    if (new_mask > hmap->mask) {
        COVERAGE_INC(hmap_expand);
        resize(hmap, new_mask, where);
    }
}
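
/* Example (illustrative): callers normally reach this function through the
 * hmap_expand() wrapper in the public header, which fills in 'where' with the
 * call site, roughly equivalent to:
 *
 *     hmap_expand_at(&map, OVS_SOURCE_LOCATOR);
 *
 * so that the rate-limited debug message in resize() can name the code that
 * triggered the resize. */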

/* Shrinks 'hmap', if necessary, to optimize the performance of iteration.
 *
 * ('where' is used in debug logging.  Commonly one would use hmap_shrink() to
 * automatically provide the caller's source file and line number for
 * 'where'.) */
void
hmap_shrink_at(struct hmap *hmap, const char *where)
{
    size_t new_mask = calc_mask(hmap->n);
    if (new_mask < hmap->mask) {
        COVERAGE_INC(hmap_shrink);
        resize(hmap, new_mask, where);
    }
}

/* Expands 'hmap', if necessary, to optimize the performance of searches when
 * it has up to 'n' elements.  (But iteration will be slow in a hash map whose
 * allocated capacity is much higher than its current number of nodes.)
 *
 * ('where' is used in debug logging.  Commonly one would use hmap_reserve() to
 * automatically provide the caller's source file and line number for
 * 'where'.) */
void
hmap_reserve_at(struct hmap *hmap, size_t n, const char *where)
{
    size_t new_mask = calc_mask(n);
    if (new_mask > hmap->mask) {
        COVERAGE_INC(hmap_reserve);
        resize(hmap, new_mask, where);
    }
}
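
/* Example (illustrative sketch): reserving capacity before a bulk load avoids
 * the intermediate resizes that growing one insertion at a time would trigger.
 * 'items', 'n_items', and the per-item 'hash' below are hypothetical:
 *
 *     hmap_reserve(&map, n_items);
 *     for (size_t i = 0; i < n_items; i++) {
 *         hmap_insert(&map, &items[i].node, items[i].hash);
 *     }
 */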

/* Adjusts 'hmap' to compensate for 'old_node' having moved position in memory
 * to 'node' (e.g. due to realloc()). */
void
hmap_node_moved(struct hmap *hmap,
                struct hmap_node *old_node, struct hmap_node *node)
{
    struct hmap_node **bucket = &hmap->buckets[node->hash & hmap->mask];
    while (*bucket != old_node) {
        bucket = &(*bucket)->next;
    }
    *bucket = node;
}
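
/* Example (illustrative sketch): a common reason for a node to move is
 * compacting an array of elements that embed their hmap_node, e.g. filling
 * the hole left by deleting slot 'i' with the last element.  'elements' and
 * 'n' are hypothetical:
 *
 *     hmap_remove(&map, &elements[i].node);
 *     if (i != n - 1) {
 *         elements[i] = elements[n - 1];
 *         hmap_node_moved(&map, &elements[n - 1].node, &elements[i].node);
 *     }
 *     n--;
 */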

/* Chooses and returns a randomly selected node from 'hmap', which must not be
 * empty.
 *
 * I wouldn't depend on this algorithm to be fair, since I haven't analyzed it.
 * But it does at least ensure that any node in 'hmap' can be chosen. */
struct hmap_node *
hmap_random_node(const struct hmap *hmap)
{
    struct hmap_node *bucket, *node;
    size_t n, i;

    /* Choose a random non-empty bucket. */
    for (;;) {
        bucket = hmap->buckets[random_uint32() & hmap->mask];
        if (bucket) {
            break;
        }
    }

    /* Count nodes in bucket. */
    n = 0;
    for (node = bucket; node; node = node->next) {
        n++;
    }

    /* Choose random node from bucket. */
    i = random_range(n);
    for (node = bucket; i-- > 0; node = node->next) {
        continue;
    }
    return node;
}
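
/* Example (illustrative): converting the returned node back to its enclosing
 * element, for a hypothetical non-empty map of 'struct element's whose
 * hmap_node member is named 'node':
 *
 *     struct element *e = CONTAINER_OF(hmap_random_node(&map),
 *                                      struct element, node);
 */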

/* Returns the next node in 'hmap' in hash order, or NULL if no nodes remain in
 * 'hmap'.  Uses '*pos' to determine where to begin iteration, and updates
 * '*pos' before returning so that the next call resumes where this one left
 * off.
 *
 * It's better to use plain HMAP_FOR_EACH and related functions, since they are
 * faster and better at dealing with hmaps that change during iteration.
 *
 * Before beginning iteration, set '*pos' to all zeros. */
struct hmap_node *
hmap_at_position(const struct hmap *hmap,
                 struct hmap_position *pos)
{
    size_t offset;
    size_t b_idx;

    offset = pos->offset;
    for (b_idx = pos->bucket; b_idx <= hmap->mask; b_idx++) {
        struct hmap_node *node;
        size_t n_idx;

        for (n_idx = 0, node = hmap->buckets[b_idx]; node != NULL;
             n_idx++, node = node->next) {
            if (n_idx == offset) {
                if (node->next) {
                    pos->bucket = node->hash & hmap->mask;
                    pos->offset = offset + 1;
                } else {
                    pos->bucket = (node->hash & hmap->mask) + 1;
                    pos->offset = 0;
                }
                return node;
            }
        }
        offset = 0;
    }

    pos->bucket = 0;
    pos->offset = 0;
    return NULL;
}
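
/* Example (illustrative sketch): resumable iteration over a hypothetical map
 * of 'struct element's, useful when a scan must be spread across several
 * calls.  'process()' stands in for whatever per-element work the caller
 * does:
 *
 *     struct hmap_position pos;
 *     struct hmap_node *node;
 *
 *     memset(&pos, 0, sizeof pos);
 *     while ((node = hmap_at_position(&map, &pos)) != NULL) {
 *         struct element *e = CONTAINER_OF(node, struct element, node);
 *         process(e);
 *     }
 */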

/* Returns true if 'node' is in 'hmap', false otherwise. */
bool
hmap_contains(const struct hmap *hmap, const struct hmap_node *node)
{
    struct hmap_node *p;

    for (p = hmap_first_in_bucket(hmap, node->hash); p; p = p->next) {
        if (p == node) {
            return true;
        }
    }

    return false;
}