/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * SPDX-License-Identifier: MPL-2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

/*! \file */

#include <inttypes.h>
#include <stdbool.h>

#include <isc/atomic.h>
#include <isc/buffer.h>
#include <isc/hash.h>
#include <isc/log.h>
#include <isc/mem.h>
#include <isc/mutex.h>
#include <isc/rwlock.h>
#include <isc/string.h>
#include <isc/time.h>
#include <isc/util.h>

#include <dns/badcache.h>
#include <dns/fixedname.h>
#include <dns/name.h>
#include <dns/rdatatype.h>
#include <dns/types.h>

typedef struct dns_bcentry dns_bcentry_t;

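/*
 * The bad cache is a chained hash table.  The outer rwlock ('lock') is
 * held in read mode for ordinary insertions, lookups, and per-name
 * flushes, and in write mode only when the whole table is resized,
 * flushed (in whole or by subtree), or dumped; each bucket is
 * additionally protected by its own mutex in 'tlocks'.  'count' tracks
 * the number of live entries and 'sweep' is a rotating bucket index
 * used for incremental cleanup of expired entries during lookups.
 */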
struct dns_badcache {
	unsigned int magic;
	isc_rwlock_t lock;
	isc_mem_t *mctx;

	isc_mutex_t *tlocks;
	dns_bcentry_t **table;

	atomic_uint_fast32_t count;
	atomic_uint_fast32_t sweep;

	unsigned int minsize;
	unsigned int size;
};

#define BADCACHE_MAGIC	  ISC_MAGIC('B', 'd', 'C', 'a')
#define VALID_BADCACHE(m) ISC_MAGIC_VALID(m, BADCACHE_MAGIC)

struct dns_bcentry {
	dns_bcentry_t *next;
	dns_rdatatype_t type;
	isc_time_t expire;
	uint32_t flags;
	unsigned int hashval;
	dns_fixedname_t fname;
	dns_name_t *name;
};

static void
badcache_resize(dns_badcache_t *bc, isc_time_t *now);

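/*
 * Allocate a new bad cache with 'size' hash buckets and one mutex per
 * bucket; 'size' is also remembered as the minimum size the table may
 * later shrink back to.
 */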
isc_result_t
dns_badcache_init(isc_mem_t *mctx, unsigned int size, dns_badcache_t **bcp) {
	dns_badcache_t *bc = NULL;
	unsigned int i;

	REQUIRE(bcp != NULL && *bcp == NULL);
	REQUIRE(mctx != NULL);

	bc = isc_mem_get(mctx, sizeof(*bc));

	*bc = (dns_badcache_t){
		.size = size,
		.minsize = size,
	};

	isc_mem_attach(mctx, &bc->mctx);
	isc_rwlock_init(&bc->lock);

	bc->table = isc_mem_getx(bc->mctx, sizeof(bc->table[0]) * size,
				 ISC_MEM_ZERO);
	bc->tlocks = isc_mem_getx(bc->mctx, sizeof(bc->tlocks[0]) * size,
				  ISC_MEM_ZERO);
	for (i = 0; i < size; i++) {
		isc_mutex_init(&bc->tlocks[i]);
	}

	bc->magic = BADCACHE_MAGIC;

	*bcp = bc;
	return (ISC_R_SUCCESS);
}

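/*
 * Flush all entries, destroy the per-bucket mutexes and the rwlock,
 * then free the table, the lock array, and the cache itself.
 */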
void
dns_badcache_destroy(dns_badcache_t **bcp) {
	dns_badcache_t *bc;
	unsigned int i;

	REQUIRE(bcp != NULL && *bcp != NULL);
	bc = *bcp;
	*bcp = NULL;

	dns_badcache_flush(bc);

	bc->magic = 0;
	isc_rwlock_destroy(&bc->lock);
	for (i = 0; i < bc->size; i++) {
		isc_mutex_destroy(&bc->tlocks[i]);
	}
	isc_mem_put(bc->mctx, bc->table, sizeof(bc->table[0]) * bc->size);
	isc_mem_put(bc->mctx, bc->tlocks, sizeof(bc->tlocks[0]) * bc->size);
	isc_mem_putanddetach(&bc->mctx, bc, sizeof(dns_badcache_t));
}

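/*
 * Grow or shrink the hash table.  Called (outside any lock) from
 * dns_badcache_add() when the entry count passes roughly eight times
 * the bucket count, or drops below twice the bucket count while the
 * table is above its minimum size.  Expired entries are dropped while
 * rehashing into the new table.
 */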
static void
badcache_resize(dns_badcache_t *bc, isc_time_t *now) {
	dns_bcentry_t **newtable, *bad, *next;
	isc_mutex_t *newlocks;
	unsigned int newsize, i;
	bool grow;

	RWLOCK(&bc->lock, isc_rwlocktype_write);

	/*
	 * XXXWPK we will have a thundering herd problem here,
	 * as all threads will wait on the RWLOCK when there's
	 * a need to resize badcache.
	 * However, it happens so rarely it should not be a
	 * performance issue. This is because we double the
	 * size every time we grow it, and we don't shrink
	 * unless the number of entries really shrunk. In a
	 * high load situation, the number of badcache entries
	 * will eventually stabilize.
	 */
	if (atomic_load_relaxed(&bc->count) > bc->size * 8) {
		grow = true;
	} else if (atomic_load_relaxed(&bc->count) < bc->size * 2 &&
		   bc->size > bc->minsize)
	{
		grow = false;
	} else {
		/* Someone resized it already, bail. */
		RWUNLOCK(&bc->lock, isc_rwlocktype_write);
		return;
	}

	if (grow) {
		newsize = bc->size * 2 + 1;
	} else {
		newsize = (bc->size - 1) / 2;
#ifdef __clang_analyzer__
		/*
		 * XXXWPK there's a bug in clang static analyzer -
		 * `value % newsize` is considered undefined even though
		 * we check if newsize is larger than 0. This helps.
		 */
		newsize += 1;
#endif
	}
	RUNTIME_CHECK(newsize > 0);

	newtable = isc_mem_getx(bc->mctx, sizeof(dns_bcentry_t *) * newsize,
				ISC_MEM_ZERO);

	newlocks = isc_mem_get(bc->mctx, sizeof(isc_mutex_t) * newsize);

	/* Copy existing mutexes */
	for (i = 0; i < newsize && i < bc->size; i++) {
		newlocks[i] = bc->tlocks[i];
	}
	/* Initialize additional mutexes if we're growing */
	for (i = bc->size; i < newsize; i++) {
		isc_mutex_init(&newlocks[i]);
	}
	/* Destroy extra mutexes if we're shrinking */
	for (i = newsize; i < bc->size; i++) {
		isc_mutex_destroy(&bc->tlocks[i]);
	}

	for (i = 0; atomic_load_relaxed(&bc->count) > 0 && i < bc->size; i++) {
		for (bad = bc->table[i]; bad != NULL; bad = next) {
			next = bad->next;
			if (isc_time_compare(&bad->expire, now) < 0) {
				isc_mem_put(bc->mctx, bad, sizeof(*bad));
				atomic_fetch_sub_relaxed(&bc->count, 1);
			} else {
				bad->next = newtable[bad->hashval % newsize];
				newtable[bad->hashval % newsize] = bad;
			}
		}
		bc->table[i] = NULL;
	}

	isc_mem_put(bc->mctx, bc->tlocks, sizeof(isc_mutex_t) * bc->size);
	bc->tlocks = newlocks;

	isc_mem_put(bc->mctx, bc->table, sizeof(*bc->table) * bc->size);
	bc->size = newsize;
	bc->table = newtable;

	RWUNLOCK(&bc->lock, isc_rwlocktype_write);
}

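/*
 * Add an entry for 'name'/'type' expiring at 'expire', or refresh the
 * expiration time of an existing one; 'flags' are updated on an
 * existing entry only when 'update' is true.  Expired entries on the
 * bucket chain are removed along the way, and a resize is triggered
 * when the entry count crosses the grow/shrink thresholds.
 */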
void
dns_badcache_add(dns_badcache_t *bc, const dns_name_t *name,
		 dns_rdatatype_t type, bool update, uint32_t flags,
		 isc_time_t *expire) {
	unsigned int hashval, hash;
	dns_bcentry_t *bad, *prev, *next;
	isc_time_t now;
	bool resize = false;

	REQUIRE(VALID_BADCACHE(bc));
	REQUIRE(name != NULL);
	REQUIRE(expire != NULL);

	RWLOCK(&bc->lock, isc_rwlocktype_read);

	now = isc_time_now();

	hashval = dns_name_hash(name);
	hash = hashval % bc->size;
	LOCK(&bc->tlocks[hash]);
	prev = NULL;
	for (bad = bc->table[hash]; bad != NULL; bad = next) {
		next = bad->next;
		if (bad->type == type && dns_name_equal(name, bad->name)) {
			if (update) {
				bad->expire = *expire;
				bad->flags = flags;
			}
			break;
		}
		if (isc_time_compare(&bad->expire, &now) < 0) {
			if (prev == NULL) {
				bc->table[hash] = bad->next;
			} else {
				prev->next = bad->next;
			}
			isc_mem_put(bc->mctx, bad, sizeof(*bad));
			atomic_fetch_sub_relaxed(&bc->count, 1);
		} else {
			prev = bad;
		}
	}

	if (bad == NULL) {
		unsigned int count;

		bad = isc_mem_get(bc->mctx, sizeof(*bad));
		*bad = (dns_bcentry_t){ .type = type,
					.hashval = hashval,
					.expire = *expire,
					.flags = flags,
					.next = bc->table[hash] };

		bad->name = dns_fixedname_initname(&bad->fname);
		dns_name_copy(name, bad->name);
		bc->table[hash] = bad;

		count = atomic_fetch_add_relaxed(&bc->count, 1);
		if ((count > bc->size * 8) ||
		    (count < bc->size * 2 && bc->size > bc->minsize))
		{
			resize = true;
		}
	} else {
		bad->expire = *expire;
	}

	UNLOCK(&bc->tlocks[hash]);
	RWUNLOCK(&bc->lock, isc_rwlocktype_read);
	if (resize) {
		badcache_resize(bc, &now);
	}
}

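/*
 * Return true (and the stored flags via 'flagp', if non-NULL) when a
 * non-expired entry for 'name'/'type' exists.  Expired entries in the
 * searched bucket are removed, and one additional bucket, chosen by the
 * rotating 'sweep' index, has its head entry checked for expiry on each
 * call.
 */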
bool
dns_badcache_find(dns_badcache_t *bc, const dns_name_t *name,
		  dns_rdatatype_t type, uint32_t *flagp, isc_time_t *now) {
	dns_bcentry_t *bad, *prev, *next;
	bool answer = false;
	unsigned int i;
	unsigned int hash;

	REQUIRE(VALID_BADCACHE(bc));
	REQUIRE(name != NULL);
	REQUIRE(now != NULL);

	RWLOCK(&bc->lock, isc_rwlocktype_read);

	/*
	 * XXXMUKS: dns_name_equal() is expensive as it does an
	 * octet-by-octet comparison, and it can be made better in two
	 * ways here. First, lowercase the names (use
	 * dns_name_downcase() instead of dns_name_copy() in
	 * dns_badcache_add()) so that dns_name_caseequal() can be used,
	 * which the compiler will emit as SIMD instructions. Second,
	 * don't put multiple copies of the same name in the chain (or
	 * multiple names will have to be matched for equality), but use
	 * name->link to store the type-specific part.
	 */

	if (atomic_load_relaxed(&bc->count) == 0) {
		goto skip;
	}

	hash = dns_name_hash(name) % bc->size;
	prev = NULL;
	LOCK(&bc->tlocks[hash]);
	for (bad = bc->table[hash]; bad != NULL; bad = next) {
		next = bad->next;
		/*
		 * Search the hash list. Clean out expired records as we go.
		 */
		if (isc_time_compare(&bad->expire, now) < 0) {
			if (prev != NULL) {
				prev->next = bad->next;
			} else {
				bc->table[hash] = bad->next;
			}

			isc_mem_put(bc->mctx, bad, sizeof(*bad));
			atomic_fetch_sub(&bc->count, 1);
			continue;
		}
		if (bad->type == type && dns_name_equal(name, bad->name)) {
			if (flagp != NULL) {
				*flagp = bad->flags;
			}
			answer = true;
			break;
		}
		prev = bad;
	}
	UNLOCK(&bc->tlocks[hash]);
skip:

	/*
	 * Slow sweep to clean out stale records.
	 */
	i = atomic_fetch_add(&bc->sweep, 1) % bc->size;
	if (isc_mutex_trylock(&bc->tlocks[i]) == ISC_R_SUCCESS) {
		bad = bc->table[i];
		if (bad != NULL && isc_time_compare(&bad->expire, now) < 0) {
			bc->table[i] = bad->next;
			isc_mem_put(bc->mctx, bad, sizeof(*bad));
			atomic_fetch_sub_relaxed(&bc->count, 1);
		}
		UNLOCK(&bc->tlocks[i]);
	}

	RWUNLOCK(&bc->lock, isc_rwlocktype_read);
	return (answer);
}

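/*
 * Remove every entry from the cache.
 */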
void
dns_badcache_flush(dns_badcache_t *bc) {
	dns_bcentry_t *entry, *next;
	unsigned int i;

	RWLOCK(&bc->lock, isc_rwlocktype_write);
	REQUIRE(VALID_BADCACHE(bc));

	for (i = 0; atomic_load_relaxed(&bc->count) > 0 && i < bc->size; i++) {
		for (entry = bc->table[i]; entry != NULL; entry = next) {
			next = entry->next;
			isc_mem_put(bc->mctx, entry, sizeof(*entry));
			atomic_fetch_sub_relaxed(&bc->count, 1);
		}
		bc->table[i] = NULL;
	}
	RWUNLOCK(&bc->lock, isc_rwlocktype_write);
}

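/*
 * Remove any entries for 'name' (all types), plus any expired entries
 * found in the same hash bucket.
 */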
void
dns_badcache_flushname(dns_badcache_t *bc, const dns_name_t *name) {
	dns_bcentry_t *bad, *prev, *next;
	isc_time_t now;
	unsigned int hash;

	REQUIRE(VALID_BADCACHE(bc));
	REQUIRE(name != NULL);

	RWLOCK(&bc->lock, isc_rwlocktype_read);

	now = isc_time_now();
	hash = dns_name_hash(name) % bc->size;
	LOCK(&bc->tlocks[hash]);
	prev = NULL;
	for (bad = bc->table[hash]; bad != NULL; bad = next) {
		int n;
		next = bad->next;
		n = isc_time_compare(&bad->expire, &now);
		if (n < 0 || dns_name_equal(name, bad->name)) {
			if (prev == NULL) {
				bc->table[hash] = bad->next;
			} else {
				prev->next = bad->next;
			}

			isc_mem_put(bc->mctx, bad, sizeof(*bad));
			atomic_fetch_sub_relaxed(&bc->count, 1);
		} else {
			prev = bad;
		}
	}
	UNLOCK(&bc->tlocks[hash]);

	RWUNLOCK(&bc->lock, isc_rwlocktype_read);
}

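/*
 * Remove all entries at or below 'name', plus any expired entries
 * found while walking the whole table.
 */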
void
dns_badcache_flushtree(dns_badcache_t *bc, const dns_name_t *name) {
	dns_bcentry_t *bad, *prev, *next;
	unsigned int i;
	int n;
	isc_time_t now;

	REQUIRE(VALID_BADCACHE(bc));
	REQUIRE(name != NULL);

	/*
	 * We write lock the tree to avoid relocking every node
	 * individually.
	 */
	RWLOCK(&bc->lock, isc_rwlocktype_write);

	now = isc_time_now();

	for (i = 0; atomic_load_relaxed(&bc->count) > 0 && i < bc->size; i++) {
		prev = NULL;
		for (bad = bc->table[i]; bad != NULL; bad = next) {
			next = bad->next;
			n = isc_time_compare(&bad->expire, &now);
			if (n < 0 || dns_name_issubdomain(bad->name, name)) {
				if (prev == NULL) {
					bc->table[i] = bad->next;
				} else {
					prev->next = bad->next;
				}

				isc_mem_put(bc->mctx, bad, sizeof(*bad));
				atomic_fetch_sub_relaxed(&bc->count, 1);
			} else {
				prev = bad;
			}
		}
	}

	RWUNLOCK(&bc->lock, isc_rwlocktype_write);
}

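/*
 * Write the contents of the cache to 'fp', one "; name/type [ttl N]"
 * line per entry, dropping expired entries as they are encountered.
 */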
void
dns_badcache_print(dns_badcache_t *bc, const char *cachename, FILE *fp) {
	char namebuf[DNS_NAME_FORMATSIZE];
	char typebuf[DNS_RDATATYPE_FORMATSIZE];
	dns_bcentry_t *bad, *next, *prev;
	isc_time_t now;
	unsigned int i;
	uint64_t t;

	REQUIRE(VALID_BADCACHE(bc));
	REQUIRE(cachename != NULL);
	REQUIRE(fp != NULL);

	/*
	 * We write lock the tree to avoid relocking every node
	 * individually.
	 */
	RWLOCK(&bc->lock, isc_rwlocktype_write);
	fprintf(fp, ";\n; %s\n;\n", cachename);

	now = isc_time_now();
	for (i = 0; atomic_load_relaxed(&bc->count) > 0 && i < bc->size; i++) {
		prev = NULL;
		for (bad = bc->table[i]; bad != NULL; bad = next) {
			next = bad->next;
			if (isc_time_compare(&bad->expire, &now) < 0) {
				if (prev != NULL) {
					prev->next = bad->next;
				} else {
					bc->table[i] = bad->next;
				}

				isc_mem_put(bc->mctx, bad, sizeof(*bad));
				atomic_fetch_sub_relaxed(&bc->count, 1);
				continue;
			}
			prev = bad;
			dns_name_format(bad->name, namebuf, sizeof(namebuf));
			dns_rdatatype_format(bad->type, typebuf,
					     sizeof(typebuf));
			t = isc_time_microdiff(&bad->expire, &now);
			t /= 1000;
			fprintf(fp,
				"; %s/%s [ttl "
				"%" PRIu64 "]\n",
				namebuf, typebuf, t);
		}
	}
	RWUNLOCK(&bc->lock, isc_rwlocktype_write);
}