/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * SPDX-License-Identifier: MPL-2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

/*! \file */

#include <inttypes.h>
#include <stdalign.h>
#include <stdbool.h>

#include <isc/ascii.h>
#include <isc/async.h>
#include <isc/atomic.h>
#include <isc/file.h>
#include <isc/heap.h>
#include <isc/hex.h>
#include <isc/log.h>
#include <isc/loop.h>
#include <isc/mem.h>
#include <isc/mutex.h>
#include <isc/os.h>
#include <isc/queue.h>
#include <isc/random.h>
#include <isc/refcount.h>
#include <isc/result.h>
#include <isc/rwlock.h>
#include <isc/sieve.h>
#include <isc/stdio.h>
#include <isc/string.h>
#include <isc/time.h>
#include <isc/urcu.h>
#include <isc/util.h>

#include <dns/callbacks.h>
#include <dns/db.h>
#include <dns/dbiterator.h>
#include <dns/fixedname.h>
#include <dns/masterdump.h>
#include <dns/nsec.h>
#include <dns/qp.h>
#include <dns/rdata.h>
#include <dns/rdataset.h>
#include <dns/rdatasetiter.h>
#include <dns/rdataslab.h>
#include <dns/rdatastruct.h>
#include <dns/stats.h>
#include <dns/time.h>
#include <dns/view.h>

#include "db_p.h"
#include "qpcache_p.h"

#ifndef DNS_QPCACHE_LOG_STATS_LEVEL
#define DNS_QPCACHE_LOG_STATS_LEVEL 3
#endif

#define CHECK(op)                            \
	do {                                 \
		result = (op);               \
		if (result != ISC_R_SUCCESS) \
			goto failure;        \
	} while (0)

#define EXISTS(header)                                 \
	((atomic_load_acquire(&(header)->attributes) & \
	  DNS_SLABHEADERATTR_NONEXISTENT) == 0)
#define NXDOMAIN(header)                               \
	((atomic_load_acquire(&(header)->attributes) & \
	  DNS_SLABHEADERATTR_NXDOMAIN) != 0)
#define STALE(header)                                  \
	((atomic_load_acquire(&(header)->attributes) & \
	  DNS_SLABHEADERATTR_STALE) != 0)
#define STALE_WINDOW(header)                           \
	((atomic_load_acquire(&(header)->attributes) & \
	  DNS_SLABHEADERATTR_STALE_WINDOW) != 0)
#define OPTOUT(header)                                 \
	((atomic_load_acquire(&(header)->attributes) & \
	  DNS_SLABHEADERATTR_OPTOUT) != 0)
#define NEGATIVE(header)                               \
	((atomic_load_acquire(&(header)->attributes) & \
	  DNS_SLABHEADERATTR_NEGATIVE) != 0)
#define PREFETCH(header)                               \
	((atomic_load_acquire(&(header)->attributes) & \
	  DNS_SLABHEADERATTR_PREFETCH) != 0)
#define ZEROTTL(header)                                \
	((atomic_load_acquire(&(header)->attributes) & \
	  DNS_SLABHEADERATTR_ZEROTTL) != 0)
#define ANCIENT(header)                                \
	((atomic_load_acquire(&(header)->attributes) & \
	  DNS_SLABHEADERATTR_ANCIENT) != 0)
#define STATCOUNT(header)                              \
	((atomic_load_acquire(&(header)->attributes) & \
	  DNS_SLABHEADERATTR_STATCOUNT) != 0)
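
/*
 * Note that the tests above load 'attributes' with acquire semantics, so
 * a header's flags can be tested without holding the node lock; the node
 * lock is still needed to walk or modify the slabheader lists that the
 * headers live on.
 */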

#define STALE_TTL(header, qpdb) \
	(NXDOMAIN(header) ? 0 : qpdb->common.serve_stale_ttl)

#define ACTIVE(header, now)            \
	(((header)->expire > (now)) || \
	 ((header)->expire == (now) && ZEROTTL(header)))

#define EXPIREDOK(iterator) \
	(((iterator)->common.options & DNS_DB_EXPIREDOK) != 0)

#define STALEOK(iterator) (((iterator)->common.options & DNS_DB_STALEOK) != 0)

#define KEEPSTALE(qpdb) ((qpdb)->common.serve_stale_ttl > 0)

/*%
 * Note that "impmagic" is not the first four bytes of the struct, so
 * ISC_MAGIC_VALID cannot be used.
 */
#define QPDB_MAGIC ISC_MAGIC('Q', 'P', 'D', '4')
#define VALID_QPDB(qpdb) \
	((qpdb) != NULL && (qpdb)->common.impmagic == QPDB_MAGIC)

#define HEADERNODE(h) ((qpcnode_t *)((h)->node))

/*
 * Allow clients with a virtual time of up to 10 seconds in the past to see
 * records that would otherwise have expired.
 */
#define QPDB_VIRTUAL 10

/*
 * This defines the number of headers that we try to expire each time
 * expire_ttl_headers() is run. The number should be small enough that
 * TTL-based header expiration doesn't take too long, but large enough
 * that we expire a useful batch of headers when their TTLs are clustered.
 */
#define DNS_QPDB_EXPIRE_TTL_COUNT 10

/*%
 * Forward declarations
 */
typedef struct qpcache qpcache_t;

/*%
 * This is the structure that is used for each node in the qp trie of
 * trees.
 */
typedef struct qpcnode qpcnode_t;
struct qpcnode {
	DBNODE_FIELDS;

	qpcache_t *qpdb;

	uint8_t : 0;
	unsigned int delegating : 1;
	unsigned int nspace : 2; /*%< range is 0..3 */
	unsigned int havensec : 1;
	uint8_t : 0;

	/*
	 * 'erefs' counts external references held by a caller: for
	 * example, it could be incremented by dns_db_findnode(),
	 * and decremented by dns_db_detachnode().
	 *
	 * 'references' counts internal references to the node object,
	 * including the one held by the QP trie so the node won't be
	 * deleted while it's quiescently stored in the database - even
	 * though 'erefs' may be zero because no external caller is
	 * using it at the time.
	 *
	 * Generally when 'erefs' is incremented or decremented,
	 * 'references' is too. When both go to zero (meaning callers
	 * and the database have both released the object) the object
	 * is freed.
	 *
	 * Whenever 'erefs' is incremented from zero, we also acquire a
	 * node use reference (see 'qpcache->references' below), and
	 * release it when 'erefs' goes back to zero. This prevents the
	 * database from being shut down until every caller has released
	 * all nodes.
	 */
	isc_refcount_t references;
	isc_refcount_t erefs;
	void *data;

	/*%
	 * NOTE: The 'dirty' flag is protected by the node lock, so
	 * this bitfield has to be separated from the one above.
	 * We don't want it to share the same qword with bits
	 * that can be accessed without the node lock.
	 */
	uint8_t : 0;
	uint8_t dirty : 1;
	uint8_t : 0;

	/*%
	 * Used for dead nodes cleaning. This linked list is used to mark
	 * nodes which have no data any longer, but we cannot unlink at that
	 * exact moment because we did not or could not obtain a write lock
	 * on the tree.
	 */
	isc_queue_node_t deadlink;
};

/*%
 * One bucket structure will be created for each loop, and
 * nodes in the database will be evenly distributed among buckets
 * to reduce contention between threads.
 */
typedef struct qpcache_bucket {
	/*%
	 * Temporary storage for stale cache nodes and dynamically
	 * deleted nodes that await being cleaned up.
	 */
	isc_queue_t deadnodes;

	/* Per-bucket lock. */
	isc_rwlock_t lock;

	/*
	 * The heap is used for TTL based expiry. Note that qpcache->hmctx
	 * is the memory context to use for heap memory; this differs from
	 * the main database memory context, which is qpcache->common.mctx.
	 */
	isc_heap_t *heap;

	/* SIEVE-LRU cache cleaning state. */
	ISC_SIEVE(dns_slabheader_t) sieve;

	/* Padding to prevent false sharing between locks. */
	uint8_t __padding[ISC_OS_CACHELINE_SIZE -
			  (sizeof(isc_queue_t) + sizeof(isc_rwlock_t) +
			   sizeof(isc_heap_t *) +
			   sizeof(ISC_SIEVE(dns_slabheader_t))) %
				  ISC_OS_CACHELINE_SIZE];
} qpcache_bucket_t;
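
/*
 * A sketch of the padding arithmetic above: the real members occupy
 * sizeof(isc_queue_t) + sizeof(isc_rwlock_t) + sizeof(isc_heap_t *) +
 * sizeof(ISC_SIEVE(dns_slabheader_t)) bytes, and '__padding' rounds the
 * structure size up to a multiple of ISC_OS_CACHELINE_SIZE, so buckets
 * that are adjacent in the 'buckets' array never share a cache line.
 */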

struct qpcache {
	/* Unlocked. */
	dns_db_t common;
	/* Locks the data in this struct */
	isc_rwlock_t lock;
	/* Locks the tree structure (prevents nodes appearing/disappearing) */
	isc_rwlock_t tree_lock;

	/*
	 * NOTE: 'references' is NOT the global reference counter for
	 * the database object handled by dns_db_attach() and _detach();
	 * that one is 'common.references'.
	 *
	 * Instead, 'references' counts the number of nodes being used by
	 * at least one external caller. (It's called 'references' to
	 * leverage the ISC_REFCOUNT_STATIC macros, but 'nodes_in_use'
	 * might be a clearer name.)
	 *
	 * One additional reference to this counter is held by the database
	 * object itself. When 'common.references' goes to zero, that
	 * reference is released. When in turn 'references' goes to zero,
	 * the database is shut down and freed.
	 */
	isc_refcount_t references;

	dns_stats_t *rrsetstats;
	isc_stats_t *cachestats;

	uint32_t maxrrperset;	 /* Maximum RRs per RRset */
	uint32_t maxtypepername; /* Maximum number of RR types per owner */

	/*
	 * The time after a failed lookup, where stale answers from cache
	 * may be used directly in a DNS response without attempting a
	 * new iterative lookup.
	 */
	uint32_t serve_stale_refresh;

	/* Locked by tree_lock. */
	dns_qp_t *tree;
	dns_qp_t *nsec;

	isc_mem_t *hmctx; /* Memory context for the heaps */

	size_t buckets_count;
	qpcache_bucket_t buckets[]; /* attribute((counted_by(buckets_count))) */
};

#ifdef DNS_DB_NODETRACE
#define qpcache_ref(ptr)   qpcache__ref(ptr, __func__, __FILE__, __LINE__)
#define qpcache_unref(ptr) qpcache__unref(ptr, __func__, __FILE__, __LINE__)
#define qpcache_attach(ptr, ptrp) \
	qpcache__attach(ptr, ptrp, __func__, __FILE__, __LINE__)
#define qpcache_detach(ptrp) qpcache__detach(ptrp, __func__, __FILE__, __LINE__)
ISC_REFCOUNT_STATIC_TRACE_DECL(qpcache);
#else
ISC_REFCOUNT_STATIC_DECL(qpcache);
#endif

/*%
 * Search Context
 */
typedef struct {
	qpcache_t *qpdb;
	unsigned int options;
	dns_qpchain_t chain;
	dns_qpiter_t iter;
	bool need_cleanup;
	qpcnode_t *zonecut;
	dns_slabheader_t *zonecut_header;
	dns_slabheader_t *zonecut_sigheader;
	isc_stdtime_t now;
} qpc_search_t;

#ifdef DNS_DB_NODETRACE
#define qpcnode_ref(ptr)   qpcnode__ref(ptr, __func__, __FILE__, __LINE__)
#define qpcnode_unref(ptr) qpcnode__unref(ptr, __func__, __FILE__, __LINE__)
#define qpcnode_attach(ptr, ptrp) \
	qpcnode__attach(ptr, ptrp, __func__, __FILE__, __LINE__)
#define qpcnode_detach(ptrp) qpcnode__detach(ptrp, __func__, __FILE__, __LINE__)
ISC_REFCOUNT_STATIC_TRACE_DECL(qpcnode);
#else
ISC_REFCOUNT_STATIC_DECL(qpcnode);
#endif

/*
 * Node methods forward declarations
 */
static void
qpcnode_attachnode(dns_dbnode_t *source, dns_dbnode_t **targetp DNS__DB_FLARG);
static void
qpcnode_detachnode(dns_dbnode_t **nodep DNS__DB_FLARG);
static void
qpcnode_locknode(dns_dbnode_t *node, isc_rwlocktype_t type);
static void
qpcnode_unlocknode(dns_dbnode_t *node, isc_rwlocktype_t type);
static void
qpcnode_deletedata(dns_dbnode_t *node, void *data);
static void
qpcnode_expiredata(dns_dbnode_t *node, void *data);

static dns_dbnode_methods_t qpcnode_methods = (dns_dbnode_methods_t){
	.attachnode = qpcnode_attachnode,
	.detachnode = qpcnode_detachnode,
	.locknode = qpcnode_locknode,
	.unlocknode = qpcnode_unlocknode,
	.deletedata = qpcnode_deletedata,
	.expiredata = qpcnode_expiredata,
};

/* QP methods */
static void
qp_attach(void *uctx, void *pval, uint32_t ival);
static void
qp_detach(void *uctx, void *pval, uint32_t ival);
static size_t
qp_makekey(dns_qpkey_t key, void *uctx, void *pval, uint32_t ival);
static void
qp_triename(void *uctx, char *buf, size_t size);

static dns_qpmethods_t qpmethods = {
	qp_attach,
	qp_detach,
	qp_makekey,
	qp_triename,
};
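
/*
 * These callbacks let the qp-trie manage the values stored in it: the
 * attach/detach methods (defined below) adjust the qpcnode reference
 * count whenever the trie copies or drops a leaf pointer, and
 * qp_makekey derives a lookup key from a stored node's owner name and
 * namespace.
 */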

static void
qp_attach(void *uctx ISC_ATTR_UNUSED, void *pval,
	  uint32_t ival ISC_ATTR_UNUSED) {
	qpcnode_t *data = pval;
	qpcnode_ref(data);
}

static void
qp_detach(void *uctx ISC_ATTR_UNUSED, void *pval,
	  uint32_t ival ISC_ATTR_UNUSED) {
	qpcnode_t *data = pval;
	qpcnode_detach(&data);
}

static size_t
qp_makekey(dns_qpkey_t key, void *uctx ISC_ATTR_UNUSED, void *pval,
	   uint32_t ival ISC_ATTR_UNUSED) {
	qpcnode_t *data = pval;
	return dns_qpkey_fromname(key, &data->name, data->nspace);
}

static void
qp_triename(void *uctx ISC_ATTR_UNUSED, char *buf, size_t size) {
	snprintf(buf, size, "qpdb-lite");
}

static void
rdatasetiter_destroy(dns_rdatasetiter_t **iteratorp DNS__DB_FLARG);
static isc_result_t
rdatasetiter_first(dns_rdatasetiter_t *iterator DNS__DB_FLARG);
static isc_result_t
rdatasetiter_next(dns_rdatasetiter_t *iterator DNS__DB_FLARG);
static void
rdatasetiter_current(dns_rdatasetiter_t *iterator,
		     dns_rdataset_t *rdataset DNS__DB_FLARG);

static dns_rdatasetitermethods_t rdatasetiter_methods = {
	rdatasetiter_destroy, rdatasetiter_first, rdatasetiter_next,
	rdatasetiter_current
};

typedef struct qpc_rditer {
	dns_rdatasetiter_t common;
	dns_slabheader_t *current;
} qpc_rditer_t;

static void
dbiterator_destroy(dns_dbiterator_t **iteratorp DNS__DB_FLARG);
static isc_result_t
dbiterator_first(dns_dbiterator_t *iterator DNS__DB_FLARG);
static isc_result_t
dbiterator_last(dns_dbiterator_t *iterator DNS__DB_FLARG);
static isc_result_t
dbiterator_seek(dns_dbiterator_t *iterator,
		const dns_name_t *name DNS__DB_FLARG);
static isc_result_t
dbiterator_prev(dns_dbiterator_t *iterator DNS__DB_FLARG);
static isc_result_t
dbiterator_next(dns_dbiterator_t *iterator DNS__DB_FLARG);
static isc_result_t
dbiterator_current(dns_dbiterator_t *iterator, dns_dbnode_t **nodep,
		   dns_name_t *name DNS__DB_FLARG);
static isc_result_t
dbiterator_pause(dns_dbiterator_t *iterator);
static isc_result_t
dbiterator_origin(dns_dbiterator_t *iterator, dns_name_t *name);

static dns_dbiteratormethods_t dbiterator_methods = {
	dbiterator_destroy, dbiterator_first, dbiterator_last,
	dbiterator_seek,    dbiterator_prev,  dbiterator_next,
	dbiterator_current, dbiterator_pause, dbiterator_origin
};

/*
 * Note that the QP cache database only needs a single QP iterator, because
 * unlike the QP zone database, NSEC3 records are cached in the main tree.
 *
 * If we ever implement synth-from-dnssec using NSEC3 records, we'll need
 * to have a separate tree for NSEC3 records, and to copy in the more complex
 * iterator implementation from qpzone.c.
 */
typedef struct qpc_dbit {
	dns_dbiterator_t common;
	bool paused;
	isc_rwlocktype_t tree_locked;
	isc_result_t result;
	dns_fixedname_t fixed;
	dns_name_t *name;
	dns_qpiter_t iter;
	qpcnode_t *node;
} qpc_dbit_t;

static void
qpcache__destroy(qpcache_t *qpdb);

static dns_dbmethods_t qpdb_cachemethods;

static void
cleanup_deadnodes_cb(void *arg);

/*%
 * 'init_count' is used to initialize 'newheader->count', which in turn
 * is used to determine where the rrset-order "cyclic" cycle starts.
 * We don't lock this as we don't care about simultaneous updates.
 */
static atomic_uint_fast16_t init_count = 0;

/*
 * Locking
 *
 * If a routine is going to lock more than one lock in this module, then
 * the locking must be done in the following order:
 *
 *	Tree Lock
 *
 *	Node Lock	(Only one from the set may be locked at one time by
 *			 any caller)
 *
 *	Database Lock
 *
 * Failure to follow this hierarchy can result in deadlock.
 */
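
/*
 * For example (a sketch only, not a call sequence from this file): a
 * caller that needs to delete a node write-locks the tree lock first,
 * then the node's per-bucket lock, and would take the database lock
 * last if it needed it; the locks are released in the reverse order.
 */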

/*
 * Cache-eviction routines.
 */

static void
expireheader(dns_slabheader_t *header, isc_rwlocktype_t *nlocktypep,
	     isc_rwlocktype_t *tlocktypep, dns_expire_t reason DNS__DB_FLARG);

static size_t
rdataset_size(dns_slabheader_t *header) {
	if (EXISTS(header)) {
		return dns_rdataslab_size(header);
	}

	return sizeof(*header);
}

static void
expire_lru_headers(qpcache_t *qpdb, uint32_t idx, size_t requested,
		   isc_rwlocktype_t *nlocktypep,
		   isc_rwlocktype_t *tlocktypep DNS__DB_FLARG) {
	size_t expired = 0;

	do {
		dns_slabheader_t *header =
			ISC_SIEVE_NEXT(qpdb->buckets[idx].sieve, visited, link);
		if (header == NULL) {
			return;
		}

		ISC_SIEVE_UNLINK(qpdb->buckets[idx].sieve, header, link);

		expired += rdataset_size(header);

		expireheader(header, nlocktypep, tlocktypep,
			     dns_expire_lru DNS__DB_FLARG_PASS);
	} while (expired < requested);
}

static void
qpcache_miss(qpcache_t *qpdb, dns_slabheader_t *newheader,
	     isc_rwlocktype_t *nlocktypep,
	     isc_rwlocktype_t *tlocktypep DNS__DB_FLARG) {
	uint32_t idx = HEADERNODE(newheader)->locknum;

	isc_heap_insert(qpdb->buckets[idx].heap, newheader);
	newheader->heap = qpdb->buckets[idx].heap;

	if (isc_mem_isovermem(qpdb->common.mctx)) {
		/*
		 * Maximum estimated size of the data being added: The size
		 * of the rdataset, plus a new QP database node and nodename,
		 * and a possible additional NSEC node and nodename. Also add
		 * a 12k margin for a possible QP-trie chunk allocation.
		 * (It's okay to overestimate, we want to get cache memory
		 * down quickly.)
		 */
		size_t purgesize =
			2 * (sizeof(qpcnode_t) +
			     dns_name_size(&HEADERNODE(newheader)->name)) +
			rdataset_size(newheader) + QP_SAFETY_MARGIN;

		expire_lru_headers(qpdb, idx, purgesize, nlocktypep,
				   tlocktypep DNS__DB_FLARG_PASS);
	}

	ISC_SIEVE_INSERT(qpdb->buckets[idx].sieve, newheader, link);
}

static void
qpcache_hit(qpcache_t *qpdb ISC_ATTR_UNUSED, dns_slabheader_t *header) {
	/*
	 * On cache hit, we only mark the header as seen.
	 */
	ISC_SIEVE_MARK(header, visited);
}
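
/*
 * Taken together, qpcache_miss(), qpcache_hit() and expire_lru_headers()
 * sketch out SIEVE-style eviction: a header enters the sieve unmarked on
 * a miss, a hit merely sets its 'visited' mark, and under memory
 * pressure ISC_SIEVE_NEXT skips (and unmarks) recently visited headers
 * so that headers that were never hit are evicted first (see
 * <isc/sieve.h> for the details of the scan).
 */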

/*
 * DB Routines
 */

static void
clean_stale_headers(dns_slabheader_t *top) {
	dns_slabheader_t *d = NULL, *down_next = NULL;

	for (d = top->down; d != NULL; d = down_next) {
		down_next = d->down;
		dns_slabheader_destroy(&d);
	}
	top->down = NULL;
}

static void
clean_cache_node(qpcache_t *qpdb, qpcnode_t *node) {
	dns_slabheader_t *current = NULL, *top_prev = NULL, *top_next = NULL;

	/*
	 * Caller must be holding the node lock.
	 */

	for (current = node->data; current != NULL; current = top_next) {
		top_next = current->next;
		clean_stale_headers(current);
		/*
		 * If current is nonexistent, ancient, or stale and
		 * we are not keeping stale, we can clean it up.
		 */
		if (!EXISTS(current) || ANCIENT(current) ||
		    (STALE(current) && !KEEPSTALE(qpdb)))
		{
			if (top_prev != NULL) {
				top_prev->next = current->next;
			} else {
				node->data = current->next;
			}
			dns_slabheader_destroy(&current);
		} else {
			top_prev = current;
		}
	}
	node->dirty = 0;
}

/*
 * tree_lock(write) must be held.
 */
static void
delete_node(qpcache_t *qpdb, qpcnode_t *node) {
	isc_result_t result = ISC_R_UNEXPECTED;

	if (isc_log_wouldlog(ISC_LOG_DEBUG(DNS_QPCACHE_LOG_STATS_LEVEL))) {
		char printname[DNS_NAME_FORMATSIZE];
		dns_name_format(&node->name, printname, sizeof(printname));
		isc_log_write(DNS_LOGCATEGORY_DATABASE, DNS_LOGMODULE_CACHE,
			      ISC_LOG_DEBUG(DNS_QPCACHE_LOG_STATS_LEVEL),
			      "delete_node(): %p %s (bucket %d)", node,
			      printname, node->locknum);
	}

	switch (node->nspace) {
	case DNS_DBNAMESPACE_NORMAL:
		if (node->havensec) {
			/*
			 * Delete the corresponding node from the auxiliary
			 * NSEC tree before deleting from the main tree.
			 */
			result = dns_qp_deletename(qpdb->nsec, &node->name,
						   DNS_DBNAMESPACE_NSEC, NULL,
						   NULL);
			if (result != ISC_R_SUCCESS) {
				isc_log_write(DNS_LOGCATEGORY_DATABASE,
					      DNS_LOGMODULE_CACHE,
					      ISC_LOG_WARNING,
					      "delete_node(): "
					      "dns_qp_deletename: %s",
					      isc_result_totext(result));
			}
		}
		result = dns_qp_deletename(qpdb->tree, &node->name,
					   node->nspace, NULL, NULL);
		break;
	case DNS_DBNAMESPACE_NSEC:
		result = dns_qp_deletename(qpdb->nsec, &node->name,
					   node->nspace, NULL, NULL);
		break;
	}
	if (result != ISC_R_SUCCESS) {
		isc_log_write(DNS_LOGCATEGORY_DATABASE, DNS_LOGMODULE_CACHE,
			      ISC_LOG_WARNING,
			      "delete_node(): "
			      "dns_qp_deletename: %s",
			      isc_result_totext(result));
	}
}

/*
 * The caller must specify its current node and tree lock status.
 * It's okay for neither lock to be held if there are existing external
 * references to the node, but if this is the first external reference,
 * then the caller must be holding at least one lock.
 *
 * If incrementing erefs from zero, we also increment the node use counter
 * in the qpcache object.
 *
 * This function is called from qpcnode_acquire(), so that internal
 * and external references are acquired at the same time, and from
 * qpcnode_release() when we only need to increase the internal references.
 */
static void
qpcnode_erefs_increment(qpcache_t *qpdb, qpcnode_t *node,
			isc_rwlocktype_t nlocktype,
			isc_rwlocktype_t tlocktype DNS__DB_FLARG) {
	uint_fast32_t refs = isc_refcount_increment0(&node->erefs);

#if DNS_DB_NODETRACE
	fprintf(stderr, "incr:node:%s:%s:%u:%p->erefs = %" PRIuFAST32 "\n",
		func, file, line, node, refs + 1);
#endif

	if (refs > 0) {
		return;
	}

	/*
	 * This is the first external reference to the node.
	 *
	 * We need to hold the node or tree lock to avoid
	 * incrementing the reference count while also deleting
	 * the node. delete_node() is always protected by both
	 * tree and node locks being write-locked.
	 */
	INSIST(nlocktype != isc_rwlocktype_none ||
	       tlocktype != isc_rwlocktype_none);

	qpcache_ref(qpdb);
}

static void
qpcnode_acquire(qpcache_t *qpdb, qpcnode_t *node, isc_rwlocktype_t nlocktype,
		isc_rwlocktype_t tlocktype DNS__DB_FLARG) {
	qpcnode_ref(node);
	qpcnode_erefs_increment(qpdb, node, nlocktype,
				tlocktype DNS__DB_FLARG_PASS);
}

/*
 * Decrement the external references to a node. If the counter
 * goes to zero, decrement the node use counter in the qpcache object
 * as well, and return true. Otherwise return false.
 */
static bool
qpcnode_erefs_decrement(qpcache_t *qpdb, qpcnode_t *node DNS__DB_FLARG) {
	uint_fast32_t refs = isc_refcount_decrement(&node->erefs);

#if DNS_DB_NODETRACE
	fprintf(stderr, "decr:node:%s:%s:%u:%p->erefs = %" PRIuFAST32 "\n",
		func, file, line, node, refs - 1);
#endif
	if (refs > 1) {
		return false;
	}

	qpcache_unref(qpdb);
	return true;
}

/*
 * Caller must be holding a node lock, either read or write.
 *
 * Note that the lock must be held even when node references are
 * atomically modified; in that case the decrement operation itself does not
 * have to be protected, but we must avoid a race condition where multiple
 * threads are decreasing the reference to zero simultaneously and at least
 * one of them is going to free the node.
 *
 * This calls qpcnode_erefs_decrement() to decrement the external node
 * reference counter (and possibly the node use counter), cleans up and
 * deletes the node if necessary, then decrements the internal reference
 * counter as well.
 */
static void
qpcnode_release(qpcache_t *qpdb, qpcnode_t *node, isc_rwlocktype_t *nlocktypep,
		isc_rwlocktype_t *tlocktypep DNS__DB_FLARG) {
	REQUIRE(*nlocktypep != isc_rwlocktype_none);

	if (!qpcnode_erefs_decrement(qpdb, node DNS__DB_FLARG_PASS)) {
		goto unref;
	}

	/* Handle easy and typical case first. */
	if (!node->dirty && node->data != NULL) {
		goto unref;
	}

	if (*nlocktypep == isc_rwlocktype_read) {
		/*
		 * The external reference count went to zero and the node
		 * is dirty or has no data, so we might want to delete it.
		 * To do that, we'll need a write lock. If we don't already
		 * have one, we have to make sure nobody else has
		 * acquired a reference in the meantime, so we increment
		 * erefs (but NOT references!), upgrade the node lock,
		 * decrement erefs again, and see if it's still zero.
		 *
		 * We can't really assume anything about the result code of
		 * erefs_increment. If another thread acquires a reference
		 * it will be larger than 0, if it doesn't it is going to
		 * be 0.
		 */
		isc_rwlock_t *nlock = &qpdb->buckets[node->locknum].lock;
		qpcnode_erefs_increment(qpdb, node, *nlocktypep,
					*tlocktypep DNS__DB_FLARG_PASS);
		NODE_FORCEUPGRADE(nlock, nlocktypep);
		if (!qpcnode_erefs_decrement(qpdb, node DNS__DB_FLARG_PASS)) {
			goto unref;
		}
	}

	if (node->dirty) {
		clean_cache_node(qpdb, node);
	}

	if (node->data != NULL) {
		goto unref;
	}

	if (*tlocktypep == isc_rwlocktype_write) {
		/*
		 * We can delete the node if we have the tree write lock.
		 */
		delete_node(qpdb, node);
	} else {
		/*
		 * If we don't have the tree lock, we will add this node to a
		 * linked list of nodes in this locking bucket which we will
		 * free later.
		 */
		qpcnode_acquire(qpdb, node, *nlocktypep,
				*tlocktypep DNS__DB_FLARG_PASS);

		isc_queue_node_init(&node->deadlink);
		if (!isc_queue_enqueue_entry(
			    &qpdb->buckets[node->locknum].deadnodes, node,
			    deadlink))
		{
			/* Queue was empty, trigger new cleaning */
			isc_loop_t *loop = isc_loop_get(node->locknum);

			qpcache_ref(qpdb);
			isc_async_run(loop, cleanup_deadnodes_cb, qpdb);
		}
	}

unref:
	qpcnode_unref(node);
}

static void
update_rrsetstats(dns_stats_t *stats, const dns_typepair_t typepair,
		  const uint_least16_t hattributes, const bool increment) {
	dns_rdatastatstype_t statattributes = 0;
	dns_rdatastatstype_t base = 0;
	dns_rdatastatstype_t type;
	dns_slabheader_t *header = &(dns_slabheader_t){
		.typepair = typepair,
		.attributes = hattributes,
	};

	if (!EXISTS(header) || !STATCOUNT(header)) {
		return;
	}

	if (NEGATIVE(header)) {
		if (NXDOMAIN(header)) {
			statattributes = DNS_RDATASTATSTYPE_ATTR_NXDOMAIN;
		} else {
			statattributes = DNS_RDATASTATSTYPE_ATTR_NXRRSET;
			base = DNS_TYPEPAIR_COVERS(header->typepair);
		}
	} else {
		base = DNS_TYPEPAIR_TYPE(header->typepair);
	}

	if (STALE(header)) {
		statattributes |= DNS_RDATASTATSTYPE_ATTR_STALE;
	}
	if (ANCIENT(header)) {
		statattributes |= DNS_RDATASTATSTYPE_ATTR_ANCIENT;
	}

	type = DNS_RDATASTATSTYPE_VALUE(base, statattributes);
	if (increment) {
		dns_rdatasetstats_increment(stats, type);
	} else {
		dns_rdatasetstats_decrement(stats, type);
	}
}

static void
mark(dns_slabheader_t *header, uint_least16_t flag) {
	uint_least16_t attributes = atomic_load_acquire(&header->attributes);
	uint_least16_t newattributes = 0;
	dns_stats_t *stats = NULL;
	qpcache_t *qpdb = HEADERNODE(header)->qpdb;

	/*
	 * If the flag is already set there is nothing to do.
	 */
	do {
		if ((attributes & flag) != 0) {
			return;
		}
		newattributes = attributes | flag;
	} while (!atomic_compare_exchange_weak_acq_rel(
		&header->attributes, &attributes, newattributes));

	/*
	 * Decrement and increment the stats counter for the appropriate
	 * RRtype.
	 */
	stats = dns_db_getrrsetstats(&qpdb->common);
	if (stats != NULL) {
		update_rrsetstats(stats, header->typepair, attributes, false);
		update_rrsetstats(stats, header->typepair, newattributes,
				  true);
	}
}

static void
setttl(dns_slabheader_t *header, isc_stdtime_t newts) {
	isc_stdtime_t oldts = header->expire;

	header->expire = newts;

	if (header->heap == NULL || header->heap_index == 0 || newts == oldts) {
		return;
	}

	if (newts < oldts) {
		isc_heap_increased(header->heap, header->heap_index);
	} else {
		isc_heap_decreased(header->heap, header->heap_index);
	}

	if (newts == 0) {
		isc_heap_delete(header->heap, header->heap_index);
	}
}
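
/*
 * A note on the increase/decrease naming above: the per-bucket heap used
 * for TTL expiry keeps the soonest expiry at the top, so moving a
 * header's expiry earlier raises its heap priority (isc_heap_increased)
 * and moving it later lowers it (isc_heap_decreased).
 */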

static void
mark_ancient(dns_slabheader_t *header) {
	setttl(header, 0);
	mark(header, DNS_SLABHEADERATTR_ANCIENT);
	HEADERNODE(header)->dirty = 1;
}
2024-03-06 17:33:37 -08:00
|
|
|
/*
|
|
|
|
* Caller must hold the node (write) lock.
|
|
|
|
*/
|
|
|
|
static void
|
2024-03-26 14:13:24 +01:00
|
|
|
expireheader(dns_slabheader_t *header, isc_rwlocktype_t *nlocktypep,
|
|
|
|
isc_rwlocktype_t *tlocktypep, dns_expire_t reason DNS__DB_FLARG) {
|
2025-02-02 13:56:37 +01:00
|
|
|
mark_ancient(header);
|
2024-03-06 17:33:37 -08:00
|
|
|
|
2024-04-29 15:54:37 -07:00
|
|
|
if (isc_refcount_current(&HEADERNODE(header)->erefs) == 0) {
|
Decouple database and node lifetimes by adding node-specific vtables
All databases in the codebase follow the same structure: a database is
an associative container from DNS names to nodes, and each node is an
associative container from RR types to RR data.
Each database implementation (qpzone, qpcache, sdlz, builtin, dyndb) has
its own corresponding node type (qpznode, qpcnode, etc). However, some
code needs to work with nodes generically regardless of their specific
type - for example, to acquire locks, manage references, or
register/unregister slabs from the heap.
Currently, these generic node operations are implemented as methods in
the database vtable, which creates problematic coupling between database
and node lifetimes. If a node outlives its parent database, the node
destructor will destroy all RR data, and each RR data destructor will
try to unregister from heaps by calling a virtual function from the
database vtable. Since the database was already freed, this causes a
crash.
This commit breaks the coupling by standardizing the layout of all
database nodes, adding a dedicated vtable for node operations, and
moving node-specific methods from the database vtable to the node
vtable.
2025-06-05 11:51:29 +02:00
|
|
|
qpcache_t *qpdb = HEADERNODE(header)->qpdb;
|
2024-03-06 17:33:37 -08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If no one else is using the node, we can clean it up now.
|
|
|
|
* We first need to gain a new reference to the node to meet a
|
2025-01-30 14:42:57 -08:00
|
|
|
* requirement of qpcnode_release().
|
2024-03-06 17:33:37 -08:00
|
|
|
*/
|
2025-01-30 14:42:57 -08:00
|
|
|
qpcnode_acquire(qpdb, HEADERNODE(header), *nlocktypep,
|
|
|
|
*tlocktypep DNS__DB_FLARG_PASS);
|
|
|
|
qpcnode_release(qpdb, HEADERNODE(header), nlocktypep,
|
2025-03-21 03:06:16 +01:00
|
|
|
tlocktypep DNS__DB_FLARG_PASS);
|
2024-03-06 17:33:37 -08:00
|
|
|
|
|
|
|
if (qpdb->cachestats == NULL) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (reason) {
|
|
|
|
case dns_expire_ttl:
|
|
|
|
isc_stats_increment(qpdb->cachestats,
|
|
|
|
dns_cachestatscounter_deletettl);
|
|
|
|
break;
|
|
|
|
case dns_expire_lru:
|
|
|
|
isc_stats_increment(qpdb->cachestats,
|
|
|
|
dns_cachestatscounter_deletelru);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}

static void
update_cachestats(qpcache_t *qpdb, isc_result_t result) {
	if (qpdb->cachestats == NULL) {
		return;
	}

	switch (result) {
	case DNS_R_COVERINGNSEC:
		isc_stats_increment(qpdb->cachestats,
				    dns_cachestatscounter_coveringnsec);
		FALLTHROUGH;
	case ISC_R_SUCCESS:
	case DNS_R_CNAME:
	case DNS_R_DNAME:
	case DNS_R_DELEGATION:
	case DNS_R_NCACHENXDOMAIN:
	case DNS_R_NCACHENXRRSET:
		isc_stats_increment(qpdb->cachestats,
				    dns_cachestatscounter_hits);
		break;
	default:
		isc_stats_increment(qpdb->cachestats,
				    dns_cachestatscounter_misses);
	}
}

static void
bindrdataset(qpcache_t *qpdb, qpcnode_t *node, dns_slabheader_t *header,
	     isc_stdtime_t now, isc_rwlocktype_t nlocktype,
	     isc_rwlocktype_t tlocktype,
	     dns_rdataset_t *rdataset DNS__DB_FLARG) {
	bool stale = STALE(header);
	bool ancient = ANCIENT(header);

	/*
	 * Caller must be holding the node reader lock.
	 * XXXJT: technically, we need a writer lock, since we'll increment
	 * the header count below. However, since the actual counter value
	 * doesn't matter, we prioritize performance here. (We may want to
	 * use atomic increment when available).
	 */

	if (rdataset == NULL) {
		return;
	}

	qpcnode_acquire(qpdb, node, nlocktype, tlocktype DNS__DB_FLARG_PASS);

	INSIST(rdataset->methods == NULL); /* We must be disassociated. */

	/*
	 * Mark header stale or ancient if the RRset is no longer active.
	 */
	if (!ACTIVE(header, now)) {
		dns_ttl_t stale_ttl = header->expire + STALE_TTL(header, qpdb);
		/*
		 * If this data is in the stale window, keep it; if
		 * DNS_DBFIND_STALEOK is not set, we tell the caller to
		 * skip this record. We skip records with ZEROTTL
		 * (these records should not be cached anyway).
		 */

		if (!ZEROTTL(header) && KEEPSTALE(qpdb) && stale_ttl > now) {
			stale = true;
		} else {
			/*
			 * We are not keeping stale, or it is outside the
			 * stale window. Mark ancient, i.e. ready for cleanup.
			 */
			ancient = true;
		}
	}

	rdataset->methods = &dns_rdataslab_rdatasetmethods;
	rdataset->rdclass = qpdb->common.rdclass;
	rdataset->type = DNS_TYPEPAIR_TYPE(header->typepair);
	rdataset->covers = DNS_TYPEPAIR_COVERS(header->typepair);
	rdataset->ttl = !ZEROTTL(header) ? header->expire - now : 0;
	rdataset->trust = header->trust;
	rdataset->resign = 0;

	if (NEGATIVE(header)) {
		rdataset->attributes.negative = true;
	}
	if (NXDOMAIN(header)) {
		rdataset->attributes.nxdomain = true;
	}
	if (OPTOUT(header)) {
		rdataset->attributes.optout = true;
	}
	if (PREFETCH(header)) {
		rdataset->attributes.prefetch = true;
	}

	if (stale && !ancient) {
		dns_ttl_t stale_ttl = header->expire + STALE_TTL(header, qpdb);
		if (stale_ttl > now) {
			rdataset->ttl = stale_ttl - now;
		} else {
			rdataset->ttl = 0;
		}
		if (STALE_WINDOW(header)) {
			rdataset->attributes.stale_window = true;
		}
		rdataset->attributes.stale = true;
		rdataset->expire = header->expire;
	} else if (!ACTIVE(header, now)) {
		rdataset->attributes.ancient = true;
		rdataset->ttl = 0;
	}

	rdataset->count = atomic_fetch_add_relaxed(&header->count, 1);

	rdataset->slab.db = (dns_db_t *)qpdb;
	rdataset->slab.node = (dns_dbnode_t *)node;
	rdataset->slab.raw = dns_slabheader_raw(header);
	rdataset->slab.iter_pos = NULL;
	rdataset->slab.iter_count = 0;

	/*
	 * Add noqname proof.
	 */
	rdataset->slab.noqname = header->noqname;
	if (header->noqname != NULL) {
		rdataset->attributes.noqname = true;
	}
	rdataset->slab.closest = header->closest;
	if (header->closest != NULL) {
		rdataset->attributes.closest = true;
	}
}
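
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * bindrdataset() must be called with the node's bucket lock held at
 * least for reading, and with a disassociated rdataset:
 *
 *	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
 *	isc_rwlock_t *nlock = &qpdb->buckets[node->locknum].lock;
 *	dns_rdataset_t rds;
 *
 *	dns_rdataset_init(&rds);
 *	NODE_RDLOCK(nlock, &nlocktype);
 *	bindrdataset(qpdb, node, header, isc_stdtime_now(), nlocktype,
 *		     isc_rwlocktype_none, &rds DNS__DB_FLARG_PASS);
 *	NODE_UNLOCK(nlock, &nlocktype);
 *
 * The bound rdataset holds a node reference taken via qpcnode_acquire();
 * disassociating the rdataset is what releases it again.
 */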

static void
bindrdatasets(qpcache_t *qpdb, qpcnode_t *qpnode, dns_slabheader_t *found,
	      dns_slabheader_t *foundsig, isc_stdtime_t now,
	      isc_rwlocktype_t nlocktype, isc_rwlocktype_t tlocktype,
	      dns_rdataset_t *rdataset,
	      dns_rdataset_t *sigrdataset DNS__DB_FLARG) {
	bindrdataset(qpdb, qpnode, found, now, nlocktype, tlocktype,
		     rdataset DNS__DB_FLARG_PASS);
	qpcache_hit(qpdb, found);
	if (!NEGATIVE(found) && foundsig != NULL) {
		bindrdataset(qpdb, qpnode, foundsig, now, nlocktype, tlocktype,
			     sigrdataset DNS__DB_FLARG_PASS);
		qpcache_hit(qpdb, foundsig);
	}
}
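
/*
 * Note: when 'found' is a negative cache entry, foundsig is deliberately
 * not bound; any signatures are carried inside the negative-cache
 * (ncache) data itself, so the caller's sigrdataset stays disassociated
 * in that case.
 */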

static isc_result_t
setup_delegation(qpc_search_t *search, dns_dbnode_t **nodep,
		 dns_rdataset_t *rdataset, dns_rdataset_t *sigrdataset,
		 isc_rwlocktype_t tlocktype DNS__DB_FLARG) {
	dns_typepair_t typepair;
	qpcnode_t *node = NULL;

	REQUIRE(search != NULL);
	REQUIRE(search->zonecut != NULL);
	REQUIRE(search->zonecut_header != NULL);

	/*
	 * The caller MUST NOT be holding any node locks.
	 */

	node = search->zonecut;
	typepair = search->zonecut_header->typepair;

	if (nodep != NULL) {
		/*
		 * Note that we don't have to increment the node's reference
		 * count here because we're going to use the reference we
		 * already have in the search block.
		 */
		*nodep = (dns_dbnode_t *)node;
		search->need_cleanup = false;
	}

	if (rdataset != NULL) {
		isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
		isc_rwlock_t *nlock =
			&search->qpdb->buckets[node->locknum].lock;
		NODE_RDLOCK(nlock, &nlocktype);
		bindrdatasets(search->qpdb, node, search->zonecut_header,
			      search->zonecut_sigheader, search->now, nlocktype,
			      tlocktype, rdataset,
			      sigrdataset DNS__DB_FLARG_PASS);
		NODE_UNLOCK(nlock, &nlocktype);
	}

	if (typepair == dns_rdatatype_dname) {
		return DNS_R_DNAME;
	}
	return DNS_R_DELEGATION;
}

static bool
check_stale_header(dns_slabheader_t *header, qpc_search_t *search,
		   dns_slabheader_t **header_prev) {
	if (ACTIVE(header, search->now)) {
		*header_prev = header;
		return false;
	}

	isc_stdtime_t stale = header->expire + STALE_TTL(header, search->qpdb);
	/*
	 * If this data is in the stale window, keep it; if
	 * DNS_DBFIND_STALEOK is not set, we tell the caller to
	 * skip this record. We skip records with ZEROTTL
	 * (these records should not be cached anyway).
	 */

	DNS_SLABHEADER_CLRATTR(header, DNS_SLABHEADERATTR_STALE_WINDOW);
	if (!ZEROTTL(header) && KEEPSTALE(search->qpdb) && stale > search->now)
	{
		mark(header, DNS_SLABHEADERATTR_STALE);
		*header_prev = header;
		/*
		 * If DNS_DBFIND_STALESTART is set then it means we
		 * failed to resolve the name during recursion; in
		 * this case we mark the time at which the refresh
		 * failed.
		 */
		if ((search->options & DNS_DBFIND_STALESTART) != 0) {
			atomic_store_release(&header->last_refresh_fail_ts,
					     search->now);
		} else if ((search->options & DNS_DBFIND_STALEENABLED) != 0 &&
			   search->now <
				   (atomic_load_acquire(
					    &header->last_refresh_fail_ts) +
				    search->qpdb->serve_stale_refresh))
		{
			/*
			 * If we are within the interval between the last
			 * refresh failure time + 'stale-refresh-time',
			 * then don't skip this stale entry but use it
			 * instead.
			 */
			DNS_SLABHEADER_SETATTR(header,
					       DNS_SLABHEADERATTR_STALE_WINDOW);
			return false;
		} else if ((search->options & DNS_DBFIND_STALETIMEOUT) != 0) {
			/*
			 * We want stale RRsets due to timeout, so we
			 * don't skip it.
			 */
			return false;
		}
		return (search->options & DNS_DBFIND_STALEOK) == 0;
	}

	*header_prev = header;
	return true;
}
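
/*
 * To summarize the serve-stale decision above, once a header is no
 * longer active (search->now is past header->expire):
 *
 *	expire                  expire + STALE_TTL()
 *	  |--- stale, may be served ---|--- ancient, skip ---|
 *
 * Within the stale span a header is actually used only when one of
 * DNS_DBFIND_STALETIMEOUT, the stale-refresh window, or
 * DNS_DBFIND_STALEOK applies; otherwise the caller is told to skip it.
 */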

/*
 * Return true if we've found headers for both 'type' and RRSIG('type'),
 * or (optionally, if 'negpair' is nonzero) if we've found a single
 * negative header covering either 'negpair' or ANY.
 */
static bool
related_headers(dns_slabheader_t *header, dns_typepair_t typepair,
		dns_typepair_t sigpair, dns_typepair_t negpair,
		dns_slabheader_t **foundp, dns_slabheader_t **foundsigp,
		bool *matchp) {
	if (!EXISTS(header) || ANCIENT(header)) {
		return false;
	}

	if (header->typepair == typepair) {
		*foundp = header;
		SET_IF_NOT_NULL(matchp, true);
		if (*foundsigp != NULL) {
			return true;
		}
	} else if (header->typepair == sigpair) {
		*foundsigp = header;
		SET_IF_NOT_NULL(matchp, true);
		if (*foundp != NULL) {
			return true;
		}
	} else if (negpair != 0 && (header->typepair == RDATATYPE_NCACHEANY ||
				    header->typepair == negpair))
	{
		*foundp = header;
		*foundsigp = NULL;
		SET_IF_NOT_NULL(matchp, true);
		return true;
	}

	return false;
}
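
/*
 * For example (a sketch of how the callers below derive the pairs), a
 * lookup for type A would use:
 *
 *	typepair = DNS_TYPEPAIR_VALUE(dns_rdatatype_a, 0);
 *	sigpair = DNS_SIGTYPE(dns_rdatatype_a);
 *	negpair = DNS_TYPEPAIR_VALUE(0, dns_rdatatype_a);
 *
 * so a header whose typepair equals negpair (a cached NODATA proof for
 * A) or RDATATYPE_NCACHEANY (e.g. a cached NXDOMAIN) ends the search
 * immediately with *foundsigp cleared.
 */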

/*
 * Return true if we've found headers for both 'type' and RRSIG('type').
 */
static bool
both_headers(dns_slabheader_t *header, dns_rdatatype_t type,
	     dns_slabheader_t **foundp, dns_slabheader_t **foundsigp) {
	dns_typepair_t typepair = DNS_TYPEPAIR_VALUE(type, 0);
	dns_typepair_t sigpair = DNS_SIGTYPE(type);

	return related_headers(header, typepair, sigpair, 0, foundp, foundsigp,
			       NULL);
}

static isc_result_t
check_zonecut(qpcnode_t *node, void *arg DNS__DB_FLARG) {
	qpc_search_t *search = arg;
	dns_slabheader_t *header = NULL;
	dns_slabheader_t *header_prev = NULL, *header_next = NULL;
	dns_slabheader_t *dname_header = NULL, *sigdname_header = NULL;
	isc_result_t result;
	isc_rwlock_t *nlock = NULL;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;

	REQUIRE(search->zonecut == NULL);

	nlock = &search->qpdb->buckets[node->locknum].lock;
	NODE_RDLOCK(nlock, &nlocktype);

	/*
	 * Look for a DNAME or RRSIG DNAME rdataset.
	 */
	for (header = node->data; header != NULL; header = header_next) {
		header_next = header->next;
		if (check_stale_header(header, search, &header_prev)) {
			continue;
		}

		if (both_headers(header, dns_rdatatype_dname, &dname_header,
				 &sigdname_header))
		{
			break;
		}
	}

	if (dname_header != NULL &&
	    (!DNS_TRUST_PENDING(dname_header->trust) ||
	     (search->options & DNS_DBFIND_PENDINGOK) != 0))
	{
		/*
		 * We increment the reference count on node to ensure that
		 * search->zonecut_header will still be valid later.
		 */
		qpcnode_acquire(search->qpdb, node, nlocktype,
				isc_rwlocktype_none DNS__DB_FLARG_PASS);
		search->zonecut = node;
		search->zonecut_header = dname_header;
		search->zonecut_sigheader = sigdname_header;
		search->need_cleanup = true;
		result = DNS_R_PARTIALMATCH;
	} else {
		result = DNS_R_CONTINUE;
	}

	NODE_UNLOCK(nlock, &nlocktype);

	return result;
}
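
/*
 * check_zonecut() is used as a callback while walking the QP chain in
 * qpcache_find() below: DNS_R_CONTINUE means "no zone cut here, keep
 * looking", while DNS_R_PARTIALMATCH ends the walk with search->zonecut
 * pointing at the DNAME node, holding a node reference that is later
 * released through the search->need_cleanup path.
 */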

static isc_result_t
find_deepest_zonecut(qpc_search_t *search, qpcnode_t *node,
		     dns_dbnode_t **nodep, dns_name_t *foundname,
		     dns_rdataset_t *rdataset,
		     dns_rdataset_t *sigrdataset DNS__DB_FLARG) {
	isc_result_t result = ISC_R_NOTFOUND;
	qpcache_t *qpdb = NULL;

	/*
	 * Caller must be holding the tree lock.
	 */

	qpdb = search->qpdb;

	for (int i = dns_qpchain_length(&search->chain) - 1; i >= 0; i--) {
		dns_slabheader_t *header = NULL;
		dns_slabheader_t *header_prev = NULL, *header_next = NULL;
		dns_slabheader_t *found = NULL, *foundsig = NULL;
		isc_rwlock_t *nlock = NULL;
		isc_rwlocktype_t nlocktype = isc_rwlocktype_none;

		dns_qpchain_node(&search->chain, i, NULL, (void **)&node, NULL);
		nlock = &qpdb->buckets[node->locknum].lock;

		NODE_RDLOCK(nlock, &nlocktype);

		/*
		 * Look for NS and RRSIG NS rdatasets.
		 */
		for (header = node->data; header != NULL; header = header_next)
		{
			header_next = header->next;
			if (check_stale_header(header, search, &header_prev)) {
				continue;
			}

			if (both_headers(header, dns_rdatatype_ns, &found,
					 &foundsig))
			{
				break;
			}
		}

		if (found != NULL) {
			/*
			 * If we have to set foundname, we do it before
			 * anything else.
			 */
			if (foundname != NULL) {
				dns_name_copy(&node->name, foundname);
			}
			result = DNS_R_DELEGATION;
			if (nodep != NULL) {
				qpcnode_acquire(
					search->qpdb, node, nlocktype,
					isc_rwlocktype_none DNS__DB_FLARG_PASS);
				*nodep = (dns_dbnode_t *)node;
			}
			bindrdatasets(search->qpdb, node, found, foundsig,
				      search->now, nlocktype,
				      isc_rwlocktype_none, rdataset,
				      sigrdataset DNS__DB_FLARG_PASS);
		}

		NODE_UNLOCK(nlock, &nlocktype);

		if (found != NULL) {
			break;
		}
	}

	return result;
}

/*
 * Look for a potentially covering NSEC in the cache where `name`
 * is known not to exist. This uses the auxiliary NSEC tree to find
 * the potential NSEC owner. If found, we update 'foundname', 'nodep',
 * 'rdataset' and 'sigrdataset', and return DNS_R_COVERINGNSEC.
 * Otherwise, return ISC_R_NOTFOUND.
 */
static isc_result_t
find_coveringnsec(qpc_search_t *search, const dns_name_t *name,
		  dns_dbnode_t **nodep, dns_name_t *foundname,
		  dns_rdataset_t *rdataset,
		  dns_rdataset_t *sigrdataset DNS__DB_FLARG) {
	dns_fixedname_t fpredecessor, fixed;
	dns_name_t *predecessor = NULL, *fname = NULL;
	qpcnode_t *node = NULL;
	dns_qpiter_t iter;
	isc_result_t result;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlock_t *nlock = NULL;
	dns_slabheader_t *found = NULL, *foundsig = NULL;
	dns_slabheader_t *header = NULL;
	dns_slabheader_t *header_next = NULL, *header_prev = NULL;

	/*
	 * Look for the node in the auxiliary tree.
	 */
	result = dns_qp_lookup(search->qpdb->nsec, name, DNS_DBNAMESPACE_NSEC,
			       NULL, &iter, NULL, (void **)&node, NULL);
	/*
	 * When DNS_R_PARTIALMATCH or ISC_R_NOTFOUND is returned from
	 * dns_qp_lookup there is potentially a covering NSEC present
	 * in the cache, so we need to search for it. Otherwise we are
	 * done here.
	 */
	if (result != DNS_R_PARTIALMATCH && result != ISC_R_NOTFOUND) {
		return ISC_R_NOTFOUND;
	}

	fname = dns_fixedname_initname(&fixed);
	predecessor = dns_fixedname_initname(&fpredecessor);

	/*
	 * Extract predecessor from iterator.
	 */
	result = dns_qpiter_current(&iter, predecessor, NULL, NULL);
	if (result != ISC_R_SUCCESS) {
		return ISC_R_NOTFOUND;
	}

	/*
	 * Lookup the predecessor in the main tree.
	 */
	node = NULL;
	result = dns_qp_getname(search->qpdb->tree, predecessor,
				DNS_DBNAMESPACE_NORMAL, (void **)&node, NULL);
	if (result != ISC_R_SUCCESS) {
		return result;
	}
	dns_name_copy(&node->name, fname);

	nlock = &search->qpdb->buckets[node->locknum].lock;
	NODE_RDLOCK(nlock, &nlocktype);
	for (header = node->data; header != NULL; header = header_next) {
		header_next = header->next;
		if (check_stale_header(header, search, &header_prev)) {
			continue;
		}

		if (DNS_TYPEPAIR_TYPE(header->typepair) == 0) {
			continue;
		}

		if (both_headers(header, dns_rdatatype_nsec, &found, &foundsig))
		{
			break;
		}
	}
	if (found != NULL) {
		if (nodep != NULL) {
			qpcnode_acquire(search->qpdb, node, nlocktype,
					isc_rwlocktype_none DNS__DB_FLARG_PASS);
			*nodep = (dns_dbnode_t *)node;
		}
		bindrdatasets(search->qpdb, node, found, foundsig, search->now,
			      nlocktype, isc_rwlocktype_none, rdataset,
			      sigrdataset DNS__DB_FLARG_PASS);
		dns_name_copy(fname, foundname);

		result = DNS_R_COVERINGNSEC;
	} else {
		result = ISC_R_NOTFOUND;
	}
	NODE_UNLOCK(nlock, &nlocktype);
	return result;
}
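
/*
 * A worked example with hypothetical data: suppose the cache holds an
 * NSEC record at a.example proving that nothing exists between
 * a.example and d.example, and a query arrives for b.example. The
 * NSEC-tree lookup for b.example fails, but the iterator is left at the
 * predecessor, a.example; looking that name up in the main tree and
 * finding an active NSEC (plus its RRSIG, if cached) lets us return
 * DNS_R_COVERINGNSEC with foundname set to a.example.
 */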

#define MISSING_ANSWER(found, options)                                \
	((found) == NULL ||                                           \
	 (DNS_TRUST_ADDITIONAL((found)->trust) &&                     \
	  (((options) & DNS_DBFIND_ADDITIONALOK) == 0)) ||            \
	 ((found)->trust == dns_trust_glue &&                         \
	  (((options) & DNS_DBFIND_GLUEOK) == 0)) ||                  \
	 (DNS_TRUST_PENDING((found)->trust) &&                        \
	  (((options) & DNS_DBFIND_PENDINGOK) == 0)))
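
/*
 * In other words, an answer is "missing" when nothing was found, or when
 * the found header's trust level is unacceptable for this lookup: for
 * example, a header with pending trust (e.g. dns_trust_pending_answer)
 * only satisfies a caller that passed DNS_DBFIND_PENDINGOK, and glue
 * only satisfies a caller that passed DNS_DBFIND_GLUEOK.
 */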

static void
qpc_search_init(qpc_search_t *search, qpcache_t *db, unsigned int options,
		isc_stdtime_t now) {
	/*
	 * qpc_search_t contains two structures with large buffers (dns_qpiter_t
	 * and dns_qpchain_t). Those two structures will be initialized later by
	 * dns_qp_lookup anyway.
	 * To avoid the overhead of zero initialization, we avoid designated
	 * initializers and initialize all "small" fields manually.
	 */
	search->qpdb = (qpcache_t *)db;
	search->options = options;
	/*
	 * chain - initialized by dns_qp_lookup
	 * iter - initialized by dns_qp_lookup
	 */
	search->need_cleanup = false;
	search->now = now ? now : isc_stdtime_now();
	search->zonecut = NULL;
	search->zonecut_header = NULL;
	search->zonecut_sigheader = NULL;
}
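
/*
 * Typical use, as in qpcache_find() below: the search lives on the
 * stack and deliberately stays partly uninitialized until
 * dns_qp_lookup() fills in the chain:
 *
 *	qpc_search_t search;
 *	qpc_search_init(&search, (qpcache_t *)db, options, __now);
 *	result = dns_qp_lookup(search.qpdb->tree, name,
 *			       DNS_DBNAMESPACE_NORMAL, NULL, NULL,
 *			       &search.chain, (void **)&node, NULL);
 */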

static isc_result_t
qpcache_find(dns_db_t *db, const dns_name_t *name, dns_dbversion_t *version,
	     dns_rdatatype_t type, unsigned int options, isc_stdtime_t __now,
	     dns_dbnode_t **nodep, dns_name_t *foundname,
	     dns_rdataset_t *rdataset,
	     dns_rdataset_t *sigrdataset DNS__DB_FLARG) {
	qpcnode_t *node = NULL;
	isc_result_t result;
	bool cname_ok = true;
	bool found_noqname = false;
	bool all_negative = true;
	bool empty_node;
	isc_rwlock_t *nlock = NULL;
	isc_rwlocktype_t tlocktype = isc_rwlocktype_none;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	dns_slabheader_t *header = NULL;
	dns_slabheader_t *header_prev = NULL, *header_next = NULL;
	dns_slabheader_t *found = NULL, *nsheader = NULL;
	dns_slabheader_t *foundsig = NULL, *nssig = NULL, *cnamesig = NULL;
	dns_slabheader_t *nsecheader = NULL, *nsecsig = NULL;
	dns_typepair_t typepair, sigpair, negpair;

	qpc_search_t search;
	qpc_search_init(&search, (qpcache_t *)db, options, __now);

	REQUIRE(VALID_QPDB((qpcache_t *)db));
	REQUIRE(version == NULL);

	TREE_RDLOCK(&search.qpdb->tree_lock, &tlocktype);

	/*
	 * Search down from the root of the tree.
	 */
	result = dns_qp_lookup(search.qpdb->tree, name, DNS_DBNAMESPACE_NORMAL,
			       NULL, NULL, &search.chain, (void **)&node, NULL);
	if (result != ISC_R_NOTFOUND && foundname != NULL) {
		dns_name_copy(&node->name, foundname);
	}

	/*
	 * Check the QP chain to see if there's a node above us with an
	 * active DNAME or NS rdataset.
	 *
	 * We're only interested in nodes above QNAME, so if the result
	 * was success, then we skip the last item in the chain.
	 */
	unsigned int len = dns_qpchain_length(&search.chain);
	if (result == ISC_R_SUCCESS) {
		len--;
	}

	for (unsigned int i = 0; i < len; i++) {
		isc_result_t zcresult;
		qpcnode_t *encloser = NULL;

		dns_qpchain_node(&search.chain, i, NULL, (void **)&encloser,
				 NULL);

		zcresult = check_zonecut(encloser,
					 (void *)&search DNS__DB_FLARG_PASS);
		if (zcresult != DNS_R_CONTINUE) {
			result = DNS_R_PARTIALMATCH;
			search.chain.len = i - 1;
			node = encloser;
			if (foundname != NULL) {
				dns_name_copy(&node->name, foundname);
			}
			break;
		}
	}

	if (result == DNS_R_PARTIALMATCH) {
		/*
		 * If we discovered a covering DNAME, skip looking for a
		 * covering NSEC.
		 */
		if ((search.options & DNS_DBFIND_COVERINGNSEC) != 0 &&
		    (search.zonecut_header == NULL ||
		     search.zonecut_header->typepair != dns_rdatatype_dname))
		{
			result = find_coveringnsec(
				&search, name, nodep, foundname, rdataset,
				sigrdataset DNS__DB_FLARG_PASS);
			if (result == DNS_R_COVERINGNSEC) {
				goto tree_exit;
			}
		}
		if (search.zonecut != NULL) {
			result = setup_delegation(&search, nodep, rdataset,
						  sigrdataset,
						  tlocktype DNS__DB_FLARG_PASS);
			goto tree_exit;
		} else {
		find_ns:
			result = find_deepest_zonecut(
				&search, node, nodep, foundname, rdataset,
				sigrdataset DNS__DB_FLARG_PASS);
			goto tree_exit;
		}
	} else if (result != ISC_R_SUCCESS) {
		goto tree_exit;
	}

	/*
	 * Certain DNSSEC types are not subject to CNAME matching
	 * (RFC4035, section 2.5 and RFC3007).
	 *
	 * We don't check for RRSIG, because we don't store RRSIG records
	 * directly.
	 */
	if (type == dns_rdatatype_key || type == dns_rdatatype_nsec) {
		cname_ok = false;
	}

	/*
	 * We now go looking for rdata...
	 */

	nlock = &search.qpdb->buckets[node->locknum].lock;
	NODE_RDLOCK(nlock, &nlocktype);

	/*
	 * These pointers need to be reset here in case we did
	 * 'goto find_ns' from somewhere below.
	 */
	found = NULL;
	foundsig = NULL;
	typepair = DNS_TYPEPAIR_VALUE(type, 0);
	sigpair = DNS_SIGTYPE(type);
	negpair = DNS_TYPEPAIR_VALUE(0, type);
	nsheader = NULL;
	nsecheader = NULL;
	nssig = NULL;
	nsecsig = NULL;
	cnamesig = NULL;
	empty_node = true;
	header_prev = NULL;
	for (header = node->data; header != NULL; header = header_next) {
		header_next = header->next;
		if (check_stale_header(header, &search, &header_prev)) {
			continue;
		}

		if (!EXISTS(header) || ANCIENT(header)) {
			continue;
		}

		/*
		 * We now know that there is at least one active
		 * non-stale rdataset at this node.
		 */
		empty_node = false;

		if (header->noqname != NULL &&
		    header->trust == dns_trust_secure)
		{
			found_noqname = true;
		}

		if (!NEGATIVE(header)) {
			all_negative = false;
		}

		bool match = false;
		if (related_headers(header, typepair, sigpair, negpair, &found,
				    &foundsig, &match) &&
		    !MISSING_ANSWER(found, options))
		{
			/*
			 * We can't exit early until we have an answer with
			 * a sufficient trust level (see the MISSING_ANSWER()
			 * macro for details), because we might still need NS
			 * or NSEC records.
			 */
			break;
		}

		if (match) {
			/* We found something, continue with next header */
			continue;
		}

		switch (header->typepair) {
		case dns_rdatatype_cname:
			if (!cname_ok) {
				break;
			}

			found = header;
			if (cnamesig != NULL) {
				/* We already have CNAME signature */
				foundsig = cnamesig;
			} else {
				/* Look for CNAME signature instead */
				sigpair = DNS_SIGTYPE(dns_rdatatype_cname);
				foundsig = NULL;
			}
			break;
		case DNS_SIGTYPE(dns_rdatatype_cname):
			if (!cname_ok) {
				break;
			}

			cnamesig = header;
			break;
		case dns_rdatatype_ns:
			/* Remember the NS rdataset */
			nsheader = header;
			break;
		case DNS_SIGTYPE(dns_rdatatype_ns):
			/* ...and its signature */
			nssig = header;
			break;

		case dns_rdatatype_nsec:
			nsecheader = header;
			break;
		case DNS_SIGTYPE(dns_rdatatype_nsec):
			nsecsig = header;
			break;

		default:
			if (typepair == dns_rdatatype_any &&
			    DNS_TYPEPAIR_TYPE(header->typepair) != 0)
			{
				/* QTYPE==ANY, so any answers will do */
				found = header;
				break;
			}
		}
	}

	if (empty_node) {
		/*
		 * We have an exact match for the name, but there are no
		 * extant rdatasets. That means that this node doesn't
		 * meaningfully exist, and that we really have a partial match.
		 */
		NODE_UNLOCK(nlock, &nlocktype);
		if ((search.options & DNS_DBFIND_COVERINGNSEC) != 0) {
			result = find_coveringnsec(
				&search, name, nodep, foundname, rdataset,
				sigrdataset DNS__DB_FLARG_PASS);
			if (result == DNS_R_COVERINGNSEC) {
				goto tree_exit;
			}
		}
		goto find_ns;
	}

	/*
	 * If we didn't find what we were looking for...
	 */
	if (MISSING_ANSWER(found, options)) {
		/*
		 * Return covering NODATA NSEC record.
		 */
		if ((search.options & DNS_DBFIND_COVERINGNSEC) != 0 &&
		    nsecheader != NULL)
		{
			if (nodep != NULL) {
				qpcnode_acquire(search.qpdb, node, nlocktype,
						tlocktype DNS__DB_FLARG_PASS);
				*nodep = (dns_dbnode_t *)node;
			}
			bindrdatasets(search.qpdb, node, nsecheader, nsecsig,
				      search.now, nlocktype, tlocktype,
				      rdataset, sigrdataset DNS__DB_FLARG_PASS);
			result = DNS_R_COVERINGNSEC;
			goto node_exit;
		}

		/*
		 * This name was from a wildcard. Look for a covering NSEC.
		 */
		if (found == NULL && (found_noqname || all_negative) &&
		    (search.options & DNS_DBFIND_COVERINGNSEC) != 0)
		{
			NODE_UNLOCK(nlock, &nlocktype);
			result = find_coveringnsec(
				&search, name, nodep, foundname, rdataset,
				sigrdataset DNS__DB_FLARG_PASS);
			if (result == DNS_R_COVERINGNSEC) {
				goto tree_exit;
			}
			goto find_ns;
		}

		/*
		 * If there is an NS rdataset at this node, then this is the
		 * deepest zone cut.
		 */
		if (nsheader != NULL) {
			if (nodep != NULL) {
				qpcnode_acquire(search.qpdb, node, nlocktype,
						tlocktype DNS__DB_FLARG_PASS);
				*nodep = (dns_dbnode_t *)node;
			}
			bindrdatasets(search.qpdb, node, nsheader, nssig,
				      search.now, nlocktype, tlocktype,
				      rdataset, sigrdataset DNS__DB_FLARG_PASS);
			result = DNS_R_DELEGATION;
			goto node_exit;
		}

		/*
		 * Go find the deepest zone cut.
		 */
		NODE_UNLOCK(nlock, &nlocktype);
		goto find_ns;
	}

	/*
	 * We found what we were looking for, or we found a CNAME.
	 */

	if (nodep != NULL) {
		qpcnode_acquire(search.qpdb, node, nlocktype,
				tlocktype DNS__DB_FLARG_PASS);
		*nodep = (dns_dbnode_t *)node;
	}

	if (NEGATIVE(found)) {
		/*
		 * We found a negative cache entry.
		 */
		if (NXDOMAIN(found)) {
			result = DNS_R_NCACHENXDOMAIN;
		} else {
			result = DNS_R_NCACHENXRRSET;
		}
	} else if (typepair != found->typepair &&
		   typepair != dns_rdatatype_any &&
		   found->typepair == dns_rdatatype_cname)
	{
		/*
		 * We weren't doing an ANY query and we found a CNAME instead
		 * of the type we were looking for, so we need to indicate
		 * that result to the caller.
		 */
		result = DNS_R_CNAME;
	} else {
		/*
		 * An ordinary successful query!
		 */
		result = ISC_R_SUCCESS;
	}

	if (typepair != dns_rdatatype_any || result == DNS_R_NCACHENXDOMAIN ||
	    result == DNS_R_NCACHENXRRSET)
	{
		bindrdatasets(search.qpdb, node, found, foundsig, search.now,
			      nlocktype, tlocktype, rdataset,
			      sigrdataset DNS__DB_FLARG_PASS);
	}

node_exit:
	NODE_UNLOCK(nlock, &nlocktype);

tree_exit:
	TREE_UNLOCK(&search.qpdb->tree_lock, &tlocktype);

	/*
	 * If we found a zonecut but aren't going to use it, we have to
	 * let go of it.
	 */
	if (search.need_cleanup) {
		node = search.zonecut;
		INSIST(node != NULL);
		nlock = &search.qpdb->buckets[node->locknum].lock;

		NODE_RDLOCK(nlock, &nlocktype);
		qpcnode_release(search.qpdb, node, &nlocktype,
				&tlocktype DNS__DB_FLARG_PASS);
		NODE_UNLOCK(nlock, &nlocktype);
		INSIST(tlocktype == isc_rwlocktype_none);
	}

	update_cachestats(search.qpdb, result);
	return result;
}

static isc_result_t
seek_ns_headers(qpc_search_t *search, qpcnode_t *node, dns_dbnode_t **nodep,
		dns_rdataset_t *rdataset, dns_rdataset_t *sigrdataset,
		dns_name_t *foundname, dns_name_t *dcname,
		isc_rwlocktype_t *tlocktype DNS__DB_FLARG) {
	dns_slabheader_t *header = NULL;
	dns_slabheader_t *header_prev = NULL, *header_next = NULL;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlock_t *nlock = &search->qpdb->buckets[node->locknum].lock;
	dns_slabheader_t *found = NULL, *foundsig = NULL;

	NODE_RDLOCK(nlock, &nlocktype);

	for (header = node->data; header != NULL; header = header_next) {
		header_next = header->next;
		bool ns = (header->typepair == dns_rdatatype_ns ||
			   header->typepair == DNS_SIGTYPE(dns_rdatatype_ns));
		if (check_stale_header(header, search, &header_prev)) {
			if (ns) {
				/*
				 * We found a cached NS, but it was either
				 * ancient, or stale with serve-stale
				 * disabled, so this node can't be used
				 * as a zone cut we know about. Instead we
				 * bail out and call find_deepest_zonecut()
				 * below.
				 */
				break;
			}
			continue;
		}

		if (both_headers(header, dns_rdatatype_ns, &found, &foundsig)) {
			break;
		}
	}

	if (found == NULL) {
		isc_result_t result;

		/*
		 * No active NS records found. Call find_deepest_zonecut()
		 * to look for them in nodes above this one.
		 */
		NODE_UNLOCK(nlock, &nlocktype);
		result = find_deepest_zonecut(search, node, nodep, foundname,
					      rdataset,
					      sigrdataset DNS__DB_FLARG_PASS);
		if (dcname != NULL) {
			dns_name_copy(foundname, dcname);
		}
		return result;
	}

	if (nodep != NULL) {
		qpcnode_acquire(search->qpdb, node, nlocktype,
				*tlocktype DNS__DB_FLARG_PASS);
		*nodep = (dns_dbnode_t *)node;
	}

	bindrdatasets(search->qpdb, node, found, foundsig, search->now,
		      nlocktype, *tlocktype, rdataset,
		      sigrdataset DNS__DB_FLARG_PASS);

	NODE_UNLOCK(nlock, &nlocktype);

	return ISC_R_SUCCESS;
}

static isc_result_t
qpcache_findzonecut(dns_db_t *db, const dns_name_t *name, unsigned int options,
		    isc_stdtime_t __now, dns_dbnode_t **nodep,
		    dns_name_t *foundname, dns_name_t *dcname,
		    dns_rdataset_t *rdataset,
		    dns_rdataset_t *sigrdataset DNS__DB_FLARG) {
	qpcnode_t *node = NULL;
	isc_result_t result;
	isc_rwlocktype_t tlocktype = isc_rwlocktype_none;
	qpc_search_t search = (qpc_search_t){
		.qpdb = (qpcache_t *)db,
		.options = options,
		.now = __now ? __now : isc_stdtime_now(),
	};
	unsigned int len = 0;

	REQUIRE(VALID_QPDB((qpcache_t *)db));

	TREE_RDLOCK(&search.qpdb->tree_lock, &tlocktype);

	/*
	 * Search down from the root of the tree.
	 */
	result = dns_qp_lookup(search.qpdb->tree, name, DNS_DBNAMESPACE_NORMAL,
			       NULL, NULL, &search.chain, (void **)&node, NULL);

	switch (result) {
	case ISC_R_SUCCESS:
		if ((options & DNS_DBFIND_NOEXACT) == 0) {
			if (dcname != NULL) {
				dns_name_copy(&node->name, dcname);
			}
			dns_name_copy(&node->name, foundname);
			result = seek_ns_headers(&search, node, nodep, rdataset,
						 sigrdataset, foundname, dcname,
						 &tlocktype DNS__DB_FLARG_PASS);
			break;
		}

		len = dns_qpchain_length(&search.chain);
		if (len < 2) {
			result = ISC_R_NOTFOUND;
			break;
		}

		FALLTHROUGH;
	case DNS_R_PARTIALMATCH:
		if (dcname != NULL) {
			dns_name_copy(&node->name, dcname);
		}

		if (result == ISC_R_SUCCESS) {
			/* Fell through from the previous case */
			INSIST(len >= 2);

			node = NULL;
			dns_qpchain_node(&search.chain, len - 2, NULL,
					 (void **)&node, NULL);
			search.chain.len = len - 1;
		}

		result = find_deepest_zonecut(&search, node, nodep, foundname,
					      rdataset,
					      sigrdataset DNS__DB_FLARG_PASS);
		break;
	default:
		break;
	}

	TREE_UNLOCK(&search.qpdb->tree_lock, &tlocktype);

	INSIST(!search.need_cleanup);

	if (result == DNS_R_DELEGATION) {
		result = ISC_R_SUCCESS;
	}

	return result;
}

static isc_result_t
qpcache_findrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
		     dns_rdatatype_t type, dns_rdatatype_t covers,
		     isc_stdtime_t __now, dns_rdataset_t *rdataset,
		     dns_rdataset_t *sigrdataset DNS__DB_FLARG) {
	qpcache_t *qpdb = (qpcache_t *)db;
	qpcnode_t *qpnode = (qpcnode_t *)node;
	dns_slabheader_t *header = NULL;
	dns_slabheader_t *header_prev = NULL, *header_next = NULL;
	dns_slabheader_t *found = NULL, *foundsig = NULL;
	dns_typepair_t typepair, sigpair, negpair;
	isc_result_t result = ISC_R_SUCCESS;
	isc_rwlock_t *nlock = NULL;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	qpc_search_t search = (qpc_search_t){
		.qpdb = (qpcache_t *)db,
		.now = __now ? __now : isc_stdtime_now(),
	};

	REQUIRE(VALID_QPDB(qpdb));
	REQUIRE(version == NULL);
	REQUIRE(type != dns_rdatatype_any);

	if (type == dns_rdatatype_none && covers == dns_rdatatype_none) {
		return ISC_R_NOTFOUND;
	}

	nlock = &qpdb->buckets[qpnode->locknum].lock;
	NODE_RDLOCK(nlock, &nlocktype);

	typepair = DNS_TYPEPAIR_VALUE(type, covers);
	negpair = DNS_TYPEPAIR_VALUE(0, type);
	sigpair = (covers == 0) ? DNS_SIGTYPE(type) : 0;

	for (header = qpnode->data; header != NULL; header = header_next) {
		header_next = header->next;

		if (check_stale_header(header, &search, &header_prev)) {
			continue;
		}

		if (related_headers(header, typepair, sigpair, negpair, &found,
				    &foundsig, NULL))
		{
			break;
		}
	}
	if (found != NULL) {
		bindrdatasets(qpdb, qpnode, found, foundsig, search.now,
			      nlocktype, isc_rwlocktype_none, rdataset,
			      sigrdataset DNS__DB_FLARG_PASS);
	}

	NODE_UNLOCK(nlock, &nlocktype);

	if (found == NULL) {
		return ISC_R_NOTFOUND;
	}

	if (NEGATIVE(found)) {
		/*
		 * We found a negative cache entry.
		 */
		if (NXDOMAIN(found)) {
			result = DNS_R_NCACHENXDOMAIN;
		} else {
			result = DNS_R_NCACHENXRRSET;
		}
	}

	update_cachestats(qpdb, result);

	return result;
}

static isc_result_t
setcachestats(dns_db_t *db, isc_stats_t *stats) {
	qpcache_t *qpdb = (qpcache_t *)db;

	REQUIRE(VALID_QPDB(qpdb));
	REQUIRE(stats != NULL);

	isc_stats_attach(stats, &qpdb->cachestats);
	return ISC_R_SUCCESS;
}

static dns_stats_t *
getrrsetstats(dns_db_t *db) {
	qpcache_t *qpdb = (qpcache_t *)db;

	REQUIRE(VALID_QPDB(qpdb));

	return qpdb->rrsetstats;
}

static isc_result_t
setservestalettl(dns_db_t *db, dns_ttl_t ttl) {
	qpcache_t *qpdb = (qpcache_t *)db;

	REQUIRE(VALID_QPDB(qpdb));

	/* currently no bounds checking. 0 means disable. */
	qpdb->common.serve_stale_ttl = ttl;
	return ISC_R_SUCCESS;
}

static isc_result_t
getservestalettl(dns_db_t *db, dns_ttl_t *ttl) {
	qpcache_t *qpdb = (qpcache_t *)db;

	REQUIRE(VALID_QPDB(qpdb));

	*ttl = qpdb->common.serve_stale_ttl;
	return ISC_R_SUCCESS;
}

static isc_result_t
setservestalerefresh(dns_db_t *db, uint32_t interval) {
	qpcache_t *qpdb = (qpcache_t *)db;

	REQUIRE(VALID_QPDB(qpdb));

	/* currently no bounds checking. 0 means disable. */
	qpdb->serve_stale_refresh = interval;
	return ISC_R_SUCCESS;
}

static isc_result_t
getservestalerefresh(dns_db_t *db, uint32_t *interval) {
	qpcache_t *qpdb = (qpcache_t *)db;

	REQUIRE(VALID_QPDB(qpdb));

	*interval = qpdb->serve_stale_refresh;
	return ISC_R_SUCCESS;
}

static void
qpcnode_expiredata(dns_dbnode_t *node, void *data) {
	qpcnode_t *qpnode = (qpcnode_t *)node;
	qpcache_t *qpdb = (qpcache_t *)qpnode->qpdb;

	dns_slabheader_t *header = data;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlocktype_t tlocktype = isc_rwlocktype_none;

	isc_rwlock_t *nlock = &qpdb->buckets[qpnode->locknum].lock;
	NODE_WRLOCK(nlock, &nlocktype);
	expireheader(header, &nlocktype, &tlocktype,
		     dns_expire_flush DNS__DB_FILELINE);
	NODE_UNLOCK(nlock, &nlocktype);
	INSIST(tlocktype == isc_rwlocktype_none);
}

/*%
 * These functions allow the heap code to rank the priority of each
 * element. The comparator returns true if v1 happens "sooner" than v2.
 */
static bool
ttl_sooner(void *v1, void *v2) {
	dns_slabheader_t *h1 = v1;
	dns_slabheader_t *h2 = v2;

	return h1->expire < h2->expire;
}

/*%
 * This function sets the heap index into the header.
 */
static void
set_index(void *what, unsigned int idx) {
	dns_slabheader_t *h = what;

	h->heap_index = idx;
}
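
/*
 * ttl_sooner() and set_index() are the comparator and index callbacks
 * for the per-bucket TTL heaps (qpdb->buckets[i].heap, destroyed in
 * qpcache__destroy() below). Ordering headers by 'expire' keeps the
 * soonest-to-expire header at the top of its heap, and the stored
 * heap_index allows a header to be removed from the middle of the heap
 * in O(log n) when its rdataset is updated or expired.
 */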

static void
qpcache__destroy(qpcache_t *qpdb) {
	unsigned int i;
	char buf[DNS_NAME_FORMATSIZE];
	dns_qp_t **treep = NULL;

	for (;;) {
		/*
		 * pick the next tree to (start to) destroy
		 */
		treep = &qpdb->tree;
		if (*treep == NULL) {
			treep = &qpdb->nsec;
			if (*treep == NULL) {
				break;
			}
		}

		dns_qp_destroy(treep);
		INSIST(*treep == NULL);
	}

	if (dns_name_dynamic(&qpdb->common.origin)) {
		dns_name_format(&qpdb->common.origin, buf, sizeof(buf));
	} else {
		strlcpy(buf, "<UNKNOWN>", sizeof(buf));
	}
	isc_log_write(DNS_LOGCATEGORY_DATABASE, DNS_LOGMODULE_CACHE,
		      ISC_LOG_DEBUG(DNS_QPCACHE_LOG_STATS_LEVEL), "done %s(%s)",
		      __func__, buf);

	if (dns_name_dynamic(&qpdb->common.origin)) {
		dns_name_free(&qpdb->common.origin, qpdb->common.mctx);
	}
	for (i = 0; i < qpdb->buckets_count; i++) {
		NODE_DESTROYLOCK(&qpdb->buckets[i].lock);

		INSIST(ISC_SIEVE_EMPTY(qpdb->buckets[i].sieve));

		INSIST(isc_queue_empty(&qpdb->buckets[i].deadnodes));
		isc_queue_destroy(&qpdb->buckets[i].deadnodes);

		isc_heap_destroy(&qpdb->buckets[i].heap);
	}

	if (qpdb->rrsetstats != NULL) {
		dns_stats_detach(&qpdb->rrsetstats);
	}
	if (qpdb->cachestats != NULL) {
		isc_stats_detach(&qpdb->cachestats);
	}

	TREE_DESTROYLOCK(&qpdb->tree_lock);
	isc_refcount_destroy(&qpdb->references);
	isc_refcount_destroy(&qpdb->common.references);

	isc_rwlock_destroy(&qpdb->lock);
	qpdb->common.magic = 0;
	qpdb->common.impmagic = 0;
	isc_mem_detach(&qpdb->hmctx);

	isc_mem_putanddetach(&qpdb->common.mctx, qpdb,
			     sizeof(*qpdb) + qpdb->buckets_count *
						     sizeof(qpdb->buckets[0]));
}

static void
qpcache_destroy(dns_db_t *arg) {
	qpcache_t *qpdb = (qpcache_t *)arg;

	qpcache_detach(&qpdb);
}

/*%
 * Clean up dead nodes. These are nodes which have no references, and
 * have no data. They are dead but we could not or chose not to delete
 * them when we deleted all the data at that node because we did not want
 * to wait for the tree write lock.
 */
static void
cleanup_deadnodes(qpcache_t *qpdb, uint16_t locknum) {
	isc_rwlocktype_t tlocktype = isc_rwlocktype_none;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlock_t *nlock = &qpdb->buckets[locknum].lock;
	qpcnode_t *qpnode = NULL, *qpnext = NULL;
	isc_queue_t deadnodes;

	INSIST(locknum < qpdb->buckets_count);

	isc_queue_init(&deadnodes);

	TREE_WRLOCK(&qpdb->tree_lock, &tlocktype);
	NODE_WRLOCK(nlock, &nlocktype);

	isc_queue_splice(&deadnodes, &qpdb->buckets[locknum].deadnodes);
	isc_queue_for_each_entry_safe(&deadnodes, qpnode, qpnext, deadlink) {
		qpcnode_release(qpdb, qpnode, &nlocktype,
				&tlocktype DNS__DB_FILELINE);
	}

	NODE_UNLOCK(nlock, &nlocktype);
	TREE_UNLOCK(&qpdb->tree_lock, &tlocktype);
}
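
/*
 * Async callback: each event loop cleans the dead-node queue of its
 * own bucket (the bucket number matches the loop's tid), then
 * releases the database reference held for the callback.
 */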
static void
cleanup_deadnodes_cb(void *arg) {
	qpcache_t *qpdb = arg;
	uint16_t locknum = isc_tid();

	cleanup_deadnodes(qpdb, locknum);
	qpcache_unref(qpdb);
}

/*
 * This function is assumed to be called when a node is newly referenced
 * and can be in the deadnode list. In that case the node will be
 * referenced and cleanup_deadnodes() will remove it from the list when
 * the cleaning happens.
 * Note: while a new reference is gained in multiple places, there are
 * only very few cases where the node can be in the deadnode list (only
 * empty nodes can have been added to the list).
 */
static void
reactivate_node(qpcache_t *qpdb, qpcnode_t *node,
		isc_rwlocktype_t tlocktype ISC_ATTR_UNUSED DNS__DB_FLARG) {
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlock_t *nlock = &qpdb->buckets[node->locknum].lock;
	NODE_RDLOCK(nlock, &nlocktype);
	qpcnode_acquire(qpdb, node, nlocktype, tlocktype DNS__DB_FLARG_PASS);
	NODE_UNLOCK(nlock, &nlocktype);
}
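
/*
 * Allocate a new cache node for NAME in namespace NSPACE. The node
 * starts with a single reference, owned by the caller, and is
 * assigned to a random lock bucket.
 */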
static qpcnode_t *
new_qpcnode(qpcache_t *qpdb, const dns_name_t *name, dns_namespace_t nspace) {
	qpcnode_t *newdata = isc_mem_get(qpdb->common.mctx, sizeof(*newdata));
	*newdata = (qpcnode_t){
		.methods = &qpcnode_methods,
		.qpdb = qpdb,
		.name = DNS_NAME_INITEMPTY,
		.nspace = nspace,
		.references = ISC_REFCOUNT_INITIALIZER(1),
		.locknum = isc_random_uniform(qpdb->buckets_count),
	};

	isc_mem_attach(qpdb->common.mctx, &newdata->mctx);
	dns_name_dup(name, newdata->mctx, &newdata->name);

#ifdef DNS_DB_NODETRACE
	fprintf(stderr, "new_qpcnode:%s:%s:%d:%p->references = 1\n", __func__,
		__FILE__, __LINE__ + 1, name);
#endif
	return newdata;
}
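
/*
 * dns_db findnode method: look NAME up in the main tree and, if
 * CREATE is set, insert a new node when the name is not found. The
 * returned node is referenced via reactivate_node().
 */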
static isc_result_t
qpcache_findnode(dns_db_t *db, const dns_name_t *name, bool create,
		 dns_dbnode_t **nodep DNS__DB_FLARG) {
	qpcache_t *qpdb = (qpcache_t *)db;
	qpcnode_t *node = NULL;
	isc_result_t result;
	isc_rwlocktype_t tlocktype = isc_rwlocktype_none;
	dns_namespace_t nspace = DNS_DBNAMESPACE_NORMAL;

	TREE_RDLOCK(&qpdb->tree_lock, &tlocktype);
	result = dns_qp_getname(qpdb->tree, name, nspace, (void **)&node, NULL);
	if (result != ISC_R_SUCCESS) {
		if (!create) {
			goto unlock;
		}
		/*
		 * Try to upgrade the lock and if that fails unlock then relock.
		 */
		TREE_FORCEUPGRADE(&qpdb->tree_lock, &tlocktype);
		result = dns_qp_getname(qpdb->tree, name, nspace,
					(void **)&node, NULL);
		if (result != ISC_R_SUCCESS) {
			node = new_qpcnode(qpdb, name, nspace);
			result = dns_qp_insert(qpdb->tree, node, 0);
			INSIST(result == ISC_R_SUCCESS);
			qpcnode_unref(node);
		}
	}

	reactivate_node(qpdb, node, tlocktype DNS__DB_FLARG_PASS);

	*nodep = (dns_dbnode_t *)node;
unlock:
	TREE_UNLOCK(&qpdb->tree_lock, &tlocktype);

	return result;
}
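
/*
 * Create a database iterator over the main tree; the iterator starts
 * out paused and holds a reference to the database.
 */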
static isc_result_t
qpcache_createiterator(dns_db_t *db, unsigned int options ISC_ATTR_UNUSED,
		       dns_dbiterator_t **iteratorp) {
	qpcache_t *qpdb = (qpcache_t *)db;
	qpc_dbit_t *qpdbiter = NULL;

	REQUIRE(VALID_QPDB(qpdb));

	qpdbiter = isc_mem_get(qpdb->common.mctx, sizeof(*qpdbiter));
	*qpdbiter = (qpc_dbit_t){
		.common.methods = &dbiterator_methods,
		.common.magic = DNS_DBITERATOR_MAGIC,
		.paused = true,
	};

	qpdbiter->name = dns_fixedname_initname(&qpdbiter->fixed);
	dns_db_attach(db, &qpdbiter->common.db);
	dns_qpiter_init(qpdb->tree, &qpdbiter->iter);

	*iteratorp = (dns_dbiterator_t *)qpdbiter;
	return ISC_R_SUCCESS;
}
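
/*
 * Create an iterator over every rdataset at NODE, pinning the node
 * with an extra reference while the iterator exists.
 */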
static isc_result_t
qpcache_allrdatasets(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
		     unsigned int options, isc_stdtime_t __now,
		     dns_rdatasetiter_t **iteratorp DNS__DB_FLARG) {
	qpcache_t *qpdb = (qpcache_t *)db;
	qpcnode_t *qpnode = (qpcnode_t *)node;
	qpc_rditer_t *iterator = NULL;

	REQUIRE(VALID_QPDB(qpdb));
	REQUIRE(version == NULL);

	iterator = isc_mem_get(qpdb->common.mctx, sizeof(*iterator));
	*iterator = (qpc_rditer_t){
		.common.magic = DNS_RDATASETITER_MAGIC,
		.common.methods = &rdatasetiter_methods,
		.common.db = db,
		.common.node = node,
		.common.options = options,
		.common.now = __now ? __now : isc_stdtime_now(),
	};

	qpcnode_acquire(qpdb, qpnode, isc_rwlocktype_none,
			isc_rwlocktype_none DNS__DB_FLARG_PASS);
	*iteratorp = (dns_rdatasetiter_t *)iterator;

	return ISC_R_SUCCESS;
}
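
/*
 * Return true if NTYPES has reached the configured maximum number of
 * RR types per name; a limit of zero disables the check.
 */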
static bool
overmaxtype(qpcache_t *qpdb, uint32_t ntypes) {
	if (qpdb->maxtypepername == 0) {
		return false;
	}

	return ntypes >= qpdb->maxtypepername;
}
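
/*
 * A header is "priority" data if its type is a priority type, or if
 * it is a negative entry whose covered type is one.
 */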
static bool
prio_header(dns_slabheader_t *header) {
	if (NEGATIVE(header) &&
	    prio_type(DNS_TYPEPAIR_COVERS(header->typepair)))
	{
		return true;
	}

	return prio_type(header->typepair);
}

static void
qpcnode_attachnode(dns_dbnode_t *source, dns_dbnode_t **targetp DNS__DB_FLARG) {
	REQUIRE(targetp != NULL && *targetp == NULL);

	qpcnode_t *node = (qpcnode_t *)source;
	qpcache_t *qpdb = (qpcache_t *)node->qpdb;

	qpcnode_acquire(qpdb, node, isc_rwlocktype_none,
			isc_rwlocktype_none DNS__DB_FLARG_PASS);

	*targetp = source;
}

static void
qpcnode_detachnode(dns_dbnode_t **nodep DNS__DB_FLARG) {
	qpcnode_t *node = NULL;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlocktype_t tlocktype = isc_rwlocktype_none;
	isc_rwlock_t *nlock = NULL;

	REQUIRE(nodep != NULL && *nodep != NULL);

	node = (qpcnode_t *)(*nodep);
	qpcache_t *qpdb = (qpcache_t *)node->qpdb;
	*nodep = NULL;
	nlock = &qpdb->buckets[node->locknum].lock;

	REQUIRE(VALID_QPDB(qpdb));

	/*
	 * We can't destroy qpcache while holding a nodelock, so we need to
	 * reference it before acquiring the lock and release it afterward.
	 * Additionally, we must ensure that we don't destroy the database
	 * while the NODE_LOCK is locked.
	 */
	qpcache_ref(qpdb);

	rcu_read_lock();
	NODE_RDLOCK(nlock, &nlocktype);
	qpcnode_release(qpdb, node, &nlocktype, &tlocktype DNS__DB_FLARG_PASS);
	NODE_UNLOCK(nlock, &nlocktype);
	rcu_read_unlock();

	qpcache_detach(&qpdb);
}
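
/*
 * Add NEWHEADER to QPNODE's list of slab headers, resolving conflicts
 * with any existing entry of the same type according to trust levels
 * and negative-cache rules. A superseded header, if any, is marked
 * ancient rather than freed.
 */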
static isc_result_t
add(qpcache_t *qpdb, qpcnode_t *qpnode,
    const dns_name_t *nodename ISC_ATTR_UNUSED, dns_slabheader_t *newheader,
    unsigned int options, dns_rdataset_t *addedrdataset, isc_stdtime_t now,
    isc_rwlocktype_t nlocktype, isc_rwlocktype_t tlocktype DNS__DB_FLARG) {
	dns_slabheader_t *topheader = NULL, *topheader_prev = NULL;
	dns_slabheader_t *header = NULL, *sigheader = NULL;
	dns_slabheader_t *prioheader = NULL, *expireheader = NULL;
	dns_typepair_t negpair = 0;
	dns_trust_t trust;
	uint32_t ntypes = 0;

	if ((options & DNS_DBADD_FORCE) != 0) {
		trust = dns_trust_ultimate;
	} else {
		trust = newheader->trust;
	}

	if (EXISTS(newheader)) {
		dns_rdatatype_t rdtype = DNS_TYPEPAIR_TYPE(newheader->typepair);
		dns_rdatatype_t covers =
			DNS_TYPEPAIR_COVERS(newheader->typepair);
		dns_typepair_t sigpair = DNS_SIGTYPE(covers);
		if (NEGATIVE(newheader)) {
			/*
			 * We're adding a negative cache entry.
			 */
			if (covers == dns_rdatatype_any) {
				/*
				 * If we're adding a negative cache entry
				 * which covers all types (NXDOMAIN,
				 * NODATA(QTYPE=ANY)), we make all other
				 * data ancient so that the only rdataset
				 * that can be found at this node is the
				 * negative cache entry.
				 */
				for (topheader = qpnode->data;
				     topheader != NULL;
				     topheader = topheader->next)
				{
					mark_ancient(topheader);
				}
				goto find_header;
			}
			/*
			 * Otherwise look for any RRSIGs of the given
			 * type so they can be marked ancient later.
			 */
			for (topheader = qpnode->data; topheader != NULL;
			     topheader = topheader->next)
			{
				if (topheader->typepair == sigpair) {
					sigheader = topheader;
					break;
				}
			}
			negpair = DNS_TYPEPAIR_VALUE(covers, 0);
		} else {
			/*
			 * We're adding something that isn't a
			 * negative cache entry. Look for an extant
			 * non-ancient NXDOMAIN/NODATA(QTYPE=ANY) negative
			 * cache entry. If we're adding an RRSIG, also
			 * check for an extant non-ancient NODATA ncache
			 * entry which covers the same type as the RRSIG.
			 */
			for (topheader = qpnode->data; topheader != NULL;
			     topheader = topheader->next)
			{
				if ((topheader->typepair ==
				     RDATATYPE_NCACHEANY) ||
				    (newheader->typepair == sigpair &&
				     topheader->typepair ==
					     DNS_TYPEPAIR_VALUE(0, covers)))
				{
					break;
				}
			}
			if (topheader != NULL && EXISTS(topheader) &&
			    ACTIVE(topheader, now))
			{
				/*
				 * Found one.
				 */
				if (trust < topheader->trust) {
					/*
					 * The NXDOMAIN/NODATA(QTYPE=ANY)
					 * is more trusted.
					 */
					dns_slabheader_destroy(&newheader);
					if (addedrdataset != NULL) {
						bindrdataset(
							qpdb, qpnode, topheader,
							now, nlocktype,
							tlocktype,
							addedrdataset
								DNS__DB_FLARG_PASS);
					}
					return DNS_R_UNCHANGED;
				}
				/*
				 * The new rdataset is better. Expire the
				 * ncache entry.
				 */
				mark_ancient(topheader);
				topheader = NULL;
				goto find_header;
			}
			negpair = DNS_TYPEPAIR_VALUE(0, rdtype);
		}
	}

	for (topheader = qpnode->data; topheader != NULL;
	     topheader = topheader->next)
	{
		if (ACTIVE(topheader, now)) {
			++ntypes;
			expireheader = topheader;
		}
		if (prio_header(topheader)) {
			prioheader = topheader;
		}

		if (topheader->typepair == newheader->typepair ||
		    topheader->typepair == negpair)
		{
			break;
		}
		topheader_prev = topheader;
	}

find_header:
	/*
	 * If header isn't NULL, we've found the right type.
	 */
	header = topheader;
	if (header != NULL) {
		/*
		 * Deleting an already non-existent rdataset has no effect.
		 */
		if (!EXISTS(header) && !EXISTS(newheader)) {
			dns_slabheader_destroy(&newheader);
			return DNS_R_UNCHANGED;
		}

		/*
		 * Trying to add an rdataset with lower trust to a cache
		 * DB has no effect, provided that the cache data isn't
		 * stale. If the cache data is stale, new lower trust
		 * data will supersede it below. Unclear what the best
		 * policy is here.
		 */
		if (trust < header->trust &&
		    (ACTIVE(header, now) || !EXISTS(header)))
		{
			dns_slabheader_destroy(&newheader);
			if (addedrdataset != NULL) {
				bindrdataset(qpdb, qpnode, header, now,
					     nlocktype, tlocktype,
					     addedrdataset DNS__DB_FLARG_PASS);
			}
			return DNS_R_UNCHANGED;
		}

		/*
		 * Don't replace existing NS, A and AAAA RRsets in the
		 * cache if they already exist. This prevents named
		 * being locked to old servers. Don't lower trust of
		 * existing record if the update is forced. Nothing
		 * special to be done w.r.t stale data; it gets replaced
		 * normally further down.
		 */
		if (ACTIVE(header, now) &&
		    header->typepair == dns_rdatatype_ns && EXISTS(header) &&
		    EXISTS(newheader) && header->trust >= newheader->trust &&
		    dns_rdataslab_equalx(header, newheader,
					 qpdb->common.rdclass,
					 DNS_TYPEPAIR_TYPE(header->typepair)))
		{
			/*
			 * Honour the new ttl if it is less than the
			 * older one.
			 */
			if (header->expire > newheader->expire) {
				setttl(header, newheader->expire);
			}

			qpcache_hit(qpdb, header);

			if (header->noqname == NULL &&
			    newheader->noqname != NULL)
			{
				header->noqname = newheader->noqname;
				newheader->noqname = NULL;
			}
			if (header->closest == NULL &&
			    newheader->closest != NULL)
			{
				header->closest = newheader->closest;
				newheader->closest = NULL;
			}
			dns_slabheader_destroy(&newheader);
			if (addedrdataset != NULL) {
				bindrdataset(qpdb, qpnode, header, now,
					     nlocktype, tlocktype,
					     addedrdataset DNS__DB_FLARG_PASS);
			}
			return ISC_R_SUCCESS;
		}

		/*
		 * If we will be replacing a NS RRset force its TTL
		 * to be no more than the current NS RRset's TTL. This
		 * ensures the delegations that are withdrawn are honoured.
		 */
		if (ACTIVE(header, now) &&
		    header->typepair == dns_rdatatype_ns && EXISTS(header) &&
		    EXISTS(newheader) && header->trust <= newheader->trust)
		{
			if (newheader->expire > header->expire) {
				newheader->expire = header->expire;
			}
		}
		if (ACTIVE(header, now) &&
		    (options & DNS_DBADD_PREFETCH) == 0 &&
		    (header->typepair == dns_rdatatype_a ||
		     header->typepair == dns_rdatatype_aaaa ||
		     header->typepair == dns_rdatatype_ds ||
		     header->typepair == DNS_SIGTYPE(dns_rdatatype_ds)) &&
		    EXISTS(header) && EXISTS(newheader) &&
		    header->trust >= newheader->trust &&
		    dns_rdataslab_equal(header, newheader))
		{
			/*
			 * Honour the new ttl if it is less than the
			 * older one.
			 */
			if (header->expire > newheader->expire) {
				setttl(header, newheader->expire);
			}

			qpcache_hit(qpdb, header);

			if (header->noqname == NULL &&
			    newheader->noqname != NULL)
			{
				header->noqname = newheader->noqname;
				newheader->noqname = NULL;
			}
			if (header->closest == NULL &&
			    newheader->closest != NULL)
			{
				header->closest = newheader->closest;
				newheader->closest = NULL;
			}
			dns_slabheader_destroy(&newheader);
			if (addedrdataset != NULL) {
				bindrdataset(qpdb, qpnode, header, now,
					     nlocktype, tlocktype,
					     addedrdataset DNS__DB_FLARG_PASS);
			}
			return ISC_R_SUCCESS;
		}

		qpcache_miss(qpdb, newheader, &nlocktype,
			     &tlocktype DNS__DB_FLARG_PASS);

		if (topheader_prev != NULL) {
			topheader_prev->next = newheader;
		} else {
			qpnode->data = newheader;
		}
		newheader->next = topheader->next;
		newheader->down = topheader;
		topheader->next = newheader;
		mark_ancient(header);
		if (sigheader != NULL) {
			mark_ancient(sigheader);
		}
	} else if (!EXISTS(newheader)) {
		/*
		 * The type already doesn't exist; no point trying
		 * to delete it.
		 */
		dns_slabheader_destroy(&newheader);
		return DNS_R_UNCHANGED;
	} else {
		/* No rdatasets of the given type exist at the node. */
		INSIST(newheader->down == NULL);

		qpcache_miss(qpdb, newheader, &nlocktype,
			     &tlocktype DNS__DB_FLARG_PASS);
		if (prio_header(newheader)) {
			/* This is a priority type, prepend it */
			newheader->next = qpnode->data;
			qpnode->data = newheader;
		} else if (prioheader != NULL) {
			/* Append after the priority headers */
			newheader->next = prioheader->next;
			prioheader->next = newheader;
		} else {
			/* There were no priority headers */
			newheader->next = qpnode->data;
			qpnode->data = newheader;
		}

		if (overmaxtype(qpdb, ntypes)) {
			if (expireheader == NULL) {
				expireheader = newheader;
			}
			if (NEGATIVE(newheader) && !prio_header(newheader)) {
				/*
				 * Add the new non-priority negative
				 * header to the database only
				 * temporarily.
				 */
				expireheader = newheader;
			}

			mark_ancient(expireheader);
			/*
			 * FIXME: In theory, we should mark the RRSIG
			 * and the header at the same time, but there is
			 * no direct link between those two headers, so
			 * we would have to check the whole list again.
			 */
		}
	}

	if (addedrdataset != NULL) {
		bindrdataset(qpdb, qpnode, newheader, now, nlocktype, tlocktype,
			     addedrdataset DNS__DB_FLARG_PASS);
	}

	return ISC_R_SUCCESS;
}
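
/*
 * Copy the noqname proof (the NSEC/NSEC3 rdataset and its RRSIG)
 * from RDATASET into slab form and attach it to NEWHEADER.
 */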
static isc_result_t
addnoqname(isc_mem_t *mctx, dns_slabheader_t *newheader, uint32_t maxrrperset,
	   dns_rdataset_t *rdataset) {
	isc_result_t result;
	dns_slabheader_proof_t *noqname = NULL;
	dns_name_t name = DNS_NAME_INITEMPTY;
	dns_rdataset_t neg = DNS_RDATASET_INIT, negsig = DNS_RDATASET_INIT;
	isc_region_t r1, r2;

	result = dns_rdataset_getnoqname(rdataset, &name, &neg, &negsig);
	RUNTIME_CHECK(result == ISC_R_SUCCESS);

	result = dns_rdataslab_fromrdataset(&neg, mctx, &r1, maxrrperset);
	if (result != ISC_R_SUCCESS) {
		goto cleanup;
	}

	result = dns_rdataslab_fromrdataset(&negsig, mctx, &r2, maxrrperset);
	if (result != ISC_R_SUCCESS) {
		goto cleanup;
	}

	noqname = isc_mem_get(mctx, sizeof(*noqname));
	*noqname = (dns_slabheader_proof_t){
		.neg = dns_slabheader_raw((dns_slabheader_t *)r1.base),
		.negsig = dns_slabheader_raw((dns_slabheader_t *)r2.base),
		.type = neg.type,
		.name = DNS_NAME_INITEMPTY,
	};
	dns_name_dup(&name, mctx, &noqname->name);
	newheader->noqname = noqname;

cleanup:
	dns_rdataset_disassociate(&neg);
	dns_rdataset_disassociate(&negsig);

	return result;
}
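
/*
 * Same as addnoqname(), but for the closest-encloser proof: attach
 * the slab copies to NEWHEADER->closest.
 */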
static isc_result_t
addclosest(isc_mem_t *mctx, dns_slabheader_t *newheader, uint32_t maxrrperset,
	   dns_rdataset_t *rdataset) {
	isc_result_t result;
	dns_slabheader_proof_t *closest = NULL;
	dns_name_t name = DNS_NAME_INITEMPTY;
	dns_rdataset_t neg = DNS_RDATASET_INIT, negsig = DNS_RDATASET_INIT;
	isc_region_t r1, r2;

	result = dns_rdataset_getclosest(rdataset, &name, &neg, &negsig);
	RUNTIME_CHECK(result == ISC_R_SUCCESS);

	result = dns_rdataslab_fromrdataset(&neg, mctx, &r1, maxrrperset);
	if (result != ISC_R_SUCCESS) {
		goto cleanup;
	}

	result = dns_rdataslab_fromrdataset(&negsig, mctx, &r2, maxrrperset);
	if (result != ISC_R_SUCCESS) {
		goto cleanup;
	}

	closest = isc_mem_get(mctx, sizeof(*closest));
	*closest = (dns_slabheader_proof_t){
		.neg = dns_slabheader_raw((dns_slabheader_t *)r1.base),
		.negsig = dns_slabheader_raw((dns_slabheader_t *)r2.base),
		.name = DNS_NAME_INITEMPTY,
		.type = neg.type,
	};
	dns_name_dup(&name, mctx, &closest->name);
	newheader->closest = closest;

cleanup:
	dns_rdataset_disassociate(&neg);
	dns_rdataset_disassociate(&negsig);
	return result;
}

static void
expire_ttl_headers(qpcache_t *qpdb, unsigned int locknum,
		   isc_rwlocktype_t *nlocktypep, isc_rwlocktype_t *tlocktypep,
		   isc_stdtime_t now DNS__DB_FLARG);
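
/*
 * dns_db addrdataset method: convert RDATASET to slab form, set the
 * cache-specific attributes and expiry, maintain the auxiliary NSEC
 * tree when needed, and merge the result into the node via add().
 */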
static isc_result_t
qpcache_addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
		    isc_stdtime_t __now, dns_rdataset_t *rdataset,
		    unsigned int options,
		    dns_rdataset_t *addedrdataset DNS__DB_FLARG) {
	qpcache_t *qpdb = (qpcache_t *)db;
	qpcnode_t *qpnode = (qpcnode_t *)node;
	isc_region_t region;
	dns_slabheader_t *newheader = NULL;
	isc_result_t result;
	bool delegating = false;
	bool newnsec = false;
	isc_rwlocktype_t tlocktype = isc_rwlocktype_none;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlock_t *nlock = NULL;
	dns_fixedname_t fixed;
	dns_name_t *name = NULL;
	isc_stdtime_t now = __now ? __now : isc_stdtime_now();

	REQUIRE(VALID_QPDB(qpdb));
	REQUIRE(version == NULL);

	result = dns_rdataslab_fromrdataset(rdataset, qpnode->mctx, &region,
					    qpdb->maxrrperset);
	if (result != ISC_R_SUCCESS) {
		if (result == DNS_R_TOOMANYRECORDS) {
			dns__db_logtoomanyrecords((dns_db_t *)qpdb,
						  &qpnode->name, rdataset->type,
						  "adding", qpdb->maxrrperset);
		}
		return result;
	}

	name = dns_fixedname_initname(&fixed);
	dns_name_copy(&qpnode->name, name);
	dns_rdataset_getownercase(rdataset, name);

	newheader = (dns_slabheader_t *)region.base;
	dns_slabheader_reset(newheader, node);

	/*
	 * By default, dns_rdataslab_fromrdataset() sets newheader->ttl
	 * to the rdataset TTL. In the case of the cache, that's wrong;
	 * we need it to be set to the expire time instead.
	 */
	setttl(newheader, rdataset->ttl + now);
	if (rdataset->ttl == 0U) {
		DNS_SLABHEADER_SETATTR(newheader, DNS_SLABHEADERATTR_ZEROTTL);
	}

	atomic_init(&newheader->count,
		    atomic_fetch_add_relaxed(&init_count, 1));
	if (rdataset->attributes.prefetch) {
		DNS_SLABHEADER_SETATTR(newheader, DNS_SLABHEADERATTR_PREFETCH);
	}
	if (rdataset->attributes.negative) {
		DNS_SLABHEADER_SETATTR(newheader, DNS_SLABHEADERATTR_NEGATIVE);
	}
	if (rdataset->attributes.nxdomain) {
		DNS_SLABHEADER_SETATTR(newheader, DNS_SLABHEADERATTR_NXDOMAIN);
	}
	if (rdataset->attributes.optout) {
		DNS_SLABHEADER_SETATTR(newheader, DNS_SLABHEADERATTR_OPTOUT);
	}
	if (rdataset->attributes.noqname) {
		result = addnoqname(qpnode->mctx, newheader, qpdb->maxrrperset,
				    rdataset);
		if (result != ISC_R_SUCCESS) {
			dns_slabheader_destroy(&newheader);
			return result;
		}
	}
	if (rdataset->attributes.closest) {
		result = addclosest(qpnode->mctx, newheader, qpdb->maxrrperset,
				    rdataset);
		if (result != ISC_R_SUCCESS) {
			dns_slabheader_destroy(&newheader);
			return result;
		}
	}

	nlock = &qpdb->buckets[qpnode->locknum].lock;

	/*
	 * If we're adding a delegation type (which would be an NS or DNAME
	 * for a zone, but only DNAME counts for a cache), we need to set
	 * the delegating bit on the node.
	 */
	if (rdataset->type == dns_rdatatype_dname) {
		delegating = true;
	}

	/*
	 * Add to the auxiliary NSEC tree if we're adding an NSEC record.
	 */
	if (rdataset->type == dns_rdatatype_nsec) {
		NODE_RDLOCK(nlock, &nlocktype);
		if (!qpnode->havensec) {
			newnsec = true;
		}
		NODE_UNLOCK(nlock, &nlocktype);
	}

	/*
	 * If we're adding a delegation type or adding to the auxiliary
	 * NSEC tree, hold an exclusive lock on the tree.
	 */
	if (delegating || newnsec) {
		TREE_WRLOCK(&qpdb->tree_lock, &tlocktype);
	}

	NODE_WRLOCK(nlock, &nlocktype);

	if (qpdb->rrsetstats != NULL) {
		DNS_SLABHEADER_SETATTR(newheader, DNS_SLABHEADERATTR_STATCOUNT);
		update_rrsetstats(qpdb->rrsetstats, newheader->typepair,
				  atomic_load_acquire(&newheader->attributes),
				  true);
	}

	expire_ttl_headers(qpdb, qpnode->locknum, &nlocktype, &tlocktype,
			   now DNS__DB_FLARG_PASS);

	if (newnsec && !qpnode->havensec) {
		qpcnode_t *nsecnode = NULL;

		result = dns_qp_getname(qpdb->nsec, name, DNS_DBNAMESPACE_NSEC,
					(void **)&nsecnode, NULL);
		if (result != ISC_R_SUCCESS) {
			INSIST(nsecnode == NULL);
			nsecnode = new_qpcnode(qpdb, name,
					       DNS_DBNAMESPACE_NSEC);
			result = dns_qp_insert(qpdb->nsec, nsecnode, 0);
			INSIST(result == ISC_R_SUCCESS);
			qpcnode_detach(&nsecnode);
		}
		qpnode->havensec = true;
	}

	result = add(qpdb, qpnode, name, newheader, options, addedrdataset, now,
		     nlocktype, tlocktype DNS__DB_FLARG_PASS);

	if (result == ISC_R_SUCCESS && delegating) {
		qpnode->delegating = 1;
	}

	NODE_UNLOCK(nlock, &nlocktype);

	if (tlocktype != isc_rwlocktype_none) {
		TREE_UNLOCK(&qpdb->tree_lock, &tlocktype);
	}

	INSIST(tlocktype == isc_rwlocktype_none);

	return result;
}
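
/*
 * dns_db deleterdataset method: deletion is implemented by adding a
 * forced, nonexistent header of the given type, which supersedes any
 * existing data for that type.
 */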
static isc_result_t
qpcache_deleterdataset(dns_db_t *db, dns_dbnode_t *node,
		       dns_dbversion_t *version, dns_rdatatype_t type,
		       dns_rdatatype_t covers DNS__DB_FLARG) {
	qpcache_t *qpdb = (qpcache_t *)db;
	qpcnode_t *qpnode = (qpcnode_t *)node;
	isc_result_t result;
	dns_slabheader_t *newheader = NULL;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlock_t *nlock = NULL;

	REQUIRE(VALID_QPDB(qpdb));
	REQUIRE(version == NULL);

	if (type == dns_rdatatype_any) {
		return ISC_R_NOTIMPLEMENTED;
	}
	if (type == dns_rdatatype_rrsig && covers == 0) {
		return ISC_R_NOTIMPLEMENTED;
	}

	newheader = dns_slabheader_new(db, node);
	newheader->typepair = DNS_TYPEPAIR_VALUE(type, covers);
	setttl(newheader, 0);
	atomic_init(&newheader->attributes, DNS_SLABHEADERATTR_NONEXISTENT);

	nlock = &qpdb->buckets[qpnode->locknum].lock;
	NODE_WRLOCK(nlock, &nlocktype);
	result = add(qpdb, qpnode, NULL, newheader, DNS_DBADD_FORCE, NULL, 0,
		     nlocktype, isc_rwlocktype_none DNS__DB_FLARG_PASS);
	NODE_UNLOCK(nlock, &nlocktype);

	return result;
}
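
/*
 * Return the number of leaves in the chosen tree (main or NSEC),
 * taken under the tree read lock.
 */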
static unsigned int
nodecount(dns_db_t *db, dns_dbtree_t tree) {
	qpcache_t *qpdb = (qpcache_t *)db;
	dns_qp_memusage_t mu;
	isc_rwlocktype_t tlocktype = isc_rwlocktype_none;

	REQUIRE(VALID_QPDB(qpdb));

	TREE_RDLOCK(&qpdb->tree_lock, &tlocktype);
	switch (tree) {
	case dns_dbtree_main:
		mu = dns_qp_memusage(qpdb->tree);
		break;
	case dns_dbtree_nsec:
		mu = dns_qp_memusage(qpdb->nsec);
		break;
	default:
		UNREACHABLE();
	}
	TREE_UNLOCK(&qpdb->tree_lock, &tlocktype);

	return mu.leaves;
}
|
|
|
|
|
2024-03-06 17:33:37 -08:00
|
|
|
static void
|
Decouple database and node lifetimes by adding node-specific vtables
All databases in the codebase follow the same structure: a database is
an associative container from DNS names to nodes, and each node is an
associative container from RR types to RR data.
Each database implementation (qpzone, qpcache, sdlz, builtin, dyndb) has
its own corresponding node type (qpznode, qpcnode, etc). However, some
code needs to work with nodes generically regardless of their specific
type - for example, to acquire locks, manage references, or
register/unregister slabs from the heap.
Currently, these generic node operations are implemented as methods in
the database vtable, which creates problematic coupling between database
and node lifetimes. If a node outlives its parent database, the node
destructor will destroy all RR data, and each RR data destructor will
try to unregister from heaps by calling a virtual function from the
database vtable. Since the database was already freed, this causes a
crash.
This commit breaks the coupling by standardizing the layout of all
database nodes, adding a dedicated vtable for node operations, and
moving node-specific methods from the database vtable to the node
vtable.
2025-06-05 11:51:29 +02:00
|
|
|
qpcnode_locknode(dns_dbnode_t *node, isc_rwlocktype_t type) {
|
2024-04-29 15:29:33 -07:00
|
|
|
qpcnode_t *qpnode = (qpcnode_t *)node;
|
Decouple database and node lifetimes by adding node-specific vtables
All databases in the codebase follow the same structure: a database is
an associative container from DNS names to nodes, and each node is an
associative container from RR types to RR data.
Each database implementation (qpzone, qpcache, sdlz, builtin, dyndb) has
its own corresponding node type (qpznode, qpcnode, etc). However, some
code needs to work with nodes generically regardless of their specific
type - for example, to acquire locks, manage references, or
register/unregister slabs from the heap.
Currently, these generic node operations are implemented as methods in
the database vtable, which creates problematic coupling between database
and node lifetimes. If a node outlives its parent database, the node
destructor will destroy all RR data, and each RR data destructor will
try to unregister from heaps by calling a virtual function from the
database vtable. Since the database was already freed, this causes a
crash.
This commit breaks the coupling by standardizing the layout of all
database nodes, adding a dedicated vtable for node operations, and
moving node-specific methods from the database vtable to the node
vtable.
2025-06-05 11:51:29 +02:00
|
|
|
qpcache_t *qpdb = qpnode->qpdb;
|
2024-01-09 16:18:57 +01:00
|
|
|
|
2025-02-03 13:36:27 +01:00
|
|
|
RWLOCK(&qpdb->buckets[qpnode->locknum].lock, type);
|
2024-01-09 16:18:57 +01:00
|
|
|
}

static void
qpcnode_unlocknode(dns_dbnode_t *node, isc_rwlocktype_t type) {
	qpcnode_t *qpnode = (qpcnode_t *)node;
	qpcache_t *qpdb = qpnode->qpdb;

	RWUNLOCK(&qpdb->buckets[qpnode->locknum].lock, type);
}
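
/*
 * Node locks are striped across the per-loop buckets: each node carries
 * a locknum that selects qpdb->buckets[locknum].lock, so contention is
 * spread across loops instead of serializing on a single database lock.
 * Minimal usage sketch (illustrative only):
 *
 *	qpcnode_locknode(node, isc_rwlocktype_read);
 *	... read the headers hanging off the node ...
 *	qpcnode_unlocknode(node, isc_rwlocktype_read);
 */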

isc_result_t
dns__qpcache_create(isc_mem_t *mctx, const dns_name_t *origin,
		    dns_dbtype_t type, dns_rdataclass_t rdclass,
		    unsigned int argc, char *argv[],
		    void *driverarg ISC_ATTR_UNUSED, dns_db_t **dbp) {
	qpcache_t *qpdb = NULL;
	isc_mem_t *hmctx = mctx;
	isc_loop_t *loop = isc_loop();
	int i;
	size_t nloops = isc_loopmgr_nloops();

	/* This database implementation only supports cache semantics */
	REQUIRE(type == dns_dbtype_cache);
	REQUIRE(loop != NULL);

	qpdb = isc_mem_get(mctx,
			   sizeof(*qpdb) + nloops * sizeof(qpdb->buckets[0]));
	*qpdb = (qpcache_t){
		.common.methods = &qpdb_cachemethods,
		.common.origin = DNS_NAME_INITEMPTY,
		.common.rdclass = rdclass,
		.common.attributes = DNS_DBATTR_CACHE,
		.common.references = 1,
		.references = 1,
		.buckets_count = nloops,
	};

	/*
	 * If argv[0] exists, it points to a memory context to use for
	 * the heap.
	 */
	if (argc != 0) {
		hmctx = (isc_mem_t *)argv[0];
	}

	isc_rwlock_init(&qpdb->lock);
	TREE_INITLOCK(&qpdb->tree_lock);

	dns_rdatasetstats_create(mctx, &qpdb->rrsetstats);
	for (i = 0; i < (int)qpdb->buckets_count; i++) {
		ISC_SIEVE_INIT(qpdb->buckets[i].sieve);

		qpdb->buckets[i].heap = NULL;
		isc_heap_create(hmctx, ttl_sooner, set_index, 0,
				&qpdb->buckets[i].heap);

		isc_queue_init(&qpdb->buckets[i].deadnodes);

		NODE_INITLOCK(&qpdb->buckets[i].lock);
	}

	/*
	 * Attach to the mctx. The database will persist so long as there
	 * are references to it, and attaching to the mctx ensures that our
	 * mctx won't disappear out from under us.
	 */
	isc_mem_attach(mctx, &qpdb->common.mctx);
	isc_mem_attach(hmctx, &qpdb->hmctx);

	/*
	 * Make a copy of the origin name.
	 */
	dns_name_dup(origin, mctx, &qpdb->common.origin);

	/*
	 * Make the qp tries.
	 */
	dns_qp_create(mctx, &qpmethods, qpdb, &qpdb->tree);
	dns_qp_create(mctx, &qpmethods, qpdb, &qpdb->nsec);

	qpdb->common.magic = DNS_DB_MAGIC;
	qpdb->common.impmagic = QPDB_MAGIC;

	*dbp = (dns_db_t *)qpdb;

	return ISC_R_SUCCESS;
}
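
/*
 * Illustrative creation sketch (hedged: in practice this constructor is
 * reached through the dns_db_create() dispatch for cache databases
 * rather than being called directly; "special_heap_mctx" below is a
 * hypothetical name):
 *
 *	dns_db_t *cachedb = NULL;
 *	char *argv[] = { (char *)special_heap_mctx };
 *	result = dns__qpcache_create(mctx, dns_rootname, dns_dbtype_cache,
 *				     dns_rdataclass_in, 1, argv, NULL,
 *				     &cachedb);
 */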

/*
 * Rdataset Iterator Methods
 */

static void
rdatasetiter_destroy(dns_rdatasetiter_t **iteratorp DNS__DB_FLARG) {
	qpc_rditer_t *iterator = NULL;

	iterator = (qpc_rditer_t *)(*iteratorp);

	dns__db_detachnode(&iterator->common.node DNS__DB_FLARG_PASS);
	isc_mem_put(iterator->common.db->mctx, iterator, sizeof(*iterator));

	*iteratorp = NULL;
}

static bool
iterator_active(qpcache_t *qpdb, qpc_rditer_t *iterator,
		dns_slabheader_t *header) {
	dns_ttl_t stale_ttl = header->expire + STALE_TTL(header, qpdb);

	/*
	 * Is this a "this rdataset doesn't exist" record?
	 */
	if (!EXISTS(header)) {
		return false;
	}

	/*
	 * If this header is still active then return it.
	 */
	if (ACTIVE(header, iterator->common.now)) {
		return true;
	}

	/*
	 * If we are not returning stale records or the rdataset is
	 * too old, don't return it.
	 */
	if (!STALEOK(iterator) || (iterator->common.now > stale_ttl)) {
		return false;
	}
	return true;
}
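
/*
 * Worked example (hypothetical numbers): with header->expire == 1000
 * and a serve-stale TTL of 30 seconds, stale_ttl is 1030. A header
 * that is no longer ACTIVE is still reported to a stale-ok iterator
 * while now <= 1030; once now > 1030, iterator_active() returns false.
 */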

static isc_result_t
rdatasetiter_first(dns_rdatasetiter_t *it DNS__DB_FLARG) {
	qpc_rditer_t *iterator = (qpc_rditer_t *)it;
	qpcache_t *qpdb = (qpcache_t *)(iterator->common.db);
	qpcnode_t *qpnode = (qpcnode_t *)iterator->common.node;
	dns_slabheader_t *header = NULL;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlock_t *nlock = &qpdb->buckets[qpnode->locknum].lock;

	NODE_RDLOCK(nlock, &nlocktype);

	for (header = qpnode->data; header != NULL; header = header->next) {
		if ((EXPIREDOK(iterator) && EXISTS(header)) ||
		    iterator_active(qpdb, iterator, header))
		{
			break;
		}
	}

	NODE_UNLOCK(nlock, &nlocktype);

	iterator->current = header;

	if (header == NULL) {
		return ISC_R_NOMORE;
	}

	return ISC_R_SUCCESS;
}

static isc_result_t
rdatasetiter_next(dns_rdatasetiter_t *it DNS__DB_FLARG) {
	qpc_rditer_t *iterator = (qpc_rditer_t *)it;
	qpcache_t *qpdb = (qpcache_t *)(iterator->common.db);
	qpcnode_t *qpnode = (qpcnode_t *)iterator->common.node;
	dns_slabheader_t *header = NULL;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlock_t *nlock = &qpdb->buckets[qpnode->locknum].lock;

	header = iterator->current;
	if (header == NULL) {
		return ISC_R_NOMORE;
	}

	NODE_RDLOCK(nlock, &nlocktype);

	for (header = header->next; header != NULL; header = header->next) {
		if ((EXPIREDOK(iterator) && EXISTS(header)) ||
		    iterator_active(qpdb, iterator, header))
		{
			break;
		}
	}

	NODE_UNLOCK(nlock, &nlocktype);

	iterator->current = header;

	if (header == NULL) {
		return ISC_R_NOMORE;
	}

	return ISC_R_SUCCESS;
}

static void
rdatasetiter_current(dns_rdatasetiter_t *it,
		     dns_rdataset_t *rdataset DNS__DB_FLARG) {
	qpc_rditer_t *iterator = (qpc_rditer_t *)it;
	qpcache_t *qpdb = (qpcache_t *)(iterator->common.db);
	qpcnode_t *qpnode = (qpcnode_t *)iterator->common.node;
	dns_slabheader_t *header = NULL;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlock_t *nlock = &qpdb->buckets[qpnode->locknum].lock;

	header = iterator->current;
	REQUIRE(header != NULL);

	NODE_RDLOCK(nlock, &nlocktype);

	bindrdataset(qpdb, qpnode, header, iterator->common.now, nlocktype,
		     isc_rwlocktype_none, rdataset DNS__DB_FLARG_PASS);

	NODE_UNLOCK(nlock, &nlocktype);
}
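
/*
 * Typical traversal through the public API (a sketch; the iterator is
 * normally obtained from dns_db_allrdatasets()):
 *
 *	for (result = dns_rdatasetiter_first(it);
 *	     result == ISC_R_SUCCESS;
 *	     result = dns_rdatasetiter_next(it)) {
 *		dns_rdatasetiter_current(it, &rdataset);
 *		... use rdataset ...
 *		dns_rdataset_disassociate(&rdataset);
 *	}
 */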

/*
 * Database Iterator Methods
 */

static void
reference_iter_node(qpc_dbit_t *qpdbiter DNS__DB_FLARG) {
	qpcache_t *qpdb = (qpcache_t *)qpdbiter->common.db;
	qpcnode_t *node = qpdbiter->node;

	if (node == NULL) {
		return;
	}

	INSIST(qpdbiter->tree_locked != isc_rwlocktype_none);
	reactivate_node(qpdb, node, qpdbiter->tree_locked DNS__DB_FLARG_PASS);
}

static void
dereference_iter_node(qpc_dbit_t *qpdbiter DNS__DB_FLARG) {
	qpcache_t *qpdb = (qpcache_t *)qpdbiter->common.db;
	qpcnode_t *node = qpdbiter->node;
	isc_rwlock_t *nlock = NULL;
	isc_rwlocktype_t nlocktype = isc_rwlocktype_none;
	isc_rwlocktype_t tlocktype = qpdbiter->tree_locked;

	if (node == NULL) {
		return;
	}

	REQUIRE(tlocktype != isc_rwlocktype_write);

	nlock = &qpdb->buckets[node->locknum].lock;
	NODE_RDLOCK(nlock, &nlocktype);
	qpcnode_release(qpdb, node, &nlocktype,
			&qpdbiter->tree_locked DNS__DB_FLARG_PASS);
	NODE_UNLOCK(nlock, &nlocktype);

	INSIST(qpdbiter->tree_locked == tlocktype);

	qpdbiter->node = NULL;
}
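
/*
 * Lock-ordering note: the node lock is acquired while the tree lock
 * may be held for reading (never for writing, per the REQUIRE above).
 * qpcnode_release() takes both lock states by reference because it may
 * need to adjust them while releasing the node; the INSIST afterwards
 * checks that the tree lock state is unchanged on return.
 */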

static void
resume_iteration(qpc_dbit_t *qpdbiter, bool continuing) {
	qpcache_t *qpdb = (qpcache_t *)qpdbiter->common.db;

	REQUIRE(qpdbiter->paused);
	REQUIRE(qpdbiter->tree_locked == isc_rwlocktype_none);

	TREE_RDLOCK(&qpdb->tree_lock, &qpdbiter->tree_locked);

	/*
	 * If we're being called from dbiterator_next or _prev,
	 * then we may need to reinitialize the iterator to the current
	 * name. The tree could have changed while it was unlocked,
	 * which would make the iterator traversal inconsistent.
	 *
	 * As long as the iterator is holding a reference to
	 * qpdbiter->node, the node won't be removed from the tree,
	 * so the lookup should always succeed.
	 */
	if (continuing && qpdbiter->node != NULL) {
		isc_result_t result;
		result = dns_qp_lookup(qpdb->tree, qpdbiter->name,
				       DNS_DBNAMESPACE_NORMAL, NULL,
				       &qpdbiter->iter, NULL, NULL, NULL);
		INSIST(result == ISC_R_SUCCESS);
	}

	qpdbiter->paused = false;
}

static void
dbiterator_destroy(dns_dbiterator_t **iteratorp DNS__DB_FLARG) {
	qpc_dbit_t *qpdbiter = (qpc_dbit_t *)(*iteratorp);
	qpcache_t *qpdb = (qpcache_t *)qpdbiter->common.db;
	dns_db_t *db = NULL;

	if (qpdbiter->tree_locked == isc_rwlocktype_read) {
		TREE_UNLOCK(&qpdb->tree_lock, &qpdbiter->tree_locked);
	}
	INSIST(qpdbiter->tree_locked == isc_rwlocktype_none);

	dereference_iter_node(qpdbiter DNS__DB_FLARG_PASS);

	dns_db_attach(qpdbiter->common.db, &db);
	dns_db_detach(&qpdbiter->common.db);

	isc_mem_put(db->mctx, qpdbiter, sizeof(*qpdbiter));
	dns_db_detach(&db);

	*iteratorp = NULL;
}

static isc_result_t
dbiterator_first(dns_dbiterator_t *iterator DNS__DB_FLARG) {
	isc_result_t result;
	qpc_dbit_t *qpdbiter = (qpc_dbit_t *)iterator;
	qpcache_t *qpdb = (qpcache_t *)iterator->db;

	if (qpdbiter->result != ISC_R_SUCCESS &&
	    qpdbiter->result != ISC_R_NOTFOUND &&
	    qpdbiter->result != DNS_R_PARTIALMATCH &&
	    qpdbiter->result != ISC_R_NOMORE)
	{
		return qpdbiter->result;
	}

	if (qpdbiter->paused) {
		resume_iteration(qpdbiter, false);
	}

	dereference_iter_node(qpdbiter DNS__DB_FLARG_PASS);

	dns_qpiter_init(qpdb->tree, &qpdbiter->iter);
	result = dns_qpiter_next(&qpdbiter->iter, NULL,
				 (void **)&qpdbiter->node, NULL);

	if (result == ISC_R_SUCCESS) {
		dns_name_copy(&qpdbiter->node->name, qpdbiter->name);
		reference_iter_node(qpdbiter DNS__DB_FLARG_PASS);
	} else {
		INSIST(result == ISC_R_NOMORE); /* The tree is empty. */
		qpdbiter->node = NULL;
	}

	qpdbiter->result = result;

	if (result != ISC_R_SUCCESS) {
		ENSURE(!qpdbiter->paused);
	}

	return result;
}

static isc_result_t
dbiterator_last(dns_dbiterator_t *iterator DNS__DB_FLARG) {
	isc_result_t result;
	qpc_dbit_t *qpdbiter = (qpc_dbit_t *)iterator;
	qpcache_t *qpdb = (qpcache_t *)iterator->db;

	if (qpdbiter->result != ISC_R_SUCCESS &&
	    qpdbiter->result != ISC_R_NOTFOUND &&
	    qpdbiter->result != DNS_R_PARTIALMATCH &&
	    qpdbiter->result != ISC_R_NOMORE)
	{
		return qpdbiter->result;
	}

	if (qpdbiter->paused) {
		resume_iteration(qpdbiter, false);
	}

	dereference_iter_node(qpdbiter DNS__DB_FLARG_PASS);

	dns_qpiter_init(qpdb->tree, &qpdbiter->iter);
	result = dns_qpiter_prev(&qpdbiter->iter, NULL,
				 (void **)&qpdbiter->node, NULL);

	if (result == ISC_R_SUCCESS) {
		dns_name_copy(&qpdbiter->node->name, qpdbiter->name);
		reference_iter_node(qpdbiter DNS__DB_FLARG_PASS);
	} else {
		INSIST(result == ISC_R_NOMORE); /* The tree is empty. */
		qpdbiter->node = NULL;
	}

	qpdbiter->result = result;
	return result;
}

static isc_result_t
dbiterator_seek(dns_dbiterator_t *iterator,
		const dns_name_t *name DNS__DB_FLARG) {
	isc_result_t result;
	qpc_dbit_t *qpdbiter = (qpc_dbit_t *)iterator;
	qpcache_t *qpdb = (qpcache_t *)iterator->db;

	if (qpdbiter->result != ISC_R_SUCCESS &&
	    qpdbiter->result != ISC_R_NOTFOUND &&
	    qpdbiter->result != DNS_R_PARTIALMATCH &&
	    qpdbiter->result != ISC_R_NOMORE)
	{
		return qpdbiter->result;
	}

	if (qpdbiter->paused) {
		resume_iteration(qpdbiter, false);
	}

	dereference_iter_node(qpdbiter DNS__DB_FLARG_PASS);

	result = dns_qp_lookup(qpdb->tree, name, DNS_DBNAMESPACE_NORMAL, NULL,
			       &qpdbiter->iter, NULL, (void **)&qpdbiter->node,
			       NULL);

	if (result == ISC_R_SUCCESS || result == DNS_R_PARTIALMATCH) {
		dns_name_copy(&qpdbiter->node->name, qpdbiter->name);
		reference_iter_node(qpdbiter DNS__DB_FLARG_PASS);
	} else {
		qpdbiter->node = NULL;
	}

	qpdbiter->result = (result == DNS_R_PARTIALMATCH) ? ISC_R_SUCCESS
							  : result;
	return result;
}
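
/*
 * Seek semantics, illustrated with hypothetical names: seeking
 * "foo.example." when only "example." is in the tree yields
 * DNS_R_PARTIALMATCH with the iterator positioned at "example.".
 * The stored result is normalized to ISC_R_SUCCESS so iteration can
 * continue from the partial match, while the caller still receives
 * DNS_R_PARTIALMATCH as the return value.
 */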

static isc_result_t
dbiterator_prev(dns_dbiterator_t *iterator DNS__DB_FLARG) {
	isc_result_t result;
	qpc_dbit_t *qpdbiter = (qpc_dbit_t *)iterator;

	REQUIRE(qpdbiter->node != NULL);

	if (qpdbiter->result != ISC_R_SUCCESS) {
		return qpdbiter->result;
	}

	if (qpdbiter->paused) {
		resume_iteration(qpdbiter, true);
	}

	dereference_iter_node(qpdbiter DNS__DB_FLARG_PASS);

	result = dns_qpiter_prev(&qpdbiter->iter, NULL,
				 (void **)&qpdbiter->node, NULL);

	if (result == ISC_R_SUCCESS) {
		dns_name_copy(&qpdbiter->node->name, qpdbiter->name);
		reference_iter_node(qpdbiter DNS__DB_FLARG_PASS);
	} else {
		INSIST(result == ISC_R_NOMORE);
		qpdbiter->node = NULL;
	}

	qpdbiter->result = result;
	return result;
}

static isc_result_t
dbiterator_next(dns_dbiterator_t *iterator DNS__DB_FLARG) {
	isc_result_t result;
	qpc_dbit_t *qpdbiter = (qpc_dbit_t *)iterator;

	REQUIRE(qpdbiter->node != NULL);

	if (qpdbiter->result != ISC_R_SUCCESS) {
		return qpdbiter->result;
	}

	if (qpdbiter->paused) {
		resume_iteration(qpdbiter, true);
	}

	dereference_iter_node(qpdbiter DNS__DB_FLARG_PASS);

	result = dns_qpiter_next(&qpdbiter->iter, NULL,
				 (void **)&qpdbiter->node, NULL);

	if (result == ISC_R_SUCCESS) {
		dns_name_copy(&qpdbiter->node->name, qpdbiter->name);
		reference_iter_node(qpdbiter DNS__DB_FLARG_PASS);
	} else {
		INSIST(result == ISC_R_NOMORE);
		qpdbiter->node = NULL;
	}

	qpdbiter->result = result;
	return result;
}

static isc_result_t
dbiterator_current(dns_dbiterator_t *iterator, dns_dbnode_t **nodep,
		   dns_name_t *name DNS__DB_FLARG) {
	qpcache_t *qpdb = (qpcache_t *)iterator->db;
	qpc_dbit_t *qpdbiter = (qpc_dbit_t *)iterator;
	qpcnode_t *node = qpdbiter->node;

	REQUIRE(qpdbiter->result == ISC_R_SUCCESS);
	REQUIRE(node != NULL);

	if (qpdbiter->paused) {
		resume_iteration(qpdbiter, false);
	}

	if (name != NULL) {
		dns_name_copy(&node->name, name);
	}

	qpcnode_acquire(qpdb, node, isc_rwlocktype_none,
			qpdbiter->tree_locked DNS__DB_FLARG_PASS);

	*nodep = (dns_dbnode_t *)qpdbiter->node;
	return ISC_R_SUCCESS;
}

static isc_result_t
dbiterator_pause(dns_dbiterator_t *iterator) {
	qpcache_t *qpdb = (qpcache_t *)iterator->db;
	qpc_dbit_t *qpdbiter = (qpc_dbit_t *)iterator;

	if (qpdbiter->result != ISC_R_SUCCESS &&
	    qpdbiter->result != ISC_R_NOTFOUND &&
	    qpdbiter->result != DNS_R_PARTIALMATCH &&
	    qpdbiter->result != ISC_R_NOMORE)
	{
		return qpdbiter->result;
	}

	if (qpdbiter->paused) {
		return ISC_R_SUCCESS;
	}

	qpdbiter->paused = true;

	if (qpdbiter->tree_locked == isc_rwlocktype_read) {
		TREE_UNLOCK(&qpdb->tree_lock, &qpdbiter->tree_locked);
	}
	INSIST(qpdbiter->tree_locked == isc_rwlocktype_none);

	return ISC_R_SUCCESS;
}
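
/*
 * Pausing releases the tree read lock so writers are not starved
 * during a long walk. A sketch of the intended call pattern:
 *
 *	for (result = dns_dbiterator_first(iter);
 *	     result == ISC_R_SUCCESS;
 *	     result = dns_dbiterator_next(iter)) {
 *		... examine the current node ...
 *		dns_dbiterator_pause(iter);
 *	}
 */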

static isc_result_t
dbiterator_origin(dns_dbiterator_t *iterator, dns_name_t *name) {
	qpc_dbit_t *qpdbiter = (qpc_dbit_t *)iterator;

	if (qpdbiter->result != ISC_R_SUCCESS) {
		return qpdbiter->result;
	}

	dns_name_copy(dns_rootname, name);
	return ISC_R_SUCCESS;
}

static void
qpcnode_deletedata(dns_dbnode_t *node ISC_ATTR_UNUSED, void *data) {
	dns_slabheader_t *header = data;
	qpcache_t *qpdb = HEADERNODE(header)->qpdb;

	int idx = HEADERNODE(header)->locknum;

	if (header->heap != NULL && header->heap_index != 0) {
		isc_heap_delete(header->heap, header->heap_index);
	}

	update_rrsetstats(qpdb->rrsetstats, header->typepair,
			  atomic_load_acquire(&header->attributes), false);

	if (ISC_LINK_LINKED(header, link)) {
		ISC_SIEVE_UNLINK(qpdb->buckets[idx].sieve, header, link);
	}

	if (header->noqname != NULL) {
		dns_slabheader_freeproof(qpdb->common.mctx, &header->noqname);
	}
	if (header->closest != NULL) {
		dns_slabheader_freeproof(qpdb->common.mctx, &header->closest);
	}
}

/*
 * Caller must be holding the node write lock.
 */
static void
expire_ttl_headers(qpcache_t *qpdb, unsigned int locknum,
		   isc_rwlocktype_t *nlocktypep, isc_rwlocktype_t *tlocktypep,
		   isc_stdtime_t now DNS__DB_FLARG) {
	isc_heap_t *heap = qpdb->buckets[locknum].heap;

	for (size_t i = 0; i < DNS_QPDB_EXPIRE_TTL_COUNT; i++) {
		dns_slabheader_t *header = isc_heap_element(heap, 1);

		if (header == NULL) {
			/* No headers left on this TTL heap; exit cleaning */
			return;
		}

		dns_ttl_t ttl = header->expire + STALE_TTL(header, qpdb);

		if (ttl >= now - QPDB_VIRTUAL) {
			/*
			 * The header at the top of this TTL heap is not yet
			 * eligible for expiry, so none of the other headers on
			 * the same heap can be eligible for expiry, either;
			 * exit cleaning.
			 */
			return;
		}

		expireheader(header, nlocktypep, tlocktypep,
			     dns_expire_ttl DNS__DB_FLARG_PASS);
	}
}
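
/*
 * Each bucket's heap is ordered by ttl_sooner, so the header that
 * expires soonest is always at element 1. That is why the loop above
 * can stop as soon as the top element is not yet expired: everything
 * else on the same heap expires no earlier. Purging at most
 * DNS_QPDB_EXPIRE_TTL_COUNT headers per call bounds the time spent
 * under the node write lock.
 */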

static void
setmaxrrperset(dns_db_t *db, uint32_t value) {
	qpcache_t *qpdb = (qpcache_t *)db;

	REQUIRE(VALID_QPDB(qpdb));

	qpdb->maxrrperset = value;
}

static void
setmaxtypepername(dns_db_t *db, uint32_t value) {
	qpcache_t *qpdb = (qpcache_t *)db;

	REQUIRE(VALID_QPDB(qpdb));

	qpdb->maxtypepername = value;
}

static dns_dbmethods_t qpdb_cachemethods = {
	.destroy = qpcache_destroy,
	.findnode = qpcache_findnode,
	.find = qpcache_find,
	.findzonecut = qpcache_findzonecut,
	.createiterator = qpcache_createiterator,
	.findrdataset = qpcache_findrdataset,
	.allrdatasets = qpcache_allrdatasets,
	.addrdataset = qpcache_addrdataset,
	.deleterdataset = qpcache_deleterdataset,
	.nodecount = nodecount,
	.getrrsetstats = getrrsetstats,
	.setcachestats = setcachestats,
	.setservestalettl = setservestalettl,
	.getservestalettl = getservestalettl,
	.setservestalerefresh = setservestalerefresh,
	.getservestalerefresh = getservestalerefresh,
	.setmaxrrperset = setmaxrrperset,
	.setmaxtypepername = setmaxtypepername,
};
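
/*
 * Node-level operations (locknode, unlocknode, deletedata) are
 * deliberately absent from this database vtable: they belong to the
 * node's own vtable, so a node that outlives the database can still
 * release its RR data without calling through a freed
 * dns_dbmethods_t.
 */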

static void
qpcnode_destroy(qpcnode_t *data) {
	dns_slabheader_t *current = NULL, *next = NULL;

	for (current = data->data; current != NULL; current = next) {
		dns_slabheader_t *down = current->down, *down_next = NULL;

		next = current->next;

		for (down = current->down; down != NULL; down = down_next) {
			down_next = down->down;
			dns_slabheader_destroy(&down);
		}

		dns_slabheader_destroy(&current);
	}

	dns_name_free(&data->name, data->mctx);
	isc_mem_putanddetach(&data->mctx, data, sizeof(qpcnode_t));
}
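
/*
 * The list walked above is two-dimensional: the "next" pointers chain
 * the node's slab headers together, while each header's "down" chain
 * holds superseded headers beneath it, so both dimensions must be
 * destroyed. Sketch of the assumed layout:
 *
 *	header A -> header B -> header C	(next chain)
 *	   |
 *	older A					(down chain)
 */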

#ifdef DNS_DB_NODETRACE
ISC_REFCOUNT_STATIC_TRACE_IMPL(qpcnode, qpcnode_destroy);
#else
ISC_REFCOUNT_STATIC_IMPL(qpcnode, qpcnode_destroy);
#endif

#ifdef DNS_DB_NODETRACE
ISC_REFCOUNT_STATIC_TRACE_IMPL(qpcache, qpcache__destroy);
#else
ISC_REFCOUNT_STATIC_IMPL(qpcache, qpcache__destroy);
#endif