/*
 * bind/lib/dns/view.c
 * (mirror of https://gitlab.isc.org/isc-projects/bind9)
 */
/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * SPDX-License-Identifier: MPL-2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

/*! \file */
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>

#ifdef HAVE_LMDB
#include <lmdb.h>
#endif /* ifdef HAVE_LMDB */

#include <isc/atomic.h>
#include <isc/dir.h>
#include <isc/file.h>
#include <isc/hash.h>
#include <isc/lex.h>
#include <isc/print.h>
#include <isc/result.h>
#include <isc/stats.h>
#include <isc/string.h>
#include <isc/task.h>
#include <isc/util.h>

#include <dns/acl.h>
#include <dns/adb.h>
#include <dns/badcache.h>
#include <dns/cache.h>
#include <dns/db.h>
#include <dns/dispatch.h>
#include <dns/dlz.h>
#include <dns/dns64.h>
#include <dns/dnssec.h>
#include <dns/events.h>
#include <dns/forward.h>
#include <dns/keytable.h>
#include <dns/keyvalues.h>
#include <dns/master.h>
#include <dns/masterdump.h>
#include <dns/nta.h>
#include <dns/order.h>
#include <dns/peer.h>
#include <dns/rbt.h>
#include <dns/rdataset.h>
#include <dns/request.h>
#include <dns/resolver.h>
#include <dns/rpz.h>
#include <dns/rrl.h>
#include <dns/stats.h>
#include <dns/time.h>
#include <dns/transport.h>
#include <dns/tsig.h>
#include <dns/view.h>
#include <dns/zone.h>
#include <dns/zt.h>
/*%
 * Evaluate 'op'; if it does not return ISC_R_SUCCESS, record the value in
 * the function-local variable 'result' and jump to the local "cleanup"
 * label.
 */
#define CHECK(op)                            \
	do {                                 \
		result = (op);               \
		if (result != ISC_R_SUCCESS) \
			goto cleanup;        \
	} while (0)

/*%
 * Bucket count for the hashed delegation-only and root-exclude name
 * lists (see the view->delonly / view->rootexclude teardown in destroy()).
 */
#define DNS_VIEW_DELONLYHASH 111

/*% Size passed to dns_badcache_init() for the view's failure cache. */
#define DNS_VIEW_FAILCACHESIZE 1021
isc_result_t
dns_view_create(isc_mem_t *mctx, dns_rdataclass_t rdclass, const char *name,
2020-02-13 14:44:37 -08:00
dns_view_t **viewp) {
dns_view_t *view = NULL;
1999-08-05 22:08:45 +00:00
isc_result_t result;
2020-02-13 14:44:37 -08:00
char buffer[1024];
1999-08-05 22:08:45 +00:00
REQUIRE(name != NULL);
REQUIRE(viewp != NULL && *viewp == NULL);
result = isc_file_sanitize(NULL, name, "nta", buffer, sizeof(buffer));
if (result != ISC_R_SUCCESS) {
return (result);
}
2001-11-12 19:05:39 +00:00
view = isc_mem_get(mctx, sizeof(*view));
*view = (dns_view_t){
.rdclass = rdclass,
.name = isc_mem_strdup(mctx, name),
.nta_file = isc_mem_strdup(mctx, buffer),
.recursion = true,
.enablevalidation = true,
.minimalresponses = dns_minimal_no,
.transfer_format = dns_one_answer,
.msgcompression = true,
.provideixfr = true,
.maxcachettl = 7 * 24 * 3600,
.maxncachettl = 3 * 3600,
.dstport = 53,
.staleanswerttl = 1,
.staleanswersok = dns_stale_answer_conf,
.sendcookie = true,
.synthfromdnssec = true,
.trust_anchor_telemetry = true,
.root_key_sentinel = true,
};
isc_refcount_init(&view->references, 1);
isc_refcount_init(&view->weakrefs, 1);
dns_fixedname_init(&view->redirectfixed);
ISC_LIST_INIT(view->dlz_searched);
ISC_LIST_INIT(view->dlz_unsearched);
ISC_LIST_INIT(view->dns64);
ISC_LINK_INIT(view, link);
isc_mem_attach(mctx, &view->mctx);
2018-11-16 15:33:22 +01:00
isc_mutex_init(&view->lock);
view->zonetable = NULL;
2018-10-03 17:56:50 -07:00
result = dns_zt_create(mctx, rdclass, &view->zonetable);
if (result != ISC_R_SUCCESS) {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"dns_zt_create() failed: %s",
isc_result_totext(result));
result = ISC_R_UNEXPECTED;
goto cleanup_mutex;
1999-08-05 22:08:45 +00:00
}
2018-10-03 17:56:50 -07:00
result = dns_fwdtable_create(mctx, &view->fwdtable);
if (result != ISC_R_SUCCESS) {
UNEXPECTED_ERROR(__FILE__, __LINE__,
"dns_fwdtable_create() failed: %s",
isc_result_totext(result));
result = ISC_R_UNEXPECTED;
goto cleanup_zt;
}
result = dns_tsigkeyring_create(view->mctx, &view->dynamickeys);
if (result != ISC_R_SUCCESS) {
goto cleanup_weakrefs;
}
result = dns_badcache_init(view->mctx, DNS_VIEW_FAILCACHESIZE,
&view->failcache);
if (result != ISC_R_SUCCESS) {
goto cleanup_dynkeys;
}
2018-11-16 15:33:22 +01:00
isc_mutex_init(&view->new_zone_lock);
2018-10-03 17:56:50 -07:00
result = dns_order_create(view->mctx, &view->order);
if (result != ISC_R_SUCCESS) {
goto cleanup_new_zone_lock;
}
result = dns_peerlist_new(view->mctx, &view->peers);
if (result != ISC_R_SUCCESS) {
goto cleanup_order;
}
result = dns_aclenv_create(view->mctx, &view->aclenv);
if (result != ISC_R_SUCCESS) {
goto cleanup_peerlist;
}
1999-08-05 22:08:45 +00:00
view->magic = DNS_VIEW_MAGIC;
*viewp = view;
return (ISC_R_SUCCESS);
cleanup_peerlist:
if (view->peers != NULL) {
dns_peerlist_detach(&view->peers);
}
cleanup_order:
if (view->order != NULL) {
dns_order_detach(&view->order);
}
cleanup_new_zone_lock:
isc_mutex_destroy(&view->new_zone_lock);
dns_badcache_destroy(&view->failcache);
cleanup_dynkeys:
if (view->dynamickeys != NULL) {
dns_tsigkeyring_detach(&view->dynamickeys);
}
2000-02-24 21:12:16 +00:00
cleanup_weakrefs:
isc_refcount_decrementz(&view->weakrefs);
isc_refcount_destroy(&view->weakrefs);
isc_refcount_decrementz(&view->references);
isc_refcount_destroy(&view->references);
if (view->fwdtable != NULL) {
dns_fwdtable_destroy(&view->fwdtable);
}
cleanup_zt:
if (view->zonetable != NULL) {
dns_zt_detach(&view->zonetable);
}
1999-10-11 19:13:17 +00:00
cleanup_mutex:
isc_mutex_destroy(&view->lock);
1999-08-05 22:08:45 +00:00
if (view->nta_file != NULL) {
isc_mem_free(mctx, view->nta_file);
}
1999-08-05 22:08:45 +00:00
isc_mem_free(mctx, view->name);
isc_mem_putanddetach(&view->mctx, view, sizeof(*view));
1999-08-05 22:08:45 +00:00
return (result);
}
static void
2020-02-13 14:44:37 -08:00
destroy(dns_view_t *view) {
dns_dns64_t *dns64;
dns_dlzdb_t *dlzdb;
1999-08-05 22:08:45 +00:00
REQUIRE(!ISC_LINK_LINKED(view, link));
isc_refcount_destroy(&view->references);
isc_refcount_destroy(&view->weakrefs);
if (view->order != NULL) {
dns_order_detach(&view->order);
}
if (view->peers != NULL) {
2000-02-24 21:12:16 +00:00
dns_peerlist_detach(&view->peers);
}
if (view->dynamickeys != NULL) {
isc_result_t result;
char template[PATH_MAX];
2020-02-13 14:44:37 -08:00
char keyfile[PATH_MAX];
FILE *fp = NULL;
result = isc_file_mktemplate(NULL, template, sizeof(template));
if (result == ISC_R_SUCCESS) {
(void)isc_file_openuniqueprivate(template, &fp);
}
if (fp == NULL) {
dns_tsigkeyring_detach(&view->dynamickeys);
} else {
result = dns_tsigkeyring_dumpanddetach(
&view->dynamickeys, fp);
if (result == ISC_R_SUCCESS) {
if (fclose(fp) == 0) {
result = isc_file_sanitize(
NULL, view->name, "tsigkeys",
keyfile, sizeof(keyfile));
if (result == ISC_R_SUCCESS) {
result = isc_file_rename(
template, keyfile);
}
}
if (result != ISC_R_SUCCESS) {
(void)remove(template);
}
} else {
(void)fclose(fp);
(void)remove(template);
}
}
}
if (view->transports != NULL) {
dns_transport_list_detach(&view->transports);
}
if (view->statickeys != NULL) {
dns_tsigkeyring_detach(&view->statickeys);
}
if (view->adb != NULL) {
1999-10-28 19:52:10 +00:00
dns_adb_detach(&view->adb);
}
if (view->resolver != NULL) {
1999-08-05 22:08:45 +00:00
dns_resolver_detach(&view->resolver);
}
dns_rrl_view_destroy(view);
if (view->rpzs != NULL) {
dns_rpz_detach_rpzs(&view->rpzs);
}
if (view->catzs != NULL) {
dns_catz_catzs_detach(&view->catzs);
}
for (dlzdb = ISC_LIST_HEAD(view->dlz_searched); dlzdb != NULL;
2020-02-13 14:44:37 -08:00
dlzdb = ISC_LIST_HEAD(view->dlz_searched))
{
ISC_LIST_UNLINK(view->dlz_searched, dlzdb, link);
dns_dlzdestroy(&dlzdb);
}
for (dlzdb = ISC_LIST_HEAD(view->dlz_unsearched); dlzdb != NULL;
2020-02-13 14:44:37 -08:00
dlzdb = ISC_LIST_HEAD(view->dlz_unsearched))
{
ISC_LIST_UNLINK(view->dlz_unsearched, dlzdb, link);
dns_dlzdestroy(&dlzdb);
}
if (view->requestmgr != NULL) {
dns_requestmgr_detach(&view->requestmgr);
}
if (view->task != NULL) {
isc_task_detach(&view->task);
}
if (view->hints != NULL) {
1999-09-24 01:40:50 +00:00
dns_db_detach(&view->hints);
}
if (view->cachedb != NULL) {
1999-08-12 07:49:09 +00:00
dns_db_detach(&view->cachedb);
}
if (view->cache != NULL) {
dns_cache_detach(&view->cache);
}
if (view->nocasecompress != NULL) {
dns_acl_detach(&view->nocasecompress);
}
if (view->matchclients != NULL) {
2000-04-06 00:19:44 +00:00
dns_acl_detach(&view->matchclients);
}
if (view->matchdestinations != NULL) {
dns_acl_detach(&view->matchdestinations);
}
if (view->cacheacl != NULL) {
dns_acl_detach(&view->cacheacl);
}
if (view->cacheonacl != NULL) {
dns_acl_detach(&view->cacheonacl);
}
if (view->queryacl != NULL) {
dns_acl_detach(&view->queryacl);
}
if (view->queryonacl != NULL) {
dns_acl_detach(&view->queryonacl);
}
if (view->recursionacl != NULL) {
dns_acl_detach(&view->recursionacl);
}
if (view->recursiononacl != NULL) {
dns_acl_detach(&view->recursiononacl);
}
if (view->sortlist != NULL) {
dns_acl_detach(&view->sortlist);
}
if (view->transferacl != NULL) {
dns_acl_detach(&view->transferacl);
}
if (view->notifyacl != NULL) {
dns_acl_detach(&view->notifyacl);
}
if (view->updateacl != NULL) {
dns_acl_detach(&view->updateacl);
}
if (view->upfwdacl != NULL) {
dns_acl_detach(&view->upfwdacl);
}
if (view->denyansweracl != NULL) {
dns_acl_detach(&view->denyansweracl);
}
if (view->pad_acl != NULL) {
dns_acl_detach(&view->pad_acl);
}
if (view->answeracl_exclude != NULL) {
dns_rbt_destroy(&view->answeracl_exclude);
}
if (view->denyanswernames != NULL) {
dns_rbt_destroy(&view->denyanswernames);
}
if (view->answernames_exclude != NULL) {
dns_rbt_destroy(&view->answernames_exclude);
}
if (view->delonly != NULL) {
dns_name_t *name;
2020-02-13 14:44:37 -08:00
int i;
for (i = 0; i < DNS_VIEW_DELONLYHASH; i++) {
name = ISC_LIST_HEAD(view->delonly[i]);
while (name != NULL) {
ISC_LIST_UNLINK(view->delonly[i], name, link);
dns_name_free(name, view->mctx);
isc_mem_put(view->mctx, name, sizeof(*name));
name = ISC_LIST_HEAD(view->delonly[i]);
}
}
isc_mem_put(view->mctx, view->delonly,
sizeof(dns_namelist_t) * DNS_VIEW_DELONLYHASH);
view->delonly = NULL;
}
if (view->rootexclude != NULL) {
dns_name_t *name;
2020-02-13 14:44:37 -08:00
int i;
for (i = 0; i < DNS_VIEW_DELONLYHASH; i++) {
name = ISC_LIST_HEAD(view->rootexclude[i]);
while (name != NULL) {
ISC_LIST_UNLINK(view->rootexclude[i], name,
link);
dns_name_free(name, view->mctx);
isc_mem_put(view->mctx, name, sizeof(*name));
name = ISC_LIST_HEAD(view->rootexclude[i]);
}
}
isc_mem_put(view->mctx, view->rootexclude,
sizeof(dns_namelist_t) * DNS_VIEW_DELONLYHASH);
view->rootexclude = NULL;
}
if (view->secroots_priv != NULL) {
dns_keytable_detach(&view->secroots_priv);
}
if (view->ntatable_priv != NULL) {
dns_ntatable_detach(&view->ntatable_priv);
}
for (dns64 = ISC_LIST_HEAD(view->dns64); dns64 != NULL;
2020-02-13 14:44:37 -08:00
dns64 = ISC_LIST_HEAD(view->dns64))
{
dns_dns64_unlink(&view->dns64, dns64);
dns_dns64_destroy(&dns64);
}
if (view->managed_keys != NULL) {
dns_zone_detach(&view->managed_keys);
}
if (view->redirect != NULL) {
dns_zone_detach(&view->redirect);
}
#ifdef HAVE_DNSTAP
if (view->dtenv != NULL) {
dns_dt_detach(&view->dtenv);
}
#endif /* HAVE_DNSTAP */
dns_view_setnewzones(view, false, NULL, NULL, 0ULL);
if (view->new_zone_file != NULL) {
isc_mem_free(view->mctx, view->new_zone_file);
view->new_zone_file = NULL;
}
if (view->new_zone_dir != NULL) {
isc_mem_free(view->mctx, view->new_zone_dir);
view->new_zone_dir = NULL;
}
#ifdef HAVE_LMDB
if (view->new_zone_dbenv != NULL) {
mdb_env_close((MDB_env *)view->new_zone_dbenv);
view->new_zone_dbenv = NULL;
}
if (view->new_zone_db != NULL) {
isc_mem_free(view->mctx, view->new_zone_db);
view->new_zone_db = NULL;
}
#endif /* HAVE_LMDB */
dns_fwdtable_destroy(&view->fwdtable);
dns_aclenv_detach(&view->aclenv);
if (view->failcache != NULL) {
dns_badcache_destroy(&view->failcache);
}
isc_mutex_destroy(&view->new_zone_lock);
isc_mutex_destroy(&view->lock);
isc_refcount_destroy(&view->references);
isc_refcount_destroy(&view->weakrefs);
isc_mem_free(view->mctx, view->nta_file);
1999-08-05 22:08:45 +00:00
isc_mem_free(view->mctx, view->name);
if (view->hooktable != NULL && view->hooktable_free != NULL) {
view->hooktable_free(view->mctx, &view->hooktable);
}
if (view->plugins != NULL && view->plugins_free != NULL) {
view->plugins_free(view->mctx, &view->plugins);
}
isc_mem_putanddetach(&view->mctx, view, sizeof(*view));
1999-08-05 22:08:45 +00:00
}
void
2020-02-13 14:44:37 -08:00
dns_view_attach(dns_view_t *source, dns_view_t **targetp) {
REQUIRE(DNS_VIEW_VALID(source));
REQUIRE(targetp != NULL && *targetp == NULL);
isc_refcount_increment(&source->references);
*targetp = source;
}
/*
 * Release a strong reference on '*viewp' and set '*viewp' to NULL.
 *
 * When the last strong reference is dropped, shut down the resolver,
 * ADB and request manager, detach the zone table, managed-keys zone
 * and redirect zone (flushing them first if view->flush is set), and
 * finally drop the view's weak reference, which may destroy the view.
 */
void
dns_view_detach(dns_view_t **viewp) {
	dns_view_t *view = NULL;

	REQUIRE(viewp != NULL && DNS_VIEW_VALID(*viewp));

	view = *viewp;
	*viewp = NULL;

	if (isc_refcount_decrement(&view->references) == 1) {
		dns_zone_t *mkzone = NULL, *rdzone = NULL;
		dns_zt_t *zt = NULL;

		isc_refcount_destroy(&view->references);

		if (view->resolver != NULL) {
			dns_resolver_shutdown(view->resolver);
			dns_resolver_detach(&view->resolver);
		}
		if (view->adb != NULL) {
			dns_adb_shutdown(view->adb);
			dns_adb_detach(&view->adb);
		}
		if (view->requestmgr != NULL) {
			dns_requestmgr_shutdown(view->requestmgr);
			dns_requestmgr_detach(&view->requestmgr);
		}

		LOCK(&view->lock);
		if (view->zonetable != NULL) {
			zt = view->zonetable;
			view->zonetable = NULL;
			if (view->flush) {
				dns_zt_flush(zt);
			}
		}
		if (view->managed_keys != NULL) {
			mkzone = view->managed_keys;
			view->managed_keys = NULL;
			if (view->flush) {
				dns_zone_flush(mkzone);
			}
		}
		if (view->redirect != NULL) {
			rdzone = view->redirect;
			view->redirect = NULL;
			if (view->flush) {
				dns_zone_flush(rdzone);
			}
		}
		if (view->catzs != NULL) {
			dns_catz_catzs_detach(&view->catzs);
		}
		if (view->ntatable_priv != NULL) {
			dns_ntatable_shutdown(view->ntatable_priv);
		}
		UNLOCK(&view->lock);

		/* Need to detach zt and zones outside view lock */
		if (zt != NULL) {
			dns_zt_detach(&zt);
		}
		if (mkzone != NULL) {
			dns_zone_detach(&mkzone);
		}
		if (rdzone != NULL) {
			dns_zone_detach(&rdzone);
		}

		dns_view_weakdetach(&view);
	}
}
static isc_result_t
2020-02-13 14:44:37 -08:00
dialup(dns_zone_t *zone, void *dummy) {
UNUSED(dummy);
2000-11-03 07:16:09 +00:00
dns_zone_dialup(zone);
return (ISC_R_SUCCESS);
}
void
2020-02-13 14:44:37 -08:00
dns_view_dialup(dns_view_t *view) {
2000-11-03 07:16:09 +00:00
REQUIRE(DNS_VIEW_VALID(view));
REQUIRE(view->zonetable != NULL);
(void)dns_zt_apply(view->zonetable, false, NULL, dialup, NULL);
2000-11-03 07:16:09 +00:00
}
void
2020-02-13 14:44:37 -08:00
dns_view_weakattach(dns_view_t *source, dns_view_t **targetp) {
REQUIRE(DNS_VIEW_VALID(source));
REQUIRE(targetp != NULL && *targetp == NULL);
isc_refcount_increment(&source->weakrefs);
*targetp = source;
}
void
2020-02-13 14:44:37 -08:00
dns_view_weakdetach(dns_view_t **viewp) {
dns_view_t *view = NULL;
REQUIRE(viewp != NULL);
view = *viewp;
*viewp = NULL;
REQUIRE(DNS_VIEW_VALID(view));
if (isc_refcount_decrement(&view->weakrefs) == 1) {
destroy(view);
}
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_createzonetable(dns_view_t *view) {
REQUIRE(DNS_VIEW_VALID(view));
REQUIRE(!view->frozen);
REQUIRE(view->zonetable == NULL);
return (dns_zt_create(view->mctx, view->rdclass, &view->zonetable));
}
1999-09-24 01:40:50 +00:00
isc_result_t
dns_view_createresolver(dns_view_t *view, isc_taskmgr_t *taskmgr,
unsigned int ndisp, isc_nm_t *nm,
Convert dispatch to netmgr The flow of operations in dispatch is changing and will now be similar for both UDP and TCP queries: 1) Call dns_dispatch_addresponse() to assign a query ID and register that we'll be listening for a response with that ID soon. the parameters for this function include callback functions to inform the caller when the socket is connected and when the message has been sent, as well as a task action that will be sent when the response arrives. (later this could become a netmgr callback, but at this stage to minimize disruption to the calling code, we continue to use isc_task for the response event.) on successful completion of this function, a dispatch entry object will be instantiated. 2) Call dns_dispatch_connect() on the dispatch entry. this runs isc_nm_udpconnect() or isc_nm_tcpdnsconnect(), as needed, and begins listening for responses. the caller is informed via a callback function when the connection is established. 3) Call dns_dispatch_send() on the dispatch entry. this runs isc_nm_send() to send a request. 4) Call dns_dispatch_removeresponse() to terminate listening and close the connection. Implementation comments below: - As we will be using netmgr buffers now. code to send the length in TCP queries has also been removed as that is handled by the netmgr. - TCP dispatches can be used by multiple simultaneous queries, so dns_dispatch_connect() now checks whether the dispatch is already connected before calling isc_nm_tcpdnsconnect() again. - Running dns_dispatch_getnext() from a non-network thread caused a crash due to assertions in the netmgr read functions that appear to be unnecessary now. the assertions have been removed. - fctx->nqueries was formerly incremented when the connection was successful, but is now incremented when the query is started and decremented if the connection fails. - It's no longer necessary for each dispatch to have a pool of tasks, so there's now a single task per dispatch. 
- Dispatch code to avoid UDP ports already in use has been removed. - dns_resolver and dns_request have been modified to use netmgr callback functions instead of task events. some additional changes were needed to handle shutdown processing correctly. - Timeout processing is not yet fully converted to use netmgr timeouts. - Fixed a lock order cycle reported by TSAN (view -> zone-> adb -> view) by by calling dns_zt functions without holding the view lock.
2021-01-14 13:02:57 -08:00
isc_timermgr_t *timermgr, unsigned int options,
dns_dispatchmgr_t *dispatchmgr,
2020-02-13 14:44:37 -08:00
dns_dispatch_t *dispatchv4,
dns_dispatch_t *dispatchv6) {
1999-10-28 19:52:10 +00:00
isc_result_t result;
2020-02-13 14:44:37 -08:00
isc_mem_t *mctx = NULL;
1999-10-28 19:52:10 +00:00
REQUIRE(DNS_VIEW_VALID(view));
1999-09-22 19:35:47 +00:00
REQUIRE(!view->frozen);
REQUIRE(view->resolver == NULL);
result = isc_task_create(taskmgr, 0, &view->task, 0);
if (result != ISC_R_SUCCESS) {
return (result);
}
2000-01-25 19:31:23 +00:00
isc_task_setname(view->task, "view", view);
result = dns_resolver_create(view, taskmgr, ndisp, nm, timermgr,
Convert dispatch to netmgr The flow of operations in dispatch is changing and will now be similar for both UDP and TCP queries: 1) Call dns_dispatch_addresponse() to assign a query ID and register that we'll be listening for a response with that ID soon. the parameters for this function include callback functions to inform the caller when the socket is connected and when the message has been sent, as well as a task action that will be sent when the response arrives. (later this could become a netmgr callback, but at this stage to minimize disruption to the calling code, we continue to use isc_task for the response event.) on successful completion of this function, a dispatch entry object will be instantiated. 2) Call dns_dispatch_connect() on the dispatch entry. this runs isc_nm_udpconnect() or isc_nm_tcpdnsconnect(), as needed, and begins listening for responses. the caller is informed via a callback function when the connection is established. 3) Call dns_dispatch_send() on the dispatch entry. this runs isc_nm_send() to send a request. 4) Call dns_dispatch_removeresponse() to terminate listening and close the connection. Implementation comments below: - As we will be using netmgr buffers now. code to send the length in TCP queries has also been removed as that is handled by the netmgr. - TCP dispatches can be used by multiple simultaneous queries, so dns_dispatch_connect() now checks whether the dispatch is already connected before calling isc_nm_tcpdnsconnect() again. - Running dns_dispatch_getnext() from a non-network thread caused a crash due to assertions in the netmgr read functions that appear to be unnecessary now. the assertions have been removed. - fctx->nqueries was formerly incremented when the connection was successful, but is now incremented when the query is started and decremented if the connection fails. - It's no longer necessary for each dispatch to have a pool of tasks, so there's now a single task per dispatch. 
- Dispatch code to avoid UDP ports already in use has been removed. - dns_resolver and dns_request have been modified to use netmgr callback functions instead of task events. some additional changes were needed to handle shutdown processing correctly. - Timeout processing is not yet fully converted to use netmgr timeouts. - Fixed a lock order cycle reported by TSAN (view -> zone-> adb -> view) by by calling dns_zt functions without holding the view lock.
2021-01-14 13:02:57 -08:00
options, dispatchmgr, dispatchv4,
dispatchv6, &view->resolver);
if (result != ISC_R_SUCCESS) {
isc_task_detach(&view->task);
1999-10-28 19:52:10 +00:00
return (result);
}
isc_mem_create(&mctx);
isc_mem_setname(mctx, "ADB");
Refactor ADB reference counting, shutdown and locking The ADB previously used separate reference counters for internal and external references, plus additional counters for ABD find and namehook objects, and used all these counters to coordinate its shutdown process, which was a multi-stage affair involving a sequence of control events. It also used a complex interlocking set of static functions for referencing, deferencing, linking, unlinking, and cleaning up various internal objects; these functions returned boolean values to their callers to indicate what additional processing was needed. The changes in the previous two commits destabilized this fragile system in a way that was difficult to recover from, so in this commit we refactor all of it. The dns_adb and dns_adbentry objects now use conventional attach and detach functions for reference counting, and the shutdown process is much more straightforward. Instead of handling shutdown asynchronously, we can just destroy the ADB when references reach zero In addition, ADB locking has been simplified. Instead of a single `find_{name,entry}_and_lock()` function which searches for a name or entry's hash bucket, locks it, and then searches for the name or entry in the bucket, we now use one function to find the bucket (leaving it to the caller to do the locking) and another find the name or entry. Instead of locking the entire ADB when modifying hash tables, we now use read-write locks around the specific hash table. The only remaining need for adb->lock is when modifying the `whenshutdown` list. Comments throughout the module have been improved.
2022-03-21 12:48:52 -07:00
result = dns_adb_create(mctx, view, taskmgr, &view->adb);
isc_mem_detach(&mctx);
if (result != ISC_R_SUCCESS) {
goto cleanup_resolver;
}
1999-10-28 19:52:10 +00:00
Convert dispatch to netmgr The flow of operations in dispatch is changing and will now be similar for both UDP and TCP queries: 1) Call dns_dispatch_addresponse() to assign a query ID and register that we'll be listening for a response with that ID soon. the parameters for this function include callback functions to inform the caller when the socket is connected and when the message has been sent, as well as a task action that will be sent when the response arrives. (later this could become a netmgr callback, but at this stage to minimize disruption to the calling code, we continue to use isc_task for the response event.) on successful completion of this function, a dispatch entry object will be instantiated. 2) Call dns_dispatch_connect() on the dispatch entry. this runs isc_nm_udpconnect() or isc_nm_tcpdnsconnect(), as needed, and begins listening for responses. the caller is informed via a callback function when the connection is established. 3) Call dns_dispatch_send() on the dispatch entry. this runs isc_nm_send() to send a request. 4) Call dns_dispatch_removeresponse() to terminate listening and close the connection. Implementation comments below: - As we will be using netmgr buffers now. code to send the length in TCP queries has also been removed as that is handled by the netmgr. - TCP dispatches can be used by multiple simultaneous queries, so dns_dispatch_connect() now checks whether the dispatch is already connected before calling isc_nm_tcpdnsconnect() again. - Running dns_dispatch_getnext() from a non-network thread caused a crash due to assertions in the netmgr read functions that appear to be unnecessary now. the assertions have been removed. - fctx->nqueries was formerly incremented when the connection was successful, but is now incremented when the query is started and decremented if the connection fails. - It's no longer necessary for each dispatch to have a pool of tasks, so there's now a single task per dispatch. 
- Dispatch code to avoid UDP ports already in use has been removed. - dns_resolver and dns_request have been modified to use netmgr callback functions instead of task events. some additional changes were needed to handle shutdown processing correctly. - Timeout processing is not yet fully converted to use netmgr timeouts. - Fixed a lock order cycle reported by TSAN (view -> zone-> adb -> view) by by calling dns_zt functions without holding the view lock.
2021-01-14 13:02:57 -08:00
result = dns_requestmgr_create(
view->mctx, dns_resolver_taskmgr(view->resolver),
dns_resolver_dispatchmgr(view->resolver), dispatchv4,
dispatchv6, &view->requestmgr);
if (result != ISC_R_SUCCESS) {
goto cleanup_adb;
}
return (ISC_R_SUCCESS);
cleanup_adb:
dns_adb_shutdown(view->adb);
dns_adb_detach(&view->adb);
cleanup_resolver:
dns_resolver_shutdown(view->resolver);
dns_resolver_detach(&view->resolver);
return (result);
}
/*
 * Attach 'cache' (and its database) to the view, replacing any cache
 * configured earlier.  'shared' records whether the cache is shared
 * with other views (reported by dns_view_iscacheshared()).
 */
void
dns_view_setcache(dns_view_t *view, dns_cache_t *cache, bool shared) {
	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(!view->frozen);

	view->cacheshared = shared;
	/* Drop references to any previously configured cache first. */
	if (view->cache != NULL) {
		dns_db_detach(&view->cachedb);
		dns_cache_detach(&view->cache);
	}
	dns_cache_attach(cache, &view->cache);
	dns_cache_attachdb(cache, &view->cachedb);
	INSIST(DNS_DB_VALID(view->cachedb));
}
/*
 * Return whether the view's cache is shared with other views, as
 * recorded by dns_view_setcache().
 */
bool
dns_view_iscacheshared(dns_view_t *view) {
	REQUIRE(DNS_VIEW_VALID(view));

	return (view->cacheshared);
}
1999-09-24 01:40:50 +00:00
/*
 * Set the view's root hints database.  May only be done once, before
 * the view is frozen; 'hints' must be a zone-style database.
 */
void
dns_view_sethints(dns_view_t *view, dns_db_t *hints) {
	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(!view->frozen);
	REQUIRE(view->hints == NULL);
	REQUIRE(dns_db_iszone(hints));

	dns_db_attach(hints, &view->hints);
}
/*
 * Set the view's transport list, replacing any list set earlier.
 */
void
dns_view_settransports(dns_view_t *view, dns_transport_list_t *list) {
	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(list != NULL);

	if (view->transports != NULL) {
		dns_transport_list_detach(&view->transports);
	}
	dns_transport_list_attach(list, &view->transports);
}
2000-01-21 20:18:41 +00:00
/*
 * Set the view's static TSIG key ring, replacing any ring set earlier.
 */
void
dns_view_setkeyring(dns_view_t *view, dns_tsig_keyring_t *ring) {
	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(ring != NULL);

	if (view->statickeys != NULL) {
		dns_tsigkeyring_detach(&view->statickeys);
	}
	dns_tsigkeyring_attach(ring, &view->statickeys);
}
/*
 * Set the view's dynamic (TKEY-negotiated) TSIG key ring, replacing
 * any ring set earlier.
 */
void
dns_view_setdynamickeyring(dns_view_t *view, dns_tsig_keyring_t *ring) {
	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(ring != NULL);

	if (view->dynamickeys != NULL) {
		dns_tsigkeyring_detach(&view->dynamickeys);
	}
	dns_tsigkeyring_attach(ring, &view->dynamickeys);
}
/*
 * Return (attach) the view's dynamic TSIG key ring via '*ringp'.
 * '*ringp' is left NULL when the view has no dynamic key ring.
 */
void
dns_view_getdynamickeyring(dns_view_t *view, dns_tsig_keyring_t **ringp) {
	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(ringp != NULL && *ringp == NULL);

	if (view->dynamickeys != NULL) {
		dns_tsigkeyring_attach(view->dynamickeys, ringp);
	}
}
void
2020-02-13 14:44:37 -08:00
dns_view_restorekeyring(dns_view_t *view) {
FILE *fp;
char keyfile[PATH_MAX];
isc_result_t result;
REQUIRE(DNS_VIEW_VALID(view));
if (view->dynamickeys != NULL) {
result = isc_file_sanitize(NULL, view->name, "tsigkeys",
keyfile, sizeof(keyfile));
if (result == ISC_R_SUCCESS) {
fp = fopen(keyfile, "r");
if (fp != NULL) {
dns_keyring_restore(view->dynamickeys, fp);
(void)fclose(fp);
}
}
}
2000-01-21 20:18:41 +00:00
}
/*
 * Set the destination port used for queries sent from this view.
 */
void
dns_view_setdstport(dns_view_t *view, in_port_t dstport) {
	REQUIRE(DNS_VIEW_VALID(view));

	view->dstport = dstport;
}
/*
 * Freeze the view: no further configuration changes are permitted.
 * If the view has a resolver, its cache database must already be in
 * place and the resolver is frozen as well.
 */
void
dns_view_freeze(dns_view_t *view) {
	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(!view->frozen);

	if (view->resolver != NULL) {
		INSIST(view->cachedb != NULL);
		dns_resolver_freeze(view->resolver);
	}
	view->frozen = true;
}
/*
 * Thaw a frozen view so it can be reconfigured.  Note the resolver is
 * not un-frozen here; only the view's own flag is cleared.
 */
void
dns_view_thaw(dns_view_t *view) {
	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(view->frozen);

	view->frozen = false;
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_addzone(dns_view_t *view, dns_zone_t *zone) {
1999-09-22 19:35:47 +00:00
isc_result_t result;
REQUIRE(DNS_VIEW_VALID(view));
1999-09-22 19:35:47 +00:00
REQUIRE(!view->frozen);
REQUIRE(view->zonetable != NULL);
1999-09-22 19:35:47 +00:00
result = dns_zt_mount(view->zonetable, zone);
1999-09-22 19:35:47 +00:00
return (result);
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_findzone(dns_view_t *view, const dns_name_t *name,
dns_zone_t **zonep) {
isc_result_t result;
REQUIRE(DNS_VIEW_VALID(view));
LOCK(&view->lock);
if (view->zonetable != NULL) {
result = dns_zt_find(view->zonetable, name, 0, NULL, zonep);
if (result == DNS_R_PARTIALMATCH) {
dns_zone_detach(zonep);
result = ISC_R_NOTFOUND;
}
} else {
result = ISC_R_NOTFOUND;
}
UNLOCK(&view->lock);
1999-10-15 01:43:39 +00:00
return (result);
}
1999-09-22 19:35:47 +00:00
/*
 * Find an rdataset whose owner name is 'name' and whose type is
 * 'type', searching in order: the best-matching zone database, the
 * cache, and (optionally) the hints database.  Glue found in a zone is
 * remembered in zrdataset/zsigrdataset while the cache is consulted,
 * and is used if the cache has nothing better.  On success the
 * database and node used may be handed back via 'dbp'/'nodep'.
 *
 * Reference-count discipline: 'db'/'node' always track the database
 * currently being searched; 'zdb'/'znode' hold the remembered zone
 * glue.  The attach/detach ordering below is load-bearing — every exit
 * goes through 'cleanup', which releases whatever is still held.
 */
isc_result_t
dns_view_find(dns_view_t *view, const dns_name_t *name, dns_rdatatype_t type,
	      isc_stdtime_t now, unsigned int options, bool use_hints,
	      bool use_static_stub, dns_db_t **dbp, dns_dbnode_t **nodep,
	      dns_name_t *foundname, dns_rdataset_t *rdataset,
	      dns_rdataset_t *sigrdataset) {
	isc_result_t result;
	dns_db_t *db, *zdb;
	dns_dbnode_t *node, *znode;
	bool is_cache, is_staticstub_zone;
	dns_rdataset_t zrdataset, zsigrdataset;
	dns_zone_t *zone;

	/*
	 * Find an rdataset whose owner name is 'name', and whose type is
	 * 'type'.
	 */

	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(view->frozen);
	REQUIRE(type != dns_rdatatype_rrsig);
	REQUIRE(rdataset != NULL); /* XXXBEW - remove this */
	REQUIRE(nodep == NULL || *nodep == NULL);

	/*
	 * Initialize.
	 */
	dns_rdataset_init(&zrdataset);
	dns_rdataset_init(&zsigrdataset);
	zdb = NULL;
	znode = NULL;

	/*
	 * Find a database to answer the query.
	 */
	db = NULL;
	node = NULL;
	is_staticstub_zone = false;
	zone = NULL;
	LOCK(&view->lock);
	if (view->zonetable != NULL) {
		result = dns_zt_find(view->zonetable, name, DNS_ZTFIND_MIRROR,
				     NULL, &zone);
	} else {
		result = ISC_R_NOTFOUND;
	}
	UNLOCK(&view->lock);
	/*
	 * A static-stub zone is ignored entirely unless the caller asked
	 * for static-stub processing.
	 */
	if (zone != NULL && dns_zone_gettype(zone) == dns_zone_staticstub &&
	    !use_static_stub)
	{
		result = ISC_R_NOTFOUND;
	}
	if (result == ISC_R_SUCCESS || result == DNS_R_PARTIALMATCH) {
		result = dns_zone_getdb(zone, &db);
		if (result != ISC_R_SUCCESS && view->cachedb != NULL) {
			/* Zone has no database; fall back to the cache. */
			dns_db_attach(view->cachedb, &db);
		} else if (result != ISC_R_SUCCESS) {
			goto cleanup;
		}
		if (dns_zone_gettype(zone) == dns_zone_staticstub &&
		    dns_name_equal(name, dns_zone_getorigin(zone)))
		{
			is_staticstub_zone = true;
		}
	} else if (result == ISC_R_NOTFOUND && view->cachedb != NULL) {
		dns_db_attach(view->cachedb, &db);
	} else {
		goto cleanup;
	}

	is_cache = dns_db_iscache(db);

db_find:
	/*
	 * Now look for an answer in the database.
	 */
	result = dns_db_find(db, name, NULL, type, options, now, &node,
			     foundname, rdataset, sigrdataset);

	if (result == DNS_R_DELEGATION || result == ISC_R_NOTFOUND) {
		/* Discard any partial results before retrying or failing. */
		if (dns_rdataset_isassociated(rdataset)) {
			dns_rdataset_disassociate(rdataset);
		}
		if (sigrdataset != NULL &&
		    dns_rdataset_isassociated(sigrdataset)) {
			dns_rdataset_disassociate(sigrdataset);
		}
		if (node != NULL) {
			dns_db_detachnode(db, &node);
		}
		if (!is_cache) {
			dns_db_detach(&db);
			if (view->cachedb != NULL && !is_staticstub_zone) {
				/*
				 * Either the answer is in the cache, or we
				 * don't know it.
				 * Note that if the result comes from a
				 * static-stub zone we stop the search here
				 * (see the function description in view.h).
				 */
				is_cache = true;
				dns_db_attach(view->cachedb, &db);
				goto db_find;
			}
		} else {
			/*
			 * We don't have the data in the cache.  If we've got
			 * glue from the zone, use it.
			 */
			if (dns_rdataset_isassociated(&zrdataset)) {
				dns_rdataset_clone(&zrdataset, rdataset);
				if (sigrdataset != NULL &&
				    dns_rdataset_isassociated(&zsigrdataset)) {
					dns_rdataset_clone(&zsigrdataset,
							   sigrdataset);
				}
				result = DNS_R_GLUE;
				if (db != NULL) {
					dns_db_detach(&db);
				}
				dns_db_attach(zdb, &db);
				dns_db_attachnode(db, znode, &node);
				goto cleanup;
			}
		}
		/*
		 * We don't know the answer.
		 */
		result = ISC_R_NOTFOUND;
	} else if (result == DNS_R_GLUE) {
		if (view->cachedb != NULL && !is_staticstub_zone) {
			/*
			 * We found an answer, but the cache may be better.
			 * Remember what we've got and go look in the cache.
			 */
			is_cache = true;
			dns_rdataset_clone(rdataset, &zrdataset);
			dns_rdataset_disassociate(rdataset);
			if (sigrdataset != NULL &&
			    dns_rdataset_isassociated(sigrdataset)) {
				dns_rdataset_clone(sigrdataset, &zsigrdataset);
				dns_rdataset_disassociate(sigrdataset);
			}
			dns_db_attach(db, &zdb);
			dns_db_attachnode(zdb, node, &znode);
			dns_db_detachnode(db, &node);
			dns_db_detach(&db);
			dns_db_attach(view->cachedb, &db);
			goto db_find;
		}
		/*
		 * Otherwise, the glue is the best answer.
		 */
		result = ISC_R_SUCCESS;
	}

	if (result == ISC_R_NOTFOUND && use_hints && view->hints != NULL) {
		/* Last resort: consult the root hints. */
		if (dns_rdataset_isassociated(rdataset)) {
			dns_rdataset_disassociate(rdataset);
		}
		if (sigrdataset != NULL &&
		    dns_rdataset_isassociated(sigrdataset)) {
			dns_rdataset_disassociate(sigrdataset);
		}
		if (db != NULL) {
			if (node != NULL) {
				dns_db_detachnode(db, &node);
			}
			dns_db_detach(&db);
		}
		result = dns_db_find(view->hints, name, NULL, type, options,
				     now, &node, foundname, rdataset,
				     sigrdataset);
		if (result == ISC_R_SUCCESS || result == DNS_R_GLUE) {
			/*
			 * We just used a hint.  Let the resolver know it
			 * should consider priming.
			 */
			dns_resolver_prime(view->resolver);
			dns_db_attach(view->hints, &db);
			result = DNS_R_HINT;
		} else if (result == DNS_R_NXRRSET) {
			dns_db_attach(view->hints, &db);
			result = DNS_R_HINTNXRRSET;
		} else if (result == DNS_R_NXDOMAIN) {
			result = ISC_R_NOTFOUND;
		}

		/*
		 * Cleanup if non-standard hints are used.
		 */
		if (db == NULL && node != NULL) {
			dns_db_detachnode(view->hints, &node);
		}
	}

cleanup:
	if (dns_rdataset_isassociated(&zrdataset)) {
		dns_rdataset_disassociate(&zrdataset);
		if (dns_rdataset_isassociated(&zsigrdataset)) {
			dns_rdataset_disassociate(&zsigrdataset);
		}
	}

	if (zdb != NULL) {
		if (znode != NULL) {
			dns_db_detachnode(zdb, &znode);
		}
		dns_db_detach(&zdb);
	}

	if (db != NULL) {
		if (node != NULL) {
			if (nodep != NULL) {
				/* Transfer the node reference to the caller. */
				*nodep = node;
			} else {
				dns_db_detachnode(db, &node);
			}
		}
		if (dbp != NULL) {
			/* Transfer the db reference to the caller. */
			*dbp = db;
		} else {
			dns_db_detach(&db);
		}
	} else {
		INSIST(node == NULL);
	}

	if (zone != NULL) {
		dns_zone_detach(&zone);
	}

	return (result);
}
/*
 * Simplified wrapper around dns_view_find(): the found name, database
 * and node are not returned.  Results that would only be meaningful
 * with the found name (and anything unexpected) are normalized to
 * ISC_R_NOTFOUND with the rdatasets disassociated.
 */
isc_result_t
dns_view_simplefind(dns_view_t *view, const dns_name_t *name,
		    dns_rdatatype_t type, isc_stdtime_t now,
		    unsigned int options, bool use_hints,
		    dns_rdataset_t *rdataset, dns_rdataset_t *sigrdataset) {
	isc_result_t result;
	dns_fixedname_t foundname;

	dns_fixedname_init(&foundname);
	result = dns_view_find(view, name, type, now, options, use_hints, false,
			       NULL, NULL, dns_fixedname_name(&foundname),
			       rdataset, sigrdataset);

	switch (result) {
	case DNS_R_NXDOMAIN:
		/*
		 * The rdataset and sigrdataset of the relevant NSEC record
		 * may be returned, but the caller cannot use them because
		 * foundname is not returned by this simplified API.  We
		 * disassociate them here to prevent any misuse by the
		 * caller.
		 */
		if (dns_rdataset_isassociated(rdataset)) {
			dns_rdataset_disassociate(rdataset);
		}
		if (sigrdataset != NULL &&
		    dns_rdataset_isassociated(sigrdataset)) {
			dns_rdataset_disassociate(sigrdataset);
		}
		break;
	case ISC_R_SUCCESS:
	case DNS_R_GLUE:
	case DNS_R_HINT:
	case DNS_R_NCACHENXDOMAIN:
	case DNS_R_NCACHENXRRSET:
	case DNS_R_NXRRSET:
	case DNS_R_HINTNXRRSET:
	case ISC_R_NOTFOUND:
		/* These pass through to the caller unchanged. */
		break;
	default:
		/* Anything else is reported as "not found". */
		if (dns_rdataset_isassociated(rdataset)) {
			dns_rdataset_disassociate(rdataset);
		}
		if (sigrdataset != NULL &&
		    dns_rdataset_isassociated(sigrdataset)) {
			dns_rdataset_disassociate(sigrdataset);
		}
		result = ISC_R_NOTFOUND;
		break;
	}

	return (result);
}
1999-10-27 00:43:56 +00:00
/*
 * Find the deepest known zone cut (NS rrset) enclosing 'name',
 * consulting the best-matching zone, then (optionally) the cache, then
 * (optionally) the hints.  A delegation found in the zone is
 * remembered in zfname/zrdataset/zsigrdataset while the cache is
 * searched, and is used if the cache's zone cut is not a closer
 * enclosure.  On success 'fname' (and 'dcname', if non-NULL) hold the
 * cut's name and 'rdataset'/'sigrdataset' hold the NS data.
 *
 * Fix: in the 'use_zone' path below, the guard for cloning the zone's
 * signature rdataset previously tested &zrdataset (which is always
 * associated on that path, having just been cloned), so
 * dns_rdataset_clone() could be called on an unassociated
 * zsigrdataset when the delegation carried no RRSIG.  The guard now
 * tests &zsigrdataset.
 */
isc_result_t
dns_view_findzonecut(dns_view_t *view, const dns_name_t *name,
		     dns_name_t *fname, dns_name_t *dcname, isc_stdtime_t now,
		     unsigned int options, bool use_hints, bool use_cache,
		     dns_rdataset_t *rdataset, dns_rdataset_t *sigrdataset) {
	isc_result_t result;
	dns_db_t *db;
	bool is_cache, use_zone, try_hints;
	dns_zone_t *zone;
	dns_name_t *zfname;
	dns_rdataset_t zrdataset, zsigrdataset;
	dns_fixedname_t zfixedname;
	unsigned int ztoptions = DNS_ZTFIND_MIRROR;

	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(view->frozen);

	db = NULL;
	use_zone = false;
	try_hints = false;
	zfname = NULL;

	/*
	 * Initialize.
	 */
	dns_fixedname_init(&zfixedname);
	dns_rdataset_init(&zrdataset);
	dns_rdataset_init(&zsigrdataset);

	/*
	 * Find the right database.
	 */
	zone = NULL;
	LOCK(&view->lock);
	if (view->zonetable != NULL) {
		if ((options & DNS_DBFIND_NOEXACT) != 0) {
			ztoptions |= DNS_ZTFIND_NOEXACT;
		}
		result = dns_zt_find(view->zonetable, name, ztoptions, NULL,
				     &zone);
	} else {
		result = ISC_R_NOTFOUND;
	}
	UNLOCK(&view->lock);
	if (result == ISC_R_SUCCESS || result == DNS_R_PARTIALMATCH) {
		result = dns_zone_getdb(zone, &db);
	}

	if (result == ISC_R_NOTFOUND) {
		/*
		 * We're not directly authoritative for this query name, nor
		 * is it a subdomain of any zone for which we're
		 * authoritative.
		 */
		if (use_cache && view->cachedb != NULL) {
			/*
			 * We have a cache; try it.
			 */
			dns_db_attach(view->cachedb, &db);
		} else if (use_hints && view->hints != NULL) {
			/*
			 * Maybe we have hints...
			 */
			try_hints = true;
			goto finish;
		} else {
			result = DNS_R_NXDOMAIN;
			goto cleanup;
		}
	} else if (result != ISC_R_SUCCESS) {
		/*
		 * Something is broken.
		 */
		goto cleanup;
	}
	is_cache = dns_db_iscache(db);

db_find:
	/*
	 * Look for the zonecut.
	 */
	if (!is_cache) {
		result = dns_db_find(db, name, NULL, dns_rdatatype_ns, options,
				     now, NULL, fname, rdataset, sigrdataset);
		if (result == DNS_R_DELEGATION) {
			result = ISC_R_SUCCESS;
		} else if (result != ISC_R_SUCCESS) {
			goto cleanup;
		}
		if (use_cache && view->cachedb != NULL && db != view->hints) {
			/*
			 * We found an answer, but the cache may be better.
			 */
			zfname = dns_fixedname_name(&zfixedname);
			dns_name_copy(fname, zfname);
			dns_rdataset_clone(rdataset, &zrdataset);
			dns_rdataset_disassociate(rdataset);
			if (sigrdataset != NULL &&
			    dns_rdataset_isassociated(sigrdataset)) {
				dns_rdataset_clone(sigrdataset, &zsigrdataset);
				dns_rdataset_disassociate(sigrdataset);
			}
			dns_db_detach(&db);
			dns_db_attach(view->cachedb, &db);
			is_cache = true;
			goto db_find;
		}
	} else {
		result = dns_db_findzonecut(db, name, options, now, NULL, fname,
					    dcname, rdataset, sigrdataset);
		if (result == ISC_R_SUCCESS) {
			if (zfname != NULL &&
			    (!dns_name_issubdomain(fname, zfname) ||
			     (dns_zone_gettype(zone) == dns_zone_staticstub &&
			      dns_name_equal(fname, zfname))))
			{
				/*
				 * We found a zonecut in the cache, but our
				 * zone delegation is better.
				 */
				use_zone = true;
			}
		} else if (result == ISC_R_NOTFOUND) {
			if (zfname != NULL) {
				/*
				 * We didn't find anything in the cache, but we
				 * have a zone delegation, so use it.
				 */
				use_zone = true;
				result = ISC_R_SUCCESS;
			} else if (use_hints && view->hints != NULL) {
				/*
				 * Maybe we have hints...
				 */
				try_hints = true;
				result = ISC_R_SUCCESS;
			} else {
				result = DNS_R_NXDOMAIN;
			}
		} else {
			/*
			 * Something bad happened.
			 */
			goto cleanup;
		}
	}

finish:
	if (use_zone) {
		if (dns_rdataset_isassociated(rdataset)) {
			dns_rdataset_disassociate(rdataset);
			if (sigrdataset != NULL &&
			    dns_rdataset_isassociated(sigrdataset)) {
				dns_rdataset_disassociate(sigrdataset);
			}
		}
		dns_name_copy(zfname, fname);
		if (dcname != NULL) {
			dns_name_copy(zfname, dcname);
		}
		dns_rdataset_clone(&zrdataset, rdataset);
		/*
		 * Only clone the signature if the zone delegation actually
		 * had one (dns_rdataset_clone() requires an associated
		 * source).
		 */
		if (sigrdataset != NULL &&
		    dns_rdataset_isassociated(&zsigrdataset)) {
			dns_rdataset_clone(&zsigrdataset, sigrdataset);
		}
	} else if (try_hints) {
		/*
		 * We've found nothing so far, but we have hints.
		 */
		result = dns_db_find(view->hints, dns_rootname, NULL,
				     dns_rdatatype_ns, 0, now, NULL, fname,
				     rdataset, NULL);
		if (result != ISC_R_SUCCESS) {
			/*
			 * We can't even find the hints for the root
			 * nameservers!
			 */
			if (dns_rdataset_isassociated(rdataset)) {
				dns_rdataset_disassociate(rdataset);
			}
			result = ISC_R_NOTFOUND;
		} else if (dcname != NULL) {
			dns_name_copy(fname, dcname);
		}
	}

cleanup:
	if (dns_rdataset_isassociated(&zrdataset)) {
		dns_rdataset_disassociate(&zrdataset);
		if (dns_rdataset_isassociated(&zsigrdataset)) {
			dns_rdataset_disassociate(&zsigrdataset);
		}
	}
	if (db != NULL) {
		dns_db_detach(&db);
	}
	if (zone != NULL) {
		dns_zone_detach(&zone);
	}

	return (result);
}
1999-10-15 01:43:39 +00:00
isc_result_t
dns_viewlist_find(dns_viewlist_t *list, const char *name,
2020-02-13 14:44:37 -08:00
dns_rdataclass_t rdclass, dns_view_t **viewp) {
dns_view_t *view;
REQUIRE(list != NULL);
for (view = ISC_LIST_HEAD(*list); view != NULL;
2020-02-13 14:44:37 -08:00
view = ISC_LIST_NEXT(view, link))
{
if (strcmp(view->name, name) == 0 && view->rdclass == rdclass) {
break;
}
}
if (view == NULL) {
1999-10-15 01:43:39 +00:00
return (ISC_R_NOTFOUND);
}
1999-10-15 01:43:39 +00:00
dns_view_attach(view, viewp);
return (ISC_R_SUCCESS);
}
1999-10-25 13:38:00 +00:00
/*
 * Search every view in 'list' (restricted to class 'rdclass' unless
 * 'allclasses' is true) for a zone exactly matching 'name'.  Succeeds
 * only when exactly one view defines the zone; if two views define it
 * the search stops early with ISC_R_MULTIPLE.  Partial matches count
 * as no match.
 */
isc_result_t
dns_viewlist_findzone(dns_viewlist_t *list, const dns_name_t *name,
		      bool allclasses, dns_rdataclass_t rdclass,
		      dns_zone_t **zonep) {
	dns_view_t *view;
	isc_result_t result;
	/* zone1 holds the first match; zone2 detects a duplicate. */
	dns_zone_t *zone1 = NULL, *zone2 = NULL;
	dns_zone_t **zp = NULL;

	REQUIRE(list != NULL);
	REQUIRE(zonep != NULL && *zonep == NULL);

	for (view = ISC_LIST_HEAD(*list); view != NULL;
	     view = ISC_LIST_NEXT(view, link))
	{
		if (!allclasses && view->rdclass != rdclass) {
			continue;
		}

		/*
		 * If the zone is defined in more than one view,
		 * treat it as not found.
		 */
		zp = (zone1 == NULL) ? &zone1 : &zone2;
		LOCK(&view->lock);
		if (view->zonetable != NULL) {
			result = dns_zt_find(view->zonetable, name, 0, NULL,
					     zp);
		} else {
			result = ISC_R_NOTFOUND;
		}
		UNLOCK(&view->lock);
		INSIST(result == ISC_R_SUCCESS || result == ISC_R_NOTFOUND ||
		       result == DNS_R_PARTIALMATCH);

		/* Treat a partial match as no match */
		if (result == DNS_R_PARTIALMATCH) {
			dns_zone_detach(zp);
			result = ISC_R_NOTFOUND;
			POST(result);
		}

		if (zone2 != NULL) {
			/* Found in two views: ambiguous. */
			dns_zone_detach(&zone1);
			dns_zone_detach(&zone2);
			return (ISC_R_MULTIPLE);
		}
	}

	if (zone1 != NULL) {
		dns_zone_attach(zone1, zonep);
		dns_zone_detach(&zone1);
		return (ISC_R_SUCCESS);
	}

	return (ISC_R_NOTFOUND);
}
/*
 * Load the zones in the view's zone table.  'stop' and 'newonly' are
 * passed through to dns_zt_load() unchanged.
 */
isc_result_t
dns_view_load(dns_view_t *view, bool stop, bool newonly) {
	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(view->zonetable != NULL);

	return (dns_zt_load(view->zonetable, stop, newonly));
}
2000-01-21 20:18:41 +00:00
2001-05-07 23:34:24 +00:00
/*
 * Asynchronously load the zones in the view's zone table; 'callback'
 * is invoked with 'arg' when all loads are complete (see
 * dns_zt_asyncload()).
 */
isc_result_t
dns_view_asyncload(dns_view_t *view, bool newonly, dns_zt_allloaded_t callback,
		   void *arg) {
	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(view->zonetable != NULL);

	return (dns_zt_asyncload(view->zonetable, newonly, callback, arg));
}
isc_result_t
dns_view_gettsig(dns_view_t *view, const dns_name_t *keyname,
2020-02-13 14:44:37 -08:00
dns_tsigkey_t **keyp) {
isc_result_t result;
REQUIRE(keyp != NULL && *keyp == NULL);
result = dns_tsigkey_find(keyp, keyname, NULL, view->statickeys);
if (result == ISC_R_NOTFOUND) {
result = dns_tsigkey_find(keyp, keyname, NULL,
view->dynamickeys);
}
return (result);
}
/*
 * Look up a transport of the given 'type' and 'name' in the view's
 * transport list and return it via '*transportp', or ISC_R_NOTFOUND
 * if no match exists.
 *
 * NOTE(review): no explicit attach is done here — presumably
 * dns_transport_find() returns a counted reference; confirm against
 * its contract.
 */
isc_result_t
dns_view_gettransport(dns_view_t *view, const dns_transport_type_t type,
		      const dns_name_t *name, dns_transport_t **transportp) {
	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE(transportp != NULL && *transportp == NULL);

	dns_transport_t *transport = dns_transport_find(type, name,
							view->transports);
	if (transport == NULL) {
		return (ISC_R_NOTFOUND);
	}

	*transportp = transport;
	return (ISC_R_SUCCESS);
}
isc_result_t
dns_view_getpeertsig(dns_view_t *view, const isc_netaddr_t *peeraddr,
2020-02-13 14:44:37 -08:00
dns_tsigkey_t **keyp) {
isc_result_t result;
2020-02-13 14:44:37 -08:00
dns_name_t *keyname = NULL;
dns_peer_t *peer = NULL;
result = dns_peerlist_peerbyaddr(view->peers, peeraddr, &peer);
if (result != ISC_R_SUCCESS) {
return (result);
}
result = dns_peer_getkey(peer, &keyname);
if (result != ISC_R_SUCCESS) {
return (result);
}
result = dns_view_gettsig(view, keyname, keyp);
return ((result == ISC_R_NOTFOUND) ? ISC_R_FAILURE : result);
}
2000-01-21 20:18:41 +00:00
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_checksig(dns_view_t *view, isc_buffer_t *source, dns_message_t *msg) {
2000-01-21 20:18:41 +00:00
REQUIRE(DNS_VIEW_VALID(view));
REQUIRE(source != NULL);
return (dns_tsig_verify(source, msg, view->statickeys,
view->dynamickeys));
2000-01-21 20:18:41 +00:00
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_dumpdbtostream(dns_view_t *view, FILE *fp) {
isc_result_t result;
REQUIRE(DNS_VIEW_VALID(view));
(void)fprintf(fp, ";\n; Cache dump of view '%s'\n;\n", view->name);
result = dns_master_dumptostream(view->mctx, view->cachedb, NULL,
&dns_master_style_cache,
dns_masterformat_text, NULL, fp);
if (result != ISC_R_SUCCESS) {
return (result);
}
dns_adb_dump(view->adb, fp);
dns_resolver_printbadcache(view->resolver, fp);
dns_badcache_print(view->failcache, "SERVFAIL cache", fp);
return (ISC_R_SUCCESS);
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_flushcache(dns_view_t *view, bool fixuponly) {
isc_result_t result;
REQUIRE(DNS_VIEW_VALID(view));
if (view->cachedb == NULL) {
return (ISC_R_SUCCESS);
}
if (!fixuponly) {
result = dns_cache_flush(view->cache);
if (result != ISC_R_SUCCESS) {
return (result);
}
}
dns_db_detach(&view->cachedb);
dns_cache_attachdb(view->cache, &view->cachedb);
if (view->resolver != NULL) {
dns_resolver_flushbadcache(view->resolver, NULL);
}
if (view->failcache != NULL) {
dns_badcache_flush(view->failcache);
}
dns_adb_flush(view->adb);
return (ISC_R_SUCCESS);
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_flushname(dns_view_t *view, const dns_name_t *name) {
return (dns_view_flushnode(view, name, false));
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_flushnode(dns_view_t *view, const dns_name_t *name, bool tree) {
isc_result_t result = ISC_R_SUCCESS;
REQUIRE(DNS_VIEW_VALID(view));
if (tree) {
if (view->adb != NULL) {
dns_adb_flushnames(view->adb, name);
}
if (view->resolver != NULL) {
dns_resolver_flushbadnames(view->resolver, name);
}
if (view->failcache != NULL) {
dns_badcache_flushtree(view->failcache, name);
}
} else {
if (view->adb != NULL) {
dns_adb_flushname(view->adb, name);
}
if (view->resolver != NULL) {
dns_resolver_flushbadcache(view->resolver, name);
}
if (view->failcache != NULL) {
dns_badcache_flushname(view->failcache, name);
}
}
if (view->cache != NULL) {
result = dns_cache_flushnode(view->cache, name, tree);
}
return (result);
}
void
2020-02-13 14:44:37 -08:00
dns_view_adddelegationonly(dns_view_t *view, const dns_name_t *name) {
dns_name_t *item;
Fix the rbt hashtable and grow it when setting max-cache-size There were several problems with rbt hashtable implementation: 1. Our internal hashing function returns uint64_t value, but it was silently truncated to unsigned int in dns_name_hash() and dns_name_fullhash() functions. As the SipHash 2-4 higher bits are more random, we need to use the upper half of the return value. 2. The hashtable implementation in rbt.c was using modulo to pick the slot number for the hash table. This has several problems because modulo is: a) slow, b) oblivious to patterns in the input data. This could lead to very uneven distribution of the hashed data in the hashtable. Combined with the single-linked lists we use, it could really hog-down the lookup and removal of the nodes from the rbt tree[a]. The Fibonacci Hashing is much better fit for the hashtable function here. For longer description, read "Fibonacci Hashing: The Optimization that the World Forgot"[b] or just look at the Linux kernel. Also this will make Diego very happy :). 3. The hashtable would rehash every time the number of nodes in the rbt tree would exceed 3 * (hashtable size). The overcommit will make the uneven distribution in the hashtable even worse, but the main problem lies in the rehashing - every time the database grows beyond the limit, each subsequent rehashing will be much slower. The mitigation here is letting the rbt know how big the cache can grown and pre-allocate the hashtable to be big enough to actually never need to rehash. This will consume more memory at the start, but since the size of the hashtable is capped to `1 << 32` (e.g. 4 mio entries), it will only consume maximum of 32GB of memory for hashtable in the worst case (and max-cache-size would need to be set to more than 4TB). Calling the dns_db_adjusthashsize() will also cap the maximum size of the hashtable to the pre-computed number of bits, so it won't try to consume more gigabytes of memory than available for the database. 
FIXME: What is the average size of the rbt node that gets hashed? I chose the pagesize (4k) as initial value to precompute the size of the hashtable, but the value is based on feeling and not any real data. For future work, there are more places where we use result of the hash value modulo some small number and that would benefit from Fibonacci Hashing to get better distribution. Notes: a. A doubly linked list should be used here to speedup the removal of the entries from the hashtable. b. https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/
2020-07-16 10:29:54 +02:00
unsigned int hash;
REQUIRE(DNS_VIEW_VALID(view));
if (view->delonly == NULL) {
2020-02-13 14:44:37 -08:00
view->delonly = isc_mem_get(view->mctx,
sizeof(dns_namelist_t) *
DNS_VIEW_DELONLYHASH);
for (hash = 0; hash < DNS_VIEW_DELONLYHASH; hash++) {
ISC_LIST_INIT(view->delonly[hash]);
}
}
hash = dns_name_hash(name, false) % DNS_VIEW_DELONLYHASH;
item = ISC_LIST_HEAD(view->delonly[hash]);
while (item != NULL && !dns_name_equal(item, name)) {
item = ISC_LIST_NEXT(item, link);
}
if (item != NULL) {
return;
}
item = isc_mem_get(view->mctx, sizeof(*item));
dns_name_init(item, NULL);
dns_name_dup(name, view->mctx, item);
ISC_LIST_APPEND(view->delonly[hash], item, link);
}
void
2020-02-13 14:44:37 -08:00
dns_view_excludedelegationonly(dns_view_t *view, const dns_name_t *name) {
dns_name_t *item;
Fix the rbt hashtable and grow it when setting max-cache-size There were several problems with rbt hashtable implementation: 1. Our internal hashing function returns uint64_t value, but it was silently truncated to unsigned int in dns_name_hash() and dns_name_fullhash() functions. As the SipHash 2-4 higher bits are more random, we need to use the upper half of the return value. 2. The hashtable implementation in rbt.c was using modulo to pick the slot number for the hash table. This has several problems because modulo is: a) slow, b) oblivious to patterns in the input data. This could lead to very uneven distribution of the hashed data in the hashtable. Combined with the single-linked lists we use, it could really hog-down the lookup and removal of the nodes from the rbt tree[a]. The Fibonacci Hashing is much better fit for the hashtable function here. For longer description, read "Fibonacci Hashing: The Optimization that the World Forgot"[b] or just look at the Linux kernel. Also this will make Diego very happy :). 3. The hashtable would rehash every time the number of nodes in the rbt tree would exceed 3 * (hashtable size). The overcommit will make the uneven distribution in the hashtable even worse, but the main problem lies in the rehashing - every time the database grows beyond the limit, each subsequent rehashing will be much slower. The mitigation here is letting the rbt know how big the cache can grown and pre-allocate the hashtable to be big enough to actually never need to rehash. This will consume more memory at the start, but since the size of the hashtable is capped to `1 << 32` (e.g. 4 mio entries), it will only consume maximum of 32GB of memory for hashtable in the worst case (and max-cache-size would need to be set to more than 4TB). Calling the dns_db_adjusthashsize() will also cap the maximum size of the hashtable to the pre-computed number of bits, so it won't try to consume more gigabytes of memory than available for the database. 
FIXME: What is the average size of the rbt node that gets hashed? I chose the pagesize (4k) as initial value to precompute the size of the hashtable, but the value is based on feeling and not any real data. For future work, there are more places where we use result of the hash value modulo some small number and that would benefit from Fibonacci Hashing to get better distribution. Notes: a. A doubly linked list should be used here to speedup the removal of the entries from the hashtable. b. https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/
2020-07-16 10:29:54 +02:00
unsigned int hash;
REQUIRE(DNS_VIEW_VALID(view));
if (view->rootexclude == NULL) {
2020-02-13 14:44:37 -08:00
view->rootexclude = isc_mem_get(view->mctx,
sizeof(dns_namelist_t) *
DNS_VIEW_DELONLYHASH);
for (hash = 0; hash < DNS_VIEW_DELONLYHASH; hash++) {
2003-09-19 13:27:18 +00:00
ISC_LIST_INIT(view->rootexclude[hash]);
}
}
hash = dns_name_hash(name, false) % DNS_VIEW_DELONLYHASH;
item = ISC_LIST_HEAD(view->rootexclude[hash]);
while (item != NULL && !dns_name_equal(item, name)) {
item = ISC_LIST_NEXT(item, link);
}
if (item != NULL) {
return;
}
item = isc_mem_get(view->mctx, sizeof(*item));
dns_name_init(item, NULL);
dns_name_dup(name, view->mctx, item);
ISC_LIST_APPEND(view->rootexclude[hash], item, link);
}
bool
2020-02-13 14:44:37 -08:00
dns_view_isdelegationonly(dns_view_t *view, const dns_name_t *name) {
dns_name_t *item;
Fix the rbt hashtable and grow it when setting max-cache-size There were several problems with rbt hashtable implementation: 1. Our internal hashing function returns uint64_t value, but it was silently truncated to unsigned int in dns_name_hash() and dns_name_fullhash() functions. As the SipHash 2-4 higher bits are more random, we need to use the upper half of the return value. 2. The hashtable implementation in rbt.c was using modulo to pick the slot number for the hash table. This has several problems because modulo is: a) slow, b) oblivious to patterns in the input data. This could lead to very uneven distribution of the hashed data in the hashtable. Combined with the single-linked lists we use, it could really hog-down the lookup and removal of the nodes from the rbt tree[a]. The Fibonacci Hashing is much better fit for the hashtable function here. For longer description, read "Fibonacci Hashing: The Optimization that the World Forgot"[b] or just look at the Linux kernel. Also this will make Diego very happy :). 3. The hashtable would rehash every time the number of nodes in the rbt tree would exceed 3 * (hashtable size). The overcommit will make the uneven distribution in the hashtable even worse, but the main problem lies in the rehashing - every time the database grows beyond the limit, each subsequent rehashing will be much slower. The mitigation here is letting the rbt know how big the cache can grown and pre-allocate the hashtable to be big enough to actually never need to rehash. This will consume more memory at the start, but since the size of the hashtable is capped to `1 << 32` (e.g. 4 mio entries), it will only consume maximum of 32GB of memory for hashtable in the worst case (and max-cache-size would need to be set to more than 4TB). Calling the dns_db_adjusthashsize() will also cap the maximum size of the hashtable to the pre-computed number of bits, so it won't try to consume more gigabytes of memory than available for the database. 
FIXME: What is the average size of the rbt node that gets hashed? I chose the pagesize (4k) as initial value to precompute the size of the hashtable, but the value is based on feeling and not any real data. For future work, there are more places where we use result of the hash value modulo some small number and that would benefit from Fibonacci Hashing to get better distribution. Notes: a. A doubly linked list should be used here to speedup the removal of the entries from the hashtable. b. https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/
2020-07-16 10:29:54 +02:00
unsigned int hash;
REQUIRE(DNS_VIEW_VALID(view));
if (!view->rootdelonly && view->delonly == NULL) {
return (false);
}
hash = dns_name_hash(name, false) % DNS_VIEW_DELONLYHASH;
if (view->rootdelonly && dns_name_countlabels(name) <= 2) {
if (view->rootexclude == NULL) {
return (true);
}
item = ISC_LIST_HEAD(view->rootexclude[hash]);
while (item != NULL && !dns_name_equal(item, name)) {
item = ISC_LIST_NEXT(item, link);
}
if (item == NULL) {
return (true);
}
}
if (view->delonly == NULL) {
return (false);
}
item = ISC_LIST_HEAD(view->delonly[hash]);
while (item != NULL && !dns_name_equal(item, name)) {
item = ISC_LIST_NEXT(item, link);
}
if (item == NULL) {
return (false);
}
return (true);
}
2008-04-01 23:47:10 +00:00
void
2020-02-13 14:44:37 -08:00
dns_view_setrootdelonly(dns_view_t *view, bool value) {
REQUIRE(DNS_VIEW_VALID(view));
view->rootdelonly = value;
}
bool
2020-02-13 14:44:37 -08:00
dns_view_getrootdelonly(dns_view_t *view) {
REQUIRE(DNS_VIEW_VALID(view));
return (view->rootdelonly);
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_freezezones(dns_view_t *view, bool value) {
REQUIRE(DNS_VIEW_VALID(view));
REQUIRE(view->zonetable != NULL);
return (dns_zt_freezezones(view->zonetable, view, value));
}
isc_result_t
dns_view_initntatable(dns_view_t *view, isc_taskmgr_t *taskmgr,
2020-02-13 14:44:37 -08:00
isc_timermgr_t *timermgr) {
REQUIRE(DNS_VIEW_VALID(view));
if (view->ntatable_priv != NULL) {
dns_ntatable_detach(&view->ntatable_priv);
}
return (dns_ntatable_create(view, taskmgr, timermgr,
&view->ntatable_priv));
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_getntatable(dns_view_t *view, dns_ntatable_t **ntp) {
REQUIRE(DNS_VIEW_VALID(view));
REQUIRE(ntp != NULL && *ntp == NULL);
if (view->ntatable_priv == NULL) {
return (ISC_R_NOTFOUND);
}
dns_ntatable_attach(view->ntatable_priv, ntp);
return (ISC_R_SUCCESS);
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_initsecroots(dns_view_t *view, isc_mem_t *mctx) {
REQUIRE(DNS_VIEW_VALID(view));
if (view->secroots_priv != NULL) {
dns_keytable_detach(&view->secroots_priv);
}
return (dns_keytable_create(mctx, &view->secroots_priv));
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_getsecroots(dns_view_t *view, dns_keytable_t **ktp) {
REQUIRE(DNS_VIEW_VALID(view));
REQUIRE(ktp != NULL && *ktp == NULL);
if (view->secroots_priv == NULL) {
return (ISC_R_NOTFOUND);
}
dns_keytable_attach(view->secroots_priv, ktp);
return (ISC_R_SUCCESS);
}
/*
 * Return true if a negative trust anchor covers 'name' under trust
 * anchor 'anchor' at time 'now'; false when the view has no NTA table.
 */
bool
dns_view_ntacovers(dns_view_t *view, isc_stdtime_t now, const dns_name_t *name,
		   const dns_name_t *anchor) {
	REQUIRE(DNS_VIEW_VALID(view));

	if (view->ntatable_priv == NULL) {
		return (false);
	}

	return (dns_ntatable_covered(view->ntatable_priv, now, name, anchor));
}
/*
 * Determine whether 'name' is in a secure domain according to the
 * view's security roots.  If 'checknta' is true, a covering negative
 * trust anchor downgrades the answer to insecure; '*ntap' (if
 * non-NULL) reports whether an NTA was the reason.
 */
isc_result_t
dns_view_issecuredomain(dns_view_t *view, const dns_name_t *name,
			isc_stdtime_t now, bool checknta, bool *ntap,
			bool *secure_domain) {
	isc_result_t result;
	bool secure = false;
	dns_fixedname_t fn;
	dns_name_t *anchor;

	REQUIRE(DNS_VIEW_VALID(view));

	if (view->secroots_priv == NULL) {
		return (ISC_R_NOTFOUND);
	}

	anchor = dns_fixedname_initname(&fn);

	result = dns_keytable_issecuredomain(view->secroots_priv, name, anchor,
					     &secure);
	if (result != ISC_R_SUCCESS) {
		return (result);
	}

	if (ntap != NULL) {
		*ntap = false;
	}
	if (checknta && secure && view->ntatable_priv != NULL &&
	    dns_ntatable_covered(view->ntatable_priv, now, name, anchor))
	{
		/* Covered by an NTA: report insecure. */
		if (ntap != NULL) {
			*ntap = true;
		}
		secure = false;
	}
	*secure_domain = secure;
	return (ISC_R_SUCCESS);
}
void
dns_view_untrust(dns_view_t *view, const dns_name_t *keyname,
2020-02-13 14:44:37 -08:00
dns_rdata_dnskey_t *dnskey) {
isc_result_t result;
dns_keytable_t *sr = NULL;
REQUIRE(DNS_VIEW_VALID(view));
REQUIRE(keyname != NULL);
REQUIRE(dnskey != NULL);
/*
* Clear the revoke bit, if set, so that the key will match what's
* in secroots now.
*/
dnskey->flags &= ~DNS_KEYFLAG_REVOKE;
result = dns_view_getsecroots(view, &sr);
if (result != ISC_R_SUCCESS) {
return;
}
result = dns_keytable_deletekey(sr, keyname, dnskey);
if (result == ISC_R_SUCCESS) {
/*
* If key was found in secroots, then it was a
* configured trust anchor, and we want to fail
* secure. If there are no other configured keys,
* then leave a null key so that we can't validate
* anymore.
*/
dns_keytable_marksecure(sr, keyname);
}
dns_keytable_detach(&sr);
}
/*
* Create path to a directory and a filename constructed from viewname.
* This is a front-end to isc_file_sanitize(), allowing backward
* compatibility to older versions when a file couldn't be expected
* to be in the specified directory but might be in the current working
* directory instead.
*
* It first tests for the existence of a file <viewname>.<suffix> in
* 'directory'. If the file does not exist, it checks again in the
* current working directory. If it does not exist there either,
* return the path inside the directory.
*
* Returns ISC_R_SUCCESS if a path to an existing file is found or
* a new path is created; returns ISC_R_NOSPACE if the path won't
* fit in 'buflen'.
*/
2017-05-02 10:58:41 -07:00
static isc_result_t
nz_legacy(const char *directory, const char *viewname, const char *suffix,
2020-02-13 14:44:37 -08:00
char *buffer, size_t buflen) {
isc_result_t result;
2020-02-13 14:44:37 -08:00
char newbuf[PATH_MAX];
result = isc_file_sanitize(directory, viewname, suffix, buffer, buflen);
if (result != ISC_R_SUCCESS) {
return (result);
} else if (directory == NULL || isc_file_exists(buffer)) {
return (ISC_R_SUCCESS);
} else {
/* Save buffer */
strlcpy(newbuf, buffer, sizeof(newbuf));
}
/*
* It isn't in the specified directory; check CWD.
*/
result = isc_file_sanitize(NULL, viewname, suffix, buffer, buflen);
if (result != ISC_R_SUCCESS || isc_file_exists(buffer)) {
return (result);
}
/*
* File does not exist in either 'directory' or CWD,
* so use the path in 'directory'.
*/
strlcpy(buffer, newbuf, buflen);
return (ISC_R_SUCCESS);
}
/*
 * Enable or disable "allow-new-zones" support for the view.  Any
 * previous new-zone state (file path, LMDB environment/database path,
 * config context) is released first.  When 'allow' is true, the NZF
 * path (and, with LMDB, the NZD environment with optional 'mapsize')
 * is set up; 'cfgctx'/'cfg_destroy' are stored for later teardown.
 */
isc_result_t
dns_view_setnewzones(dns_view_t *view, bool allow, void *cfgctx,
		     void (*cfg_destroy)(void **), uint64_t mapsize) {
	isc_result_t result = ISC_R_SUCCESS;
	char buffer[1024];
#ifdef HAVE_LMDB
	MDB_env *env = NULL;
	int status;
#endif /* ifdef HAVE_LMDB */

#ifndef HAVE_LMDB
	UNUSED(mapsize);
#endif /* ifndef HAVE_LMDB */

	REQUIRE(DNS_VIEW_VALID(view));
	REQUIRE((cfgctx != NULL && cfg_destroy != NULL) || !allow);

	/* Release state left over from any previous call. */
	if (view->new_zone_file != NULL) {
		isc_mem_free(view->mctx, view->new_zone_file);
		view->new_zone_file = NULL;
	}

#ifdef HAVE_LMDB
	if (view->new_zone_dbenv != NULL) {
		mdb_env_close((MDB_env *)view->new_zone_dbenv);
		view->new_zone_dbenv = NULL;
	}

	if (view->new_zone_db != NULL) {
		isc_mem_free(view->mctx, view->new_zone_db);
		view->new_zone_db = NULL;
	}
#endif /* HAVE_LMDB */

	if (view->new_zone_config != NULL) {
		view->cfg_destroy(&view->new_zone_config);
		view->cfg_destroy = NULL;
	}

	if (!allow) {
		return (ISC_R_SUCCESS);
	}

	CHECK(nz_legacy(view->new_zone_dir, view->name, "nzf", buffer,
			sizeof(buffer)));
	view->new_zone_file = isc_mem_strdup(view->mctx, buffer);

#ifdef HAVE_LMDB
	CHECK(nz_legacy(view->new_zone_dir, view->name, "nzd", buffer,
			sizeof(buffer)));
	view->new_zone_db = isc_mem_strdup(view->mctx, buffer);

	status = mdb_env_create(&env);
	if (status != MDB_SUCCESS) {
		isc_log_write(dns_lctx, DNS_LOGCATEGORY_GENERAL,
			      ISC_LOGMODULE_OTHER, ISC_LOG_ERROR,
			      "mdb_env_create failed: %s",
			      mdb_strerror(status));
		CHECK(ISC_R_FAILURE);
	}

	if (mapsize != 0ULL) {
		status = mdb_env_set_mapsize(env, mapsize);
		if (status != MDB_SUCCESS) {
			isc_log_write(dns_lctx, DNS_LOGCATEGORY_GENERAL,
				      ISC_LOGMODULE_OTHER, ISC_LOG_ERROR,
				      "mdb_env_set_mapsize failed: %s",
				      mdb_strerror(status));
			CHECK(ISC_R_FAILURE);
		}
		view->new_zone_mapsize = mapsize;
	}

	status = mdb_env_open(env, view->new_zone_db, DNS_LMDB_FLAGS, 0600);
	if (status != MDB_SUCCESS) {
		isc_log_write(dns_lctx, DNS_LOGCATEGORY_GENERAL,
			      ISC_LOGMODULE_OTHER, ISC_LOG_ERROR,
			      "mdb_env_open of '%s' failed: %s",
			      view->new_zone_db, mdb_strerror(status));
		CHECK(ISC_R_FAILURE);
	}

	view->new_zone_dbenv = env;
	env = NULL;
#endif /* HAVE_LMDB */

	view->new_zone_config = cfgctx;
	view->cfg_destroy = cfg_destroy;

cleanup:
	if (result != ISC_R_SUCCESS) {
		/* Failure: undo partial setup. */
		if (view->new_zone_file != NULL) {
			isc_mem_free(view->mctx, view->new_zone_file);
			view->new_zone_file = NULL;
		}

#ifdef HAVE_LMDB
		if (view->new_zone_db != NULL) {
			isc_mem_free(view->mctx, view->new_zone_db);
			view->new_zone_db = NULL;
		}
		if (env != NULL) {
			mdb_env_close(env);
		}
#endif /* HAVE_LMDB */
		view->new_zone_config = NULL;
		view->cfg_destroy = NULL;
	}

	return (result);
}
void
2020-02-13 14:44:37 -08:00
dns_view_setnewzonedir(dns_view_t *view, const char *dir) {
REQUIRE(DNS_VIEW_VALID(view));
if (view->new_zone_dir != NULL) {
isc_mem_free(view->mctx, view->new_zone_dir);
view->new_zone_dir = NULL;
}
if (dir == NULL) {
return;
}
view->new_zone_dir = isc_mem_strdup(view->mctx, dir);
}
const char *
2020-02-13 14:44:37 -08:00
dns_view_getnewzonedir(dns_view_t *view) {
REQUIRE(DNS_VIEW_VALID(view));
return (view->new_zone_dir);
}
isc_result_t
dns_view_searchdlz(dns_view_t *view, const dns_name_t *name,
unsigned int minlabels, dns_clientinfomethods_t *methods,
2020-02-13 14:44:37 -08:00
dns_clientinfo_t *clientinfo, dns_db_t **dbp) {
dns_fixedname_t fname;
dns_name_t *zonename;
unsigned int namelabels;
unsigned int i;
isc_result_t result;
dns_dlzfindzone_t findzone;
2020-02-13 14:44:37 -08:00
dns_dlzdb_t *dlzdb;
dns_db_t *db, *best = NULL;
/*
* Performs checks to make sure data is as we expect it to be.
*/
REQUIRE(DNS_VIEW_VALID(view));
REQUIRE(name != NULL);
REQUIRE(dbp != NULL && *dbp == NULL);
/* setup a "fixed" dns name */
zonename = dns_fixedname_initname(&fname);
/* count the number of labels in the name */
namelabels = dns_name_countlabels(name);
for (dlzdb = ISC_LIST_HEAD(view->dlz_searched); dlzdb != NULL;
2020-02-13 14:44:37 -08:00
dlzdb = ISC_LIST_NEXT(dlzdb, link))
{
REQUIRE(DNS_DLZ_VALID(dlzdb));
/*
* loop through starting with the longest domain name and
* trying shorter names portions of the name until we find a
* match, have an error, or are below the 'minlabels'
* threshold. minlabels is 0, if neither the standard
* database nor any previous DLZ database had a zone name
* match. Otherwise minlabels is the number of labels
* in that name. We need to beat that for a "better"
* match for this DLZ database to be authoritative.
*/
for (i = namelabels; i > minlabels && i > 1; i--) {
if (i == namelabels) {
dns_name_copy(name, zonename);
} else {
dns_name_split(name, i, NULL, zonename);
}
/* ask SDLZ driver if the zone is supported */
db = NULL;
findzone = dlzdb->implementation->methods->findzone;
result = (*findzone)(dlzdb->implementation->driverarg,
dlzdb->dbdata, dlzdb->mctx,
view->rdclass, zonename, methods,
clientinfo, &db);
if (result != ISC_R_NOTFOUND) {
if (best != NULL) {
dns_db_detach(&best);
}
if (result == ISC_R_SUCCESS) {
INSIST(db != NULL);
dns_db_attach(db, &best);
dns_db_detach(&db);
minlabels = i;
} else {
if (db != NULL) {
dns_db_detach(&db);
}
break;
}
} else if (db != NULL) {
dns_db_detach(&db);
}
}
}
if (best != NULL) {
dns_db_attach(best, dbp);
dns_db_detach(&best);
return (ISC_R_SUCCESS);
}
return (ISC_R_NOTFOUND);
}
uint32_t
2020-02-13 14:44:37 -08:00
dns_view_getfailttl(dns_view_t *view) {
REQUIRE(DNS_VIEW_VALID(view));
return (view->fail_ttl);
}
void
2020-02-13 14:44:37 -08:00
dns_view_setfailttl(dns_view_t *view, uint32_t fail_ttl) {
REQUIRE(DNS_VIEW_VALID(view));
view->fail_ttl = fail_ttl;
}
isc_result_t
2020-02-13 14:44:37 -08:00
dns_view_saventa(dns_view_t *view) {
isc_result_t result;
bool removefile = false;
dns_ntatable_t *ntatable = NULL;
2020-02-13 14:44:37 -08:00
FILE *fp = NULL;
REQUIRE(DNS_VIEW_VALID(view));
if (view->nta_lifetime == 0) {
return (ISC_R_SUCCESS);
}
/* Open NTA save file for overwrite. */
CHECK(isc_stdio_open(view->nta_file, "w", &fp));
result = dns_view_getntatable(view, &ntatable);
if (result == ISC_R_NOTFOUND) {
removefile = true;
result = ISC_R_SUCCESS;
goto cleanup;
} else {
2015-01-12 23:45:21 +00:00
CHECK(result);
}
result = dns_ntatable_save(ntatable, fp);
if (result == ISC_R_NOTFOUND) {
removefile = true;
result = ISC_R_SUCCESS;
} else if (result == ISC_R_SUCCESS) {
result = isc_stdio_close(fp);
fp = NULL;
}
cleanup:
if (ntatable != NULL) {
dns_ntatable_detach(&ntatable);
}
if (fp != NULL) {
(void)isc_stdio_close(fp);
}
/* Don't leave half-baked NTA save files lying around. */
if (result != ISC_R_SUCCESS || removefile) {
(void)isc_file_remove(view->nta_file);
}
return (result);
}
#define TSTR(t) ((t).value.as_textregion.base)
#define TLEN(t) ((t).value.as_textregion.length)
isc_result_t
2020-02-13 14:44:37 -08:00
/*
 * Reload persisted negative trust anchors (NTAs) from view->nta_file.
 * Each record is "<name> regular|forced <timestamp>".  Expired entries
 * are logged and skipped; entries expiring more than one week from now
 * are clamped to one week.
 */
dns_view_loadnta(dns_view_t *view) {
	isc_result_t result;
	dns_ntatable_t *table = NULL;
	isc_lex_t *lexer = NULL;
	isc_token_t tok;
	isc_stdtime_t now;

	REQUIRE(DNS_VIEW_VALID(view));

	/* A zero NTA lifetime means NTAs are disabled; nothing to load. */
	if (view->nta_lifetime == 0) {
		return (ISC_R_SUCCESS);
	}

	CHECK(isc_lex_create(view->mctx, 1025, &lexer));
	CHECK(isc_lex_openfile(lexer, view->nta_file));
	CHECK(dns_view_getntatable(view, &table));
	isc_stdtime_get(&now);

	for (;;) {
		int options = (ISC_LEXOPT_EOL | ISC_LEXOPT_EOF);
		char *name, *type, *timestamp;
		size_t len;
		dns_fixedname_t fixed;
		const dns_name_t *ntaname;
		isc_buffer_t buf;
		isc_stdtime_t expiry;
		bool forced;

		/* First token: the NTA owner name (or end of file). */
		CHECK(isc_lex_gettoken(lexer, options, &tok));
		if (tok.type == isc_tokentype_eof) {
			break;
		} else if (tok.type != isc_tokentype_string) {
			CHECK(ISC_R_UNEXPECTEDTOKEN);
		}
		name = TSTR(tok);
		len = TLEN(tok);

		if (strcmp(name, ".") == 0) {
			ntaname = dns_rootname;
		} else {
			dns_name_t *fname = dns_fixedname_initname(&fixed);

			isc_buffer_init(&buf, name, (unsigned int)len);
			isc_buffer_add(&buf, (unsigned int)len);
			CHECK(dns_name_fromtext(fname, &buf, dns_rootname, 0,
						NULL));
			ntaname = fname;
		}

		/* Second token: "regular" or "forced". */
		CHECK(isc_lex_gettoken(lexer, options, &tok));
		if (tok.type != isc_tokentype_string) {
			CHECK(ISC_R_UNEXPECTEDTOKEN);
		}
		type = TSTR(tok);
		if (strcmp(type, "regular") == 0) {
			forced = false;
		} else if (strcmp(type, "forced") == 0) {
			forced = true;
		} else {
			CHECK(ISC_R_UNEXPECTEDTOKEN);
		}

		/* Third token: the expiry time, parsed by
		 * dns_time32_fromtext(). */
		CHECK(isc_lex_gettoken(lexer, options, &tok));
		if (tok.type != isc_tokentype_string) {
			CHECK(ISC_R_UNEXPECTEDTOKEN);
		}
		timestamp = TSTR(tok);
		CHECK(dns_time32_fromtext(timestamp, &expiry));

		/* The record must be terminated by EOL (or EOF). */
		CHECK(isc_lex_gettoken(lexer, options, &tok));
		if (tok.type != isc_tokentype_eol &&
		    tok.type != isc_tokentype_eof)
		{
			CHECK(ISC_R_UNEXPECTEDTOKEN);
		}

		if (now <= expiry) {
			/* Clamp to at most one week (604800 s) from now. */
			if (expiry > (now + 604800)) {
				expiry = now + 604800;
			}
			(void)dns_ntatable_add(table, ntaname, forced, 0,
					       expiry);
		} else {
			char nb[DNS_NAME_FORMATSIZE];

			dns_name_format(ntaname, nb, sizeof(nb));
			isc_log_write(dns_lctx, DNS_LOGCATEGORY_DNSSEC,
				      DNS_LOGMODULE_NTA, ISC_LOG_INFO,
				      "ignoring expired NTA at %s", nb);
		}
	}

cleanup:
	if (table != NULL) {
		dns_ntatable_detach(&table);
	}
	if (lexer != NULL) {
		isc_lex_close(lexer);
		isc_lex_destroy(&lexer);
	}
	return (result);
}
void
2020-02-13 14:44:37 -08:00
dns_view_setviewcommit(dns_view_t *view) {
Address lock-order-inversion Obtain references to view->redirect and view->managed_keys then release view->lock so dns_zone_setviewcommit and dns_zone_setviewrevert can obtain the view->lock while holding zone->lock. WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) (pid=9132) Cycle in lock order graph: M987831431424375936 (0x000000000000) => M1012319771577875480 (0x000000000000) => M987831431424375936 Mutex M1012319771577875480 acquired here while holding mutex M987831431424375936 in thread T2: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_zone_setviewcommit /builds/isc-projects/bind9/lib/dns/zone.c:1571:2 (libdns.so.1110+0x1d74eb) #2 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2388:3 (libdns.so.1110+0x1cfe29) #3 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) #4 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) #5 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) #6 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 (named+0x4e451a) #7 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) #8 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #9 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M987831431424375936 previously acquired by the same thread here: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2382:2 (libdns.so.1110+0x1cfde7) #2 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) #3 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) #4 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) #5 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 
(named+0x4e451a) #6 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) #7 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #8 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M987831431424375936 acquired here while holding mutex M1012319771577875480 in thread T7: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_view_findzonecut2 /builds/isc-projects/bind9/lib/dns/view.c:1300:2 (libdns.so.1110+0x1cc93a) #2 dns_view_findzonecut /builds/isc-projects/bind9/lib/dns/view.c:1261:9 (libdns.so.1110+0x1cc864) #3 fctx_create /builds/isc-projects/bind9/lib/dns/resolver.c:4459:13 (libdns.so.1110+0x1779d3) #4 dns_resolver_createfetch3 /builds/isc-projects/bind9/lib/dns/resolver.c:9628:12 (libdns.so.1110+0x176cb6) #5 dns_resolver_createfetch /builds/isc-projects/bind9/lib/dns/resolver.c:9504:10 (libdns.so.1110+0x174e17) #6 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:10061:12 (libdns.so.1110+0x2055a5) #7 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) #8 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) #9 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #10 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M1012319771577875480 previously acquired by the same thread here: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:9951:2 (libdns.so.1110+0x204dc3) #2 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) #3 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) #4 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #5 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Thread T2 'isc-worker0001' (tid=9163, running) 
created by main thread at: #0 pthread_create <null> (named+0x446edb) #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) Thread T7 'isc-worker0006' (tid=9168, running) created by main thread at: #0 pthread_create <null> (named+0x446edb) #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) (/builds/isc-projects/bind9/bin/named/.libs/named+0x4642a6) in pthread_mutex_lock
2020-08-24 11:44:09 +10:00
dns_zone_t *redirect = NULL, *managed_keys = NULL;
REQUIRE(DNS_VIEW_VALID(view));
LOCK(&view->lock);
if (view->redirect != NULL) {
Address lock-order-inversion Obtain references to view->redirect and view->managed_keys then release view->lock so dns_zone_setviewcommit and dns_zone_setviewrevert can obtain the view->lock while holding zone->lock. WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) (pid=9132) Cycle in lock order graph: M987831431424375936 (0x000000000000) => M1012319771577875480 (0x000000000000) => M987831431424375936 Mutex M1012319771577875480 acquired here while holding mutex M987831431424375936 in thread T2: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_zone_setviewcommit /builds/isc-projects/bind9/lib/dns/zone.c:1571:2 (libdns.so.1110+0x1d74eb) #2 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2388:3 (libdns.so.1110+0x1cfe29) #3 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) #4 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) #5 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) #6 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 (named+0x4e451a) #7 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) #8 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #9 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M987831431424375936 previously acquired by the same thread here: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2382:2 (libdns.so.1110+0x1cfde7) #2 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) #3 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) #4 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) #5 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 
(named+0x4e451a) #6 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) #7 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #8 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M987831431424375936 acquired here while holding mutex M1012319771577875480 in thread T7: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_view_findzonecut2 /builds/isc-projects/bind9/lib/dns/view.c:1300:2 (libdns.so.1110+0x1cc93a) #2 dns_view_findzonecut /builds/isc-projects/bind9/lib/dns/view.c:1261:9 (libdns.so.1110+0x1cc864) #3 fctx_create /builds/isc-projects/bind9/lib/dns/resolver.c:4459:13 (libdns.so.1110+0x1779d3) #4 dns_resolver_createfetch3 /builds/isc-projects/bind9/lib/dns/resolver.c:9628:12 (libdns.so.1110+0x176cb6) #5 dns_resolver_createfetch /builds/isc-projects/bind9/lib/dns/resolver.c:9504:10 (libdns.so.1110+0x174e17) #6 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:10061:12 (libdns.so.1110+0x2055a5) #7 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) #8 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) #9 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #10 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M1012319771577875480 previously acquired by the same thread here: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:9951:2 (libdns.so.1110+0x204dc3) #2 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) #3 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) #4 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #5 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Thread T2 'isc-worker0001' (tid=9163, running) 
created by main thread at: #0 pthread_create <null> (named+0x446edb) #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) Thread T7 'isc-worker0006' (tid=9168, running) created by main thread at: #0 pthread_create <null> (named+0x446edb) #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) (/builds/isc-projects/bind9/bin/named/.libs/named+0x4642a6) in pthread_mutex_lock
2020-08-24 11:44:09 +10:00
dns_zone_attach(view->redirect, &redirect);
}
if (view->managed_keys != NULL) {
Address lock-order-inversion Obtain references to view->redirect and view->managed_keys then release view->lock so dns_zone_setviewcommit and dns_zone_setviewrevert can obtain the view->lock while holding zone->lock. WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) (pid=9132) Cycle in lock order graph: M987831431424375936 (0x000000000000) => M1012319771577875480 (0x000000000000) => M987831431424375936 Mutex M1012319771577875480 acquired here while holding mutex M987831431424375936 in thread T2: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_zone_setviewcommit /builds/isc-projects/bind9/lib/dns/zone.c:1571:2 (libdns.so.1110+0x1d74eb) #2 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2388:3 (libdns.so.1110+0x1cfe29) #3 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) #4 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) #5 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) #6 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 (named+0x4e451a) #7 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) #8 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #9 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M987831431424375936 previously acquired by the same thread here: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2382:2 (libdns.so.1110+0x1cfde7) #2 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) #3 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) #4 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) #5 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 
(named+0x4e451a) #6 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) #7 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #8 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M987831431424375936 acquired here while holding mutex M1012319771577875480 in thread T7: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_view_findzonecut2 /builds/isc-projects/bind9/lib/dns/view.c:1300:2 (libdns.so.1110+0x1cc93a) #2 dns_view_findzonecut /builds/isc-projects/bind9/lib/dns/view.c:1261:9 (libdns.so.1110+0x1cc864) #3 fctx_create /builds/isc-projects/bind9/lib/dns/resolver.c:4459:13 (libdns.so.1110+0x1779d3) #4 dns_resolver_createfetch3 /builds/isc-projects/bind9/lib/dns/resolver.c:9628:12 (libdns.so.1110+0x176cb6) #5 dns_resolver_createfetch /builds/isc-projects/bind9/lib/dns/resolver.c:9504:10 (libdns.so.1110+0x174e17) #6 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:10061:12 (libdns.so.1110+0x2055a5) #7 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) #8 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) #9 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #10 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M1012319771577875480 previously acquired by the same thread here: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:9951:2 (libdns.so.1110+0x204dc3) #2 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) #3 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) #4 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #5 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Thread T2 'isc-worker0001' (tid=9163, running) 
created by main thread at: #0 pthread_create <null> (named+0x446edb) #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) Thread T7 'isc-worker0006' (tid=9168, running) created by main thread at: #0 pthread_create <null> (named+0x446edb) #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) (/builds/isc-projects/bind9/bin/named/.libs/named+0x4642a6) in pthread_mutex_lock
2020-08-24 11:44:09 +10:00
dns_zone_attach(view->managed_keys, &managed_keys);
}
UNLOCK(&view->lock);
Address lock-order-inversion Obtain references to view->redirect and view->managed_keys then release view->lock so dns_zone_setviewcommit and dns_zone_setviewrevert can obtain the view->lock while holding zone->lock. WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) (pid=9132) Cycle in lock order graph: M987831431424375936 (0x000000000000) => M1012319771577875480 (0x000000000000) => M987831431424375936 Mutex M1012319771577875480 acquired here while holding mutex M987831431424375936 in thread T2: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_zone_setviewcommit /builds/isc-projects/bind9/lib/dns/zone.c:1571:2 (libdns.so.1110+0x1d74eb) #2 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2388:3 (libdns.so.1110+0x1cfe29) #3 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) #4 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) #5 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) #6 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 (named+0x4e451a) #7 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) #8 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #9 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M987831431424375936 previously acquired by the same thread here: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2382:2 (libdns.so.1110+0x1cfde7) #2 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) #3 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) #4 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) #5 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 
(named+0x4e451a) #6 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) #7 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #8 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M987831431424375936 acquired here while holding mutex M1012319771577875480 in thread T7: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_view_findzonecut2 /builds/isc-projects/bind9/lib/dns/view.c:1300:2 (libdns.so.1110+0x1cc93a) #2 dns_view_findzonecut /builds/isc-projects/bind9/lib/dns/view.c:1261:9 (libdns.so.1110+0x1cc864) #3 fctx_create /builds/isc-projects/bind9/lib/dns/resolver.c:4459:13 (libdns.so.1110+0x1779d3) #4 dns_resolver_createfetch3 /builds/isc-projects/bind9/lib/dns/resolver.c:9628:12 (libdns.so.1110+0x176cb6) #5 dns_resolver_createfetch /builds/isc-projects/bind9/lib/dns/resolver.c:9504:10 (libdns.so.1110+0x174e17) #6 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:10061:12 (libdns.so.1110+0x2055a5) #7 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) #8 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) #9 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #10 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M1012319771577875480 previously acquired by the same thread here: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:9951:2 (libdns.so.1110+0x204dc3) #2 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) #3 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) #4 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #5 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Thread T2 'isc-worker0001' (tid=9163, running) 
created by main thread at: #0 pthread_create <null> (named+0x446edb) #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) Thread T7 'isc-worker0006' (tid=9168, running) created by main thread at: #0 pthread_create <null> (named+0x446edb) #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) (/builds/isc-projects/bind9/bin/named/.libs/named+0x4642a6) in pthread_mutex_lock
2020-08-24 11:44:09 +10:00
Convert dispatch to netmgr The flow of operations in dispatch is changing and will now be similar for both UDP and TCP queries: 1) Call dns_dispatch_addresponse() to assign a query ID and register that we'll be listening for a response with that ID soon. the parameters for this function include callback functions to inform the caller when the socket is connected and when the message has been sent, as well as a task action that will be sent when the response arrives. (later this could become a netmgr callback, but at this stage to minimize disruption to the calling code, we continue to use isc_task for the response event.) on successful completion of this function, a dispatch entry object will be instantiated. 2) Call dns_dispatch_connect() on the dispatch entry. this runs isc_nm_udpconnect() or isc_nm_tcpdnsconnect(), as needed, and begins listening for responses. the caller is informed via a callback function when the connection is established. 3) Call dns_dispatch_send() on the dispatch entry. this runs isc_nm_send() to send a request. 4) Call dns_dispatch_removeresponse() to terminate listening and close the connection. Implementation comments below: - As we will be using netmgr buffers now. code to send the length in TCP queries has also been removed as that is handled by the netmgr. - TCP dispatches can be used by multiple simultaneous queries, so dns_dispatch_connect() now checks whether the dispatch is already connected before calling isc_nm_tcpdnsconnect() again. - Running dns_dispatch_getnext() from a non-network thread caused a crash due to assertions in the netmgr read functions that appear to be unnecessary now. the assertions have been removed. - fctx->nqueries was formerly incremented when the connection was successful, but is now incremented when the query is started and decremented if the connection fails. - It's no longer necessary for each dispatch to have a pool of tasks, so there's now a single task per dispatch. 
- Dispatch code to avoid UDP ports already in use has been removed. - dns_resolver and dns_request have been modified to use netmgr callback functions instead of task events. some additional changes were needed to handle shutdown processing correctly. - Timeout processing is not yet fully converted to use netmgr timeouts. - Fixed a lock order cycle reported by TSAN (view -> zone-> adb -> view) by by calling dns_zt functions without holding the view lock.
2021-01-14 13:02:57 -08:00
if (view->zonetable != NULL) {
dns_zt_setviewcommit(view->zonetable);
}
Address lock-order-inversion Obtain references to view->redirect and view->managed_keys then release view->lock so dns_zone_setviewcommit and dns_zone_setviewrevert can obtain the view->lock while holding zone->lock. WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock) (pid=9132) Cycle in lock order graph: M987831431424375936 (0x000000000000) => M1012319771577875480 (0x000000000000) => M987831431424375936 Mutex M1012319771577875480 acquired here while holding mutex M987831431424375936 in thread T2: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_zone_setviewcommit /builds/isc-projects/bind9/lib/dns/zone.c:1571:2 (libdns.so.1110+0x1d74eb) #2 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2388:3 (libdns.so.1110+0x1cfe29) #3 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) #4 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) #5 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) #6 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 (named+0x4e451a) #7 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) #8 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #9 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M987831431424375936 previously acquired by the same thread here: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_view_setviewcommit /builds/isc-projects/bind9/lib/dns/view.c:2382:2 (libdns.so.1110+0x1cfde7) #2 load_configuration /builds/isc-projects/bind9/bin/named/./server.c:8188:3 (named+0x51eadd) #3 loadconfig /builds/isc-projects/bind9/bin/named/./server.c:9438:11 (named+0x510c66) #4 ns_server_reconfigcommand /builds/isc-projects/bind9/bin/named/./server.c:9773:2 (named+0x510b41) #5 ns_control_docommand /builds/isc-projects/bind9/bin/named/control.c:243:12 
(named+0x4e451a) #6 control_recvmessage /builds/isc-projects/bind9/bin/named/controlconf.c:465:13 (named+0x4e9056) #7 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #8 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M987831431424375936 acquired here while holding mutex M1012319771577875480 in thread T7: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 dns_view_findzonecut2 /builds/isc-projects/bind9/lib/dns/view.c:1300:2 (libdns.so.1110+0x1cc93a) #2 dns_view_findzonecut /builds/isc-projects/bind9/lib/dns/view.c:1261:9 (libdns.so.1110+0x1cc864) #3 fctx_create /builds/isc-projects/bind9/lib/dns/resolver.c:4459:13 (libdns.so.1110+0x1779d3) #4 dns_resolver_createfetch3 /builds/isc-projects/bind9/lib/dns/resolver.c:9628:12 (libdns.so.1110+0x176cb6) #5 dns_resolver_createfetch /builds/isc-projects/bind9/lib/dns/resolver.c:9504:10 (libdns.so.1110+0x174e17) #6 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:10061:12 (libdns.so.1110+0x2055a5) #7 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) #8 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) #9 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #10 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Mutex M1012319771577875480 previously acquired by the same thread here: #0 pthread_mutex_lock <null> (named+0x4642a6) #1 zone_refreshkeys /builds/isc-projects/bind9/lib/dns/zone.c:9951:2 (libdns.so.1110+0x204dc3) #2 zone_maintenance /builds/isc-projects/bind9/lib/dns/zone.c:10274:5 (libdns.so.1110+0x203a78) #3 zone_timer /builds/isc-projects/bind9/lib/dns/zone.c:13106:2 (libdns.so.1110+0x1e815a) #4 dispatch /builds/isc-projects/bind9/lib/isc/task.c:1157:7 (libisc.so.1107+0x507d5) #5 run /builds/isc-projects/bind9/lib/isc/task.c:1331:2 (libisc.so.1107+0x4d729) Thread T2 'isc-worker0001' (tid=9163, running) 
created by main thread at: #0 pthread_create <null> (named+0x446edb) #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) Thread T7 'isc-worker0006' (tid=9168, running) created by main thread at: #0 pthread_create <null> (named+0x446edb) #1 isc_thread_create /builds/isc-projects/bind9/lib/isc/pthreads/thread.c:60:8 (libisc.so.1107+0x726d8) #2 isc__taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:1468:7 (libisc.so.1107+0x4d635) #3 isc_taskmgr_create /builds/isc-projects/bind9/lib/isc/task.c:2109:11 (libisc.so.1107+0x4f587) #4 create_managers /builds/isc-projects/bind9/bin/named/./main.c:886:11 (named+0x4f1a97) #5 setup /builds/isc-projects/bind9/bin/named/./main.c:1305:11 (named+0x4f05ee) #6 main /builds/isc-projects/bind9/bin/named/./main.c:1556:2 (named+0x4ef12d) SUMMARY: ThreadSanitizer: lock-order-inversion (potential deadlock) (/builds/isc-projects/bind9/bin/named/.libs/named+0x4642a6) in pthread_mutex_lock
2020-08-24 11:44:09 +10:00
if (redirect != NULL) {
dns_zone_setviewcommit(redirect);
dns_zone_detach(&redirect);
}
if (managed_keys != NULL) {
dns_zone_setviewcommit(managed_keys);
dns_zone_detach(&managed_keys);
}
}
void
2020-02-13 14:44:37 -08:00
dns_view_setviewrevert(dns_view_t *view) {
	dns_zone_t *redirect = NULL, *managed_keys = NULL;
	dns_zt_t *zonetable;

	REQUIRE(DNS_VIEW_VALID(view));

	/*
	 * To avoid a lock-order inversion (reported by ThreadSanitizer as
	 * a potential deadlock between view->lock and zone->lock), do not
	 * call dns_zone_setviewrevert() while holding view->lock: that
	 * function acquires zone->lock, and other code paths (e.g.
	 * dns_view_findzonecut()) take view->lock while already holding
	 * zone->lock.  Instead, grab references to view->redirect and
	 * view->managed_keys under the lock, release the lock, and only
	 * then revert the zones.
	 */
	LOCK(&view->lock);
	if (view->redirect != NULL) {
		dns_zone_attach(view->redirect, &redirect);
	}
	if (view->managed_keys != NULL) {
		dns_zone_attach(view->managed_keys, &managed_keys);
	}
	zonetable = view->zonetable;
	UNLOCK(&view->lock);

	if (redirect != NULL) {
		dns_zone_setviewrevert(redirect);
		dns_zone_detach(&redirect);
	}
	if (managed_keys != NULL) {
		dns_zone_setviewrevert(managed_keys);
		dns_zone_detach(&managed_keys);
	}
	if (zonetable != NULL) {
		/*
		 * dns_zt_setviewrevert() attempts to lock this view, so the
		 * view lock must already have been released (see above).
		 */
		dns_zt_setviewrevert(zonetable);
	}
}
bool
dns_view_staleanswerenabled(dns_view_t *view) {
	uint32_t max_stale_ttl = 0;

	REQUIRE(DNS_VIEW_VALID(view));

	/*
	 * Stale answers are only possible if the cache database has a
	 * nonzero serve-stale TTL configured.
	 */
	if (dns_db_getservestalettl(view->cachedb, &max_stale_ttl) !=
	    ISC_R_SUCCESS) {
		return (false);
	}
	if (max_stale_ttl == 0) {
		return (false);
	}

	/*
	 * "yes" enables stale answers unconditionally; "conf" defers to
	 * the view's stale-answer-enable setting; anything else disables.
	 */
	switch (view->staleanswersok) {
	case dns_stale_answer_yes:
		return (true);
	case dns_stale_answer_conf:
		return (view->staleanswersenable);
	default:
		return (false);
	}
}
void
dns_view_flushonshutdown(dns_view_t *view, bool flush) {
	REQUIRE(DNS_VIEW_VALID(view));

	/*
	 * Record whether the view's state should be flushed when the
	 * view is shut down; only the flag is stored here, it is acted
	 * upon elsewhere at shutdown time.
	 */
	view->flush = flush;
}