/*
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
*
* SPDX-License-Identifier: MPL-2.0
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, you can obtain one at https://mozilla.org/MPL/2.0/.
*
* See the COPYRIGHT file distributed with this work for additional
* information regarding copyright ownership.
*/
/*! \file */
#include <inttypes.h>
#include <limits.h> /* UINT_MAX */
#include <stdbool.h>
#include <isc/async.h>
#include <isc/log.h>
#include <isc/loop.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
#include <isc/result.h>
#include <isc/thread.h>
#include <isc/tls.h>
#include <isc/util.h>
#include <dns/acl.h>
#include <dns/compress.h>
#include <dns/dispatch.h>
#include <dns/message.h>
#include <dns/rdata.h>
#include <dns/rdatastruct.h>
#include <dns/request.h>
#include <dns/transport.h>
#include <dns/tsig.h>
#define REQUESTMGR_MAGIC ISC_MAGIC('R', 'q', 'u', 'M')
#define VALID_REQUESTMGR(mgr) ISC_MAGIC_VALID(mgr, REQUESTMGR_MAGIC)
#define REQUEST_MAGIC ISC_MAGIC('R', 'q', 'u', '!')
#define VALID_REQUEST(request) ISC_MAGIC_VALID(request, REQUEST_MAGIC)
typedef ISC_LIST(dns_request_t) dns_requestlist_t;
struct dns_requestmgr {
unsigned int magic;
isc_mem_t *mctx;
isc_refcount_t references;
isc_loopmgr_t *loopmgr;
atomic_bool shuttingdown;
dns_dispatchmgr_t *dispatchmgr;
dns_dispatchset_t *dispatches4;
dns_dispatchset_t *dispatches6;
dns_requestlist_t *requests;
};
struct dns_request {
unsigned int magic;
isc_refcount_t references;
isc_mem_t *mctx;
int32_t flags;
isc_loop_t *loop;
unsigned int tid;
isc_result_t result;
isc_job_cb cb;
void *arg;
ISC_LINK(dns_request_t) link;
isc_buffer_t *query;
isc_buffer_t *answer;
dns_dispatch_t *dispatch;
dns_dispentry_t *dispentry;
dns_requestmgr_t *requestmgr;
isc_buffer_t *tsig;
dns_tsigkey_t *tsigkey;
isc_sockaddr_t destaddr;
unsigned int connect_timeout;
unsigned int timeout;
unsigned int udpcount;
};
#define DNS_REQUEST_F_CONNECTING (1 << 0)
#define DNS_REQUEST_F_SENDING (1 << 1)
#define DNS_REQUEST_F_COMPLETE (1 << 2)
#define DNS_REQUEST_F_TCP (1 << 3)
#define DNS_REQUEST_CONNECTING(r) (((r)->flags & DNS_REQUEST_F_CONNECTING) != 0)
#define DNS_REQUEST_SENDING(r) (((r)->flags & DNS_REQUEST_F_SENDING) != 0)
#define DNS_REQUEST_COMPLETE(r) (((r)->flags & DNS_REQUEST_F_COMPLETE) != 0)
/***
*** Forward
***/
static isc_result_t
req_render(dns_message_t *message, isc_buffer_t **buffer, unsigned int options,
isc_mem_t *mctx);
static void
req_response(isc_result_t result, isc_region_t *region, void *arg);
static void
req_senddone(isc_result_t eresult, isc_region_t *region, void *arg);
static void
req_cleanup(dns_request_t *request);
static void
req_sendevent(dns_request_t *request, isc_result_t result);
static void
req_connected(isc_result_t eresult, isc_region_t *region, void *arg);
static void
req_destroy(dns_request_t *request);
static void
req_log(int level, const char *fmt, ...) ISC_FORMAT_PRINTF(2, 3);
/***
*** Public
***/
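
/*
 * Typical caller flow, sketched loosely (setup of mctx, loopmgr,
 * dispatchmgr, message, destaddr, the callback and its argument is
 * elided, error handling is omitted, and the timeout/retry values
 * are illustrative only; cbarg would normally carry the request
 * pointer back to the callback):
 *
 *	dns_requestmgr_t *mgr = NULL;
 *	dns_requestmgr_create(mctx, loopmgr, dispatchmgr,
 *			      dispatchv4, dispatchv6, &mgr);
 *
 *	dns_request_t *req = NULL;
 *	dns_request_create(mgr, message, NULL, &destaddr, NULL, NULL,
 *			   0, NULL, 30, 30, 3, 2, loop, cb, cbarg,
 *			   &req);
 *
 *	(later, from cb:)
 *	dns_request_getresponse(req, response, 0);
 *	dns_request_destroy(&req);
 *
 *	dns_requestmgr_shutdown(mgr);
 *	dns_requestmgr_detach(&mgr);
 */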
isc_result_t
dns_requestmgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr,
dns_dispatchmgr_t *dispatchmgr,
dns_dispatch_t *dispatchv4, dns_dispatch_t *dispatchv6,
dns_requestmgr_t **requestmgrp) {
REQUIRE(requestmgrp != NULL && *requestmgrp == NULL);
REQUIRE(dispatchmgr != NULL);
req_log(ISC_LOG_DEBUG(3), "%s", __func__);
dns_requestmgr_t *requestmgr = isc_mem_get(mctx, sizeof(*requestmgr));
*requestmgr = (dns_requestmgr_t){
.magic = REQUESTMGR_MAGIC,
.loopmgr = loopmgr,
};
isc_mem_attach(mctx, &requestmgr->mctx);
uint32_t nloops = isc_loopmgr_nloops(requestmgr->loopmgr);
requestmgr->requests = isc_mem_cget(requestmgr->mctx, nloops,
sizeof(requestmgr->requests[0]));
for (size_t i = 0; i < nloops; i++) {
ISC_LIST_INIT(requestmgr->requests[i]);
/* unreferenced in requests_shutdown() */
isc_loop_ref(isc_loop_get(requestmgr->loopmgr, i));
}
dns_dispatchmgr_attach(dispatchmgr, &requestmgr->dispatchmgr);
if (dispatchv4 != NULL) {
dns_dispatchset_create(requestmgr->mctx, dispatchv4,
&requestmgr->dispatches4,
isc_loopmgr_nloops(requestmgr->loopmgr));
}
if (dispatchv6 != NULL) {
dns_dispatchset_create(requestmgr->mctx, dispatchv6,
&requestmgr->dispatches6,
isc_loopmgr_nloops(requestmgr->loopmgr));
}
isc_refcount_init(&requestmgr->references, 1);
req_log(ISC_LOG_DEBUG(3), "%s: %p", __func__, requestmgr);
*requestmgrp = requestmgr;
return ISC_R_SUCCESS;
}
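
/*
 * Per-loop shutdown handler: complete every request still pending on
 * this thread's list with ISC_R_SHUTTINGDOWN, then drop the loop
 * reference taken in dns_requestmgr_create().
 */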
static void
requests_shutdown(void *arg) {
dns_requestmgr_t *requestmgr = arg;
uint32_t tid = isc_tid();
ISC_LIST_FOREACH_SAFE (requestmgr->requests[tid], request, link) {
req_log(ISC_LOG_DEBUG(3), "%s(%" PRIu32 "): request %p",
__func__, tid, request);
if (DNS_REQUEST_COMPLETE(request)) {
/* The callback has already been scheduled */
continue;
}
req_sendevent(request, ISC_R_SHUTTINGDOWN);
}
isc_loop_unref(isc_loop_get(requestmgr->loopmgr, tid));
dns_requestmgr_detach(&requestmgr);
}
void
dns_requestmgr_shutdown(dns_requestmgr_t *requestmgr) {
bool first;
REQUIRE(VALID_REQUESTMGR(requestmgr));
req_log(ISC_LOG_DEBUG(3), "%s: %p", __func__, requestmgr);
rcu_read_lock();
first = atomic_compare_exchange_strong(&requestmgr->shuttingdown,
&(bool){ false }, true);
rcu_read_unlock();
if (!first) {
return;
}
/*
* Wait until all dns_request_create{raw}() are finished, so
* there will be no new requests added to the lists.
*/
synchronize_rcu();
uint32_t tid = isc_tid();
uint32_t nloops = isc_loopmgr_nloops(requestmgr->loopmgr);
for (size_t i = 0; i < nloops; i++) {
dns_requestmgr_ref(requestmgr);
if (i == tid) {
/* Run the current loop synchronously */
requests_shutdown(requestmgr);
continue;
}
isc_loop_t *loop = isc_loop_get(requestmgr->loopmgr, i);
isc_async_run(loop, requests_shutdown, requestmgr);
}
}
static void
requestmgr_destroy(dns_requestmgr_t *requestmgr) {
req_log(ISC_LOG_DEBUG(3), "%s", __func__);
INSIST(atomic_load(&requestmgr->shuttingdown));
size_t nloops = isc_loopmgr_nloops(requestmgr->loopmgr);
for (size_t i = 0; i < nloops; i++) {
INSIST(ISC_LIST_EMPTY(requestmgr->requests[i]));
}
isc_mem_cput(requestmgr->mctx, requestmgr->requests, nloops,
sizeof(requestmgr->requests[0]));
if (requestmgr->dispatches4 != NULL) {
dns_dispatchset_destroy(&requestmgr->dispatches4);
}
if (requestmgr->dispatches6 != NULL) {
dns_dispatchset_destroy(&requestmgr->dispatches6);
}
if (requestmgr->dispatchmgr != NULL) {
dns_dispatchmgr_detach(&requestmgr->dispatchmgr);
}
requestmgr->magic = 0;
isc_mem_putanddetach(&requestmgr->mctx, requestmgr,
sizeof(*requestmgr));
}
#if DNS_REQUEST_TRACE
ISC_REFCOUNT_TRACE_IMPL(dns_requestmgr, requestmgr_destroy);
#else
ISC_REFCOUNT_IMPL(dns_requestmgr, requestmgr_destroy);
#endif
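
/*
 * Hand the rendered query to the dispatch entry for sending; the
 * reference taken here is released in req_senddone().
 */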
static void
req_send(dns_request_t *request) {
isc_region_t r;
req_log(ISC_LOG_DEBUG(3), "%s: request %p", __func__, request);
REQUIRE(VALID_REQUEST(request));
isc_buffer_usedregion(request->query, &r);
request->flags |= DNS_REQUEST_F_SENDING;
/* detached in req_senddone() */
dns_request_ref(request);
dns_dispatch_send(request->dispentry, &r);
}
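
/*
 * Allocate and initialize a request.  TCP requests use the connect
 * and request timeouts as given; for UDP the total timeout is spread
 * over udpretries + 1 attempts unless an explicit per-attempt
 * udptimeout was supplied.
 */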
static dns_request_t *
new_request(isc_mem_t *mctx, isc_loop_t *loop, isc_job_cb cb, void *arg,
bool tcp, unsigned int connect_timeout, unsigned int timeout,
unsigned int udptimeout, unsigned int udpretries) {
dns_request_t *request = isc_mem_get(mctx, sizeof(*request));
*request = (dns_request_t){
.magic = REQUEST_MAGIC,
.loop = loop,
.tid = isc_tid(),
.cb = cb,
.arg = arg,
.link = ISC_LINK_INITIALIZER,
.result = ISC_R_FAILURE,
.udpcount = udpretries + 1,
};
isc_refcount_init(&request->references, 1);
isc_mem_attach(mctx, &request->mctx);
if (tcp) {
request->connect_timeout = connect_timeout * 1000;
request->timeout = timeout * 1000;
} else {
if (udptimeout == 0) {
udptimeout = timeout / request->udpcount;
}
if (udptimeout == 0) {
udptimeout = 1;
}
request->timeout = udptimeout * 1000;
}
return request;
}
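
/*
 * Return true if 'destaddr' matches the dispatch manager's blackhole
 * ACL and must therefore not be queried.
 */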
static bool
isblackholed(dns_dispatchmgr_t *dispatchmgr, const isc_sockaddr_t *destaddr) {
dns_acl_t *blackhole;
isc_netaddr_t netaddr;
char netaddrstr[ISC_NETADDR_FORMATSIZE];
int match;
isc_result_t result;
blackhole = dns_dispatchmgr_getblackhole(dispatchmgr);
if (blackhole == NULL) {
return false;
}
isc_netaddr_fromsockaddr(&netaddr, destaddr);
result = dns_acl_match(&netaddr, NULL, blackhole, NULL, &match, NULL);
if (result != ISC_R_SUCCESS || match <= 0) {
return false;
}
isc_netaddr_format(&netaddr, netaddrstr, sizeof(netaddrstr));
req_log(ISC_LOG_DEBUG(10), "blackholed address %s", netaddrstr);
return true;
}
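
/*
 * Reuse an existing TCP connection to 'destaddr' when permitted;
 * otherwise create a new TCP dispatch.
 */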
static isc_result_t
tcp_dispatch(bool newtcp, dns_requestmgr_t *requestmgr,
const isc_sockaddr_t *srcaddr, const isc_sockaddr_t *destaddr,
dns_transport_t *transport, dns_dispatch_t **dispatchp) {
isc_result_t result;
if (!newtcp) {
result = dns_dispatch_gettcp(requestmgr->dispatchmgr, destaddr,
srcaddr, transport, dispatchp);
if (result == ISC_R_SUCCESS) {
char peer[ISC_SOCKADDR_FORMATSIZE];
isc_sockaddr_format(destaddr, peer, sizeof(peer));
req_log(ISC_LOG_DEBUG(1),
"attached to TCP connection to %s", peer);
return result;
}
}
result = dns_dispatch_createtcp(requestmgr->dispatchmgr, srcaddr,
destaddr, transport, 0, dispatchp);
return result;
}
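
/*
 * With no source address, borrow a shared UDP dispatch from the set
 * matching the destination's address family; otherwise create a
 * dedicated UDP dispatch bound to 'srcaddr'.
 */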
static isc_result_t
udp_dispatch(dns_requestmgr_t *requestmgr, const isc_sockaddr_t *srcaddr,
const isc_sockaddr_t *destaddr, dns_dispatch_t **dispatchp) {
dns_dispatch_t *disp = NULL;
if (srcaddr == NULL) {
switch (isc_sockaddr_pf(destaddr)) {
case PF_INET:
disp = dns_dispatchset_get(requestmgr->dispatches4);
break;
case PF_INET6:
disp = dns_dispatchset_get(requestmgr->dispatches6);
break;
default:
return ISC_R_NOTIMPLEMENTED;
}
if (disp == NULL) {
return ISC_R_FAMILYNOSUPPORT;
}
dns_dispatch_attach(disp, dispatchp);
return ISC_R_SUCCESS;
}
return dns_dispatch_createudp(requestmgr->dispatchmgr, srcaddr,
dispatchp);
}
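
/*
 * Select the dispatch for a request, TCP or UDP as requested.
 */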
static isc_result_t
get_dispatch(bool tcp, bool newtcp, dns_requestmgr_t *requestmgr,
const isc_sockaddr_t *srcaddr, const isc_sockaddr_t *destaddr,
dns_transport_t *transport, dns_dispatch_t **dispatchp) {
isc_result_t result;
if (tcp) {
result = tcp_dispatch(newtcp, requestmgr, srcaddr, destaddr,
transport, dispatchp);
} else {
result = udp_dispatch(requestmgr, srcaddr, destaddr, dispatchp);
}
return result;
}
isc_result_t
dns_request_createraw(dns_requestmgr_t *requestmgr, isc_buffer_t *msgbuf,
const isc_sockaddr_t *srcaddr,
const isc_sockaddr_t *destaddr,
dns_transport_t *transport,
isc_tlsctx_cache_t *tlsctx_cache, unsigned int options,
unsigned int connect_timeout, unsigned int timeout,
unsigned int udptimeout, unsigned int udpretries,
isc_loop_t *loop, isc_job_cb cb, void *arg,
dns_request_t **requestp) {
dns_request_t *request = NULL;
isc_result_t result;
isc_mem_t *mctx = NULL;
dns_messageid_t id;
bool tcp = false;
bool newtcp = false;
isc_region_t r;
unsigned int dispopt = 0;
REQUIRE(VALID_REQUESTMGR(requestmgr));
REQUIRE(msgbuf != NULL);
REQUIRE(destaddr != NULL);
REQUIRE(loop != NULL);
REQUIRE(cb != NULL);
REQUIRE(requestp != NULL && *requestp == NULL);
REQUIRE(connect_timeout > 0 && timeout > 0);
REQUIRE(udpretries != UINT_MAX);
if (srcaddr != NULL) {
REQUIRE(isc_sockaddr_pf(srcaddr) == isc_sockaddr_pf(destaddr));
}
mctx = requestmgr->mctx;
req_log(ISC_LOG_DEBUG(3), "%s", __func__);
rcu_read_lock();
if (atomic_load_acquire(&requestmgr->shuttingdown)) {
result = ISC_R_SHUTTINGDOWN;
goto done;
}
if (isblackholed(requestmgr->dispatchmgr, destaddr)) {
result = DNS_R_BLACKHOLED;
goto done;
}
isc_buffer_usedregion(msgbuf, &r);
if (r.length < DNS_MESSAGE_HEADERLEN || r.length > 65535) {
result = DNS_R_FORMERR;
goto done;
}
if ((options & DNS_REQUESTOPT_TCP) != 0 || r.length > 512) {
tcp = true;
}
request = new_request(mctx, loop, cb, arg, tcp, connect_timeout,
timeout, udptimeout, udpretries);
isc_buffer_allocate(mctx, &request->query, r.length + (tcp ? 2 : 0));
result = isc_buffer_copyregion(request->query, &r);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
again:
result = get_dispatch(tcp, newtcp, requestmgr, srcaddr, destaddr,
transport, &request->dispatch);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
if ((options & DNS_REQUESTOPT_FIXEDID) != 0) {
id = (r.base[0] << 8) | r.base[1];
dispopt |= DNS_DISPATCHOPT_FIXEDID;
}
result = dns_dispatch_add(request->dispatch, loop, dispopt,
request->connect_timeout, request->timeout,
destaddr, transport, tlsctx_cache,
req_connected, req_senddone, req_response,
request, &id, &request->dispentry);
if (result != ISC_R_SUCCESS) {
if ((options & DNS_REQUESTOPT_FIXEDID) != 0 && !newtcp) {
dns_dispatch_detach(&request->dispatch);
newtcp = true;
goto again;
}
goto cleanup;
}
/* Add message ID. */
isc_buffer_usedregion(request->query, &r);
r.base[0] = (id >> 8) & 0xff;
r.base[1] = id & 0xff;
request->destaddr = *destaddr;
request->flags |= DNS_REQUEST_F_CONNECTING;
if (tcp) {
request->flags |= DNS_REQUEST_F_TCP;
}
dns_requestmgr_attach(requestmgr, &request->requestmgr);
ISC_LIST_APPEND(requestmgr->requests[request->tid], request, link);
dns_request_ref(request); /* detached in req_connected() */
result = dns_dispatch_connect(request->dispentry);
if (result != ISC_R_SUCCESS) {
dns_request_unref(request);
goto cleanup;
}
req_log(ISC_LOG_DEBUG(3), "%s: request %p", __func__, request);
*requestp = request;
cleanup:
if (result != ISC_R_SUCCESS) {
req_cleanup(request);
dns_request_detach(&request);
req_log(ISC_LOG_DEBUG(3), "%s: failed %s", __func__,
isc_result_totext(result));
}
done:
rcu_read_unlock();
return result;
}
isc_result_t
dns_request_create(dns_requestmgr_t *requestmgr, dns_message_t *message,
const isc_sockaddr_t *srcaddr,
const isc_sockaddr_t *destaddr, dns_transport_t *transport,
isc_tlsctx_cache_t *tlsctx_cache, unsigned int options,
dns_tsigkey_t *key, unsigned int connect_timeout,
unsigned int timeout, unsigned int udptimeout,
unsigned int udpretries, isc_loop_t *loop, isc_job_cb cb,
void *arg, dns_request_t **requestp) {
dns_request_t *request = NULL;
isc_result_t result;
isc_mem_t *mctx = NULL;
dns_messageid_t id;
bool tcp = false;
REQUIRE(VALID_REQUESTMGR(requestmgr));
REQUIRE(message != NULL);
REQUIRE(destaddr != NULL);
REQUIRE(loop != NULL);
REQUIRE(cb != NULL);
REQUIRE(requestp != NULL && *requestp == NULL);
REQUIRE(connect_timeout > 0 && timeout > 0);
REQUIRE(udpretries != UINT_MAX);
if (srcaddr != NULL &&
isc_sockaddr_pf(srcaddr) != isc_sockaddr_pf(destaddr))
{
return ISC_R_FAMILYMISMATCH;
}
mctx = requestmgr->mctx;
req_log(ISC_LOG_DEBUG(3), "%s", __func__);
rcu_read_lock();
if (atomic_load_acquire(&requestmgr->shuttingdown)) {
result = ISC_R_SHUTTINGDOWN;
goto done;
}
if (isblackholed(requestmgr->dispatchmgr, destaddr)) {
result = DNS_R_BLACKHOLED;
goto done;
}
if ((options & DNS_REQUESTOPT_TCP) != 0) {
tcp = true;
}
request = new_request(mctx, loop, cb, arg, tcp, connect_timeout,
timeout, udptimeout, udpretries);
if (key != NULL) {
dns_tsigkey_attach(key, &request->tsigkey);
}
result = dns_message_settsigkey(message, request->tsigkey);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
again:
result = get_dispatch(tcp, false, requestmgr, srcaddr, destaddr,
transport, &request->dispatch);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_dispatch_add(request->dispatch, loop, 0,
request->connect_timeout, request->timeout,
destaddr, transport, tlsctx_cache,
req_connected, req_senddone, req_response,
request, &id, &request->dispentry);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
message->id = id;
result = req_render(message, &request->query, options, mctx);
if (result == DNS_R_USETCP && !tcp) {
/* Try again using TCP. */
dns_message_renderreset(message);
dns_dispatch_done(&request->dispentry);
dns_dispatch_detach(&request->dispatch);
options |= DNS_REQUESTOPT_TCP;
tcp = true;
goto again;
} else if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_getquerytsig(message, mctx, &request->tsig);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
request->destaddr = *destaddr;
request->flags |= DNS_REQUEST_F_CONNECTING;
if (tcp) {
request->flags |= DNS_REQUEST_F_TCP;
}
dns_requestmgr_attach(requestmgr, &request->requestmgr);
ISC_LIST_APPEND(requestmgr->requests[request->tid], request, link);
dns_request_ref(request); /* detached in req_connected() */
result = dns_dispatch_connect(request->dispentry);
if (result != ISC_R_SUCCESS) {
dns_request_unref(request);
goto cleanup;
}
req_log(ISC_LOG_DEBUG(3), "%s: request %p", __func__, request);
*requestp = request;
cleanup:
if (result != ISC_R_SUCCESS) {
req_cleanup(request);
dns_request_detach(&request);
req_log(ISC_LOG_DEBUG(3), "%s: failed %s", __func__,
isc_result_totext(result));
}
done:
rcu_read_unlock();
return result;
}
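
/*
 * Render 'message' into a fresh, exact-sized buffer.  Returns
 * DNS_R_USETCP if the rendered message exceeds 512 octets and the
 * caller did not request TCP.
 */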
static isc_result_t
req_render(dns_message_t *message, isc_buffer_t **bufferp, unsigned int options,
isc_mem_t *mctx) {
isc_buffer_t *buf1 = NULL;
isc_buffer_t *buf2 = NULL;
isc_result_t result;
isc_region_t r;
dns_compress_t cctx;
unsigned int compflags;
REQUIRE(bufferp != NULL && *bufferp == NULL);
req_log(ISC_LOG_DEBUG(3), "%s", __func__);
/*
* Create a buffer able to hold the largest possible message.
*/
isc_buffer_allocate(mctx, &buf1, 65535);
compflags = 0;
if ((options & DNS_REQUESTOPT_LARGE) != 0) {
compflags |= DNS_COMPRESS_LARGE;
}
if ((options & DNS_REQUESTOPT_CASE) != 0) {
compflags |= DNS_COMPRESS_CASE;
}
dns_compress_init(&cctx, mctx, compflags);
/*
* Render message.
*/
result = dns_message_renderbegin(message, &cctx, buf1);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_rendersection(message, DNS_SECTION_QUESTION, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_rendersection(message, DNS_SECTION_ANSWER, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_rendersection(message, DNS_SECTION_AUTHORITY, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_rendersection(message, DNS_SECTION_ADDITIONAL, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_renderend(message);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
/*
* Copy the rendered message to an exact-sized buffer.
*/
isc_buffer_usedregion(buf1, &r);
if ((options & DNS_REQUESTOPT_TCP) == 0 && r.length > 512) {
result = DNS_R_USETCP;
goto cleanup;
}
isc_buffer_allocate(mctx, &buf2, r.length);
result = isc_buffer_copyregion(buf2, &r);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
/*
* Cleanup and return.
*/
dns_compress_invalidate(&cctx);
isc_buffer_free(&buf1);
*bufferp = buf2;
return ISC_R_SUCCESS;
cleanup:
dns_message_renderreset(message);
dns_compress_invalidate(&cctx);
if (buf1 != NULL) {
isc_buffer_free(&buf1);
}
if (buf2 != NULL) {
isc_buffer_free(&buf2);
}
return result;
}
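
/*
 * Complete a request with ISC_R_CANCELED; must run on the request's
 * own loop, and is a no-op if the completion callback has already
 * been scheduled.
 */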
static void
request_cancel(dns_request_t *request) {
REQUIRE(VALID_REQUEST(request));
REQUIRE(request->tid == isc_tid());
if (DNS_REQUEST_COMPLETE(request)) {
/* The request callback was already called */
return;
}
req_log(ISC_LOG_DEBUG(3), "%s: request %p", __func__, request);
req_sendevent(request, ISC_R_CANCELED); /* call asynchronously */
}
static void
req_cancel_cb(void *arg) {
dns_request_t *request = arg;
request_cancel(request);
dns_request_unref(request);
}
void
dns_request_cancel(dns_request_t *request) {
REQUIRE(VALID_REQUEST(request));
if (request->tid == isc_tid()) {
request_cancel(request);
} else {
dns_request_ref(request);
isc_async_run(request->loop, req_cancel_cb, request);
}
}
isc_result_t
dns_request_getresponse(dns_request_t *request, dns_message_t *message,
unsigned int options) {
isc_result_t result;
REQUIRE(VALID_REQUEST(request));
REQUIRE(request->tid == isc_tid());
REQUIRE(request->answer != NULL);
req_log(ISC_LOG_DEBUG(3), "%s: request %p", __func__, request);
dns_message_setquerytsig(message, request->tsig);
result = dns_message_settsigkey(message, request->tsigkey);
if (result != ISC_R_SUCCESS) {
return result;
}
result = dns_message_parse(message, request->answer, options);
if (result != ISC_R_SUCCESS) {
return result;
}
if (request->tsigkey != NULL) {
result = dns_tsig_verify(request->answer, message, NULL, NULL);
}
return result;
2000-02-24 14:31:43 +00:00
}
isc_buffer_t *
dns_request_getanswer(dns_request_t *request) {
REQUIRE(VALID_REQUEST(request));
REQUIRE(request->tid == isc_tid());
return request->answer;
}
bool
dns_request_usedtcp(dns_request_t *request) {
REQUIRE(VALID_REQUEST(request));
REQUIRE(request->tid == isc_tid());
return (request->flags & DNS_REQUEST_F_TCP) != 0;
}
void
dns_request_destroy(dns_request_t **requestp) {
REQUIRE(requestp != NULL && VALID_REQUEST(*requestp));
dns_request_t *request = *requestp;
*requestp = NULL;
req_log(ISC_LOG_DEBUG(3), "%s: request %p", __func__, request);
if (!DNS_REQUEST_COMPLETE(request)) {
dns_request_cancel(request);
}
/* final detach to shut down request */
dns_request_detach(&request);
}
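
/*
 * Dispatch connect callback: clear the CONNECTING flag, then either
 * send the query or complete the request with the connect result.
 */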
static void
req_connected(isc_result_t eresult, isc_region_t *region ISC_ATTR_UNUSED,
void *arg) {
dns_request_t *request = (dns_request_t *)arg;
REQUIRE(VALID_REQUEST(request));
REQUIRE(request->tid == isc_tid());
REQUIRE(DNS_REQUEST_CONNECTING(request));
2000-03-13 20:43:39 +00:00
req_log(ISC_LOG_DEBUG(3), "%s: request %p: %s", __func__, request,
isc_result_totext(eresult));
2000-03-20 12:22:02 +00:00
request->flags &= ~DNS_REQUEST_F_CONNECTING;
if (DNS_REQUEST_COMPLETE(request)) {
/* The request callback was already called */
goto detach;
}
if (eresult == ISC_R_SUCCESS) {
req_send(request);
2000-03-20 12:22:02 +00:00
} else {
req_sendevent(request, eresult);
2000-03-20 12:22:02 +00:00
}
detach:
/* attached in dns_request_create/_createraw() */
dns_request_unref(request);
2000-03-20 12:22:02 +00:00
}
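
/*
 * Send-completion callback from the dispatch code: clear the SENDING
 * flag and, if the send itself failed, complete the request with that
 * result.  The reference taken in req_send() is released here.
 */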
static void
req_senddone(isc_result_t eresult, isc_region_t *region ISC_ATTR_UNUSED,
	     void *arg) {
	dns_request_t *request = (dns_request_t *)arg;

	REQUIRE(VALID_REQUEST(request));
	REQUIRE(request->tid == isc_tid());
	REQUIRE(DNS_REQUEST_SENDING(request));

	req_log(ISC_LOG_DEBUG(3), "%s: request %p", __func__, request);

	request->flags &= ~DNS_REQUEST_F_SENDING;

	if (DNS_REQUEST_COMPLETE(request)) {
		/* The request callback was already called */
		goto detach;
	}

	if (eresult != ISC_R_SUCCESS) {
		req_sendevent(request, eresult);
	}

detach:
	/* attached in req_send() */
	dns_request_unref(request);
}
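
/*
 * Response callback from the dispatch code.  On success the wire-format
 * answer is copied into request->answer; a UDP timeout may trigger a
 * retry (see below); any other result completes the request as-is.
 */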
static void
req_response(isc_result_t eresult, isc_region_t *region, void *arg) {
	dns_request_t *request = (dns_request_t *)arg;

	if (eresult == ISC_R_CANCELED) {
		return;
	}

	REQUIRE(VALID_REQUEST(request));
	REQUIRE(request->tid == isc_tid());

	req_log(ISC_LOG_DEBUG(3), "%s: request %p: %s", __func__, request,
		isc_result_totext(eresult));

	if (DNS_REQUEST_COMPLETE(request)) {
		/* The request callback was already called */
		return;
	}

	switch (eresult) {
	case ISC_R_TIMEDOUT:
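		/*
		 * A UDP attempt timed out but retries remain: resume
		 * the dispatch timer and resend the query (unless a
		 * send is already in flight) instead of completing
		 * the request with a timeout.
		 */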
		if (request->udpcount > 1 && !dns_request_usedtcp(request)) {
			request->udpcount -= 1;
			dns_dispatch_resume(request->dispentry,
					    request->timeout);
			if (!DNS_REQUEST_SENDING(request)) {
				req_send(request);
			}
			return;
		}
		break;
	case ISC_R_SUCCESS:
		/* Copy region to request. */
		isc_buffer_allocate(request->mctx, &request->answer,
				    region->length);
		eresult = isc_buffer_copyregion(request->answer, region);
		if (eresult != ISC_R_SUCCESS) {
			isc_buffer_free(&request->answer);
		}
		break;
	default:
		break;
	}

	req_sendevent(request, eresult);
}

static void
req_sendevent_cb(void *arg) {
	dns_request_t *request = arg;

	request->cb(request);
	dns_request_unref(request);
}
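
/*
 * Unlink the request from its manager's per-loop request list and release
 * any dispatch resources.  Every step is guarded, so this is safe to call
 * whether or not the request ever acquired a dispatch entry.
 */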
static void
req_cleanup(dns_request_t *request) {
	if (ISC_LINK_LINKED(request, link)) {
		ISC_LIST_UNLINK(request->requestmgr->requests[request->tid],
				request, link);
	}
	if (request->dispentry != NULL) {
		dns_dispatch_done(&request->dispentry);
	}
	if (request->dispatch != NULL) {
		dns_dispatch_detach(&request->dispatch);
	}
}

static void
req_sendevent(dns_request_t *request, isc_result_t result) {
	REQUIRE(VALID_REQUEST(request));
	REQUIRE(request->tid == isc_tid());
	REQUIRE(!DNS_REQUEST_COMPLETE(request));

	request->flags |= DNS_REQUEST_F_COMPLETE;

	req_cleanup(request);

	req_log(ISC_LOG_DEBUG(3), "%s: request %p: %s", __func__, request,
		isc_result_totext(result));

	request->result = result;

	/*
	 * Do not call request->cb directly; invoking it synchronously
	 * can deadlock on zone->lock between dns_zonemgr_shutdown() and
	 * sendtoprimary() in lib/dns/zone.c.
	 */
	dns_request_ref(request);
	isc_async_run(request->loop, req_sendevent_cb, request);
}

static void
req_destroy(dns_request_t *request) {
	REQUIRE(VALID_REQUEST(request));
	REQUIRE(request->tid == isc_tid());
	REQUIRE(!ISC_LINK_LINKED(request, link));

	req_log(ISC_LOG_DEBUG(3), "%s: request %p", __func__, request);

	/*
	 * These should have been cleaned up before the
	 * completion event was sent.
	 */
	INSIST(!ISC_LINK_LINKED(request, link));
	INSIST(request->dispentry == NULL);
	INSIST(request->dispatch == NULL);

	request->magic = 0;
	if (request->query != NULL) {
		isc_buffer_free(&request->query);
	}
	if (request->answer != NULL) {
		isc_buffer_free(&request->answer);
	}
	if (request->tsig != NULL) {
		isc_buffer_free(&request->tsig);
	}
	if (request->tsigkey != NULL) {
		dns_tsigkey_detach(&request->tsigkey);
	}
	if (request->requestmgr != NULL) {
		dns_requestmgr_detach(&request->requestmgr);
	}
	isc_mem_putanddetach(&request->mctx, request, sizeof(*request));
}

void *
dns_request_getarg(dns_request_t *request) {
	REQUIRE(VALID_REQUEST(request));
	REQUIRE(request->tid == isc_tid());

	return request->arg;
}

isc_result_t
dns_request_getresult(dns_request_t *request) {
	REQUIRE(VALID_REQUEST(request));
	REQUIRE(request->tid == isc_tid());

	return request->result;
}

#if DNS_REQUEST_TRACE
ISC_REFCOUNT_TRACE_IMPL(dns_request, req_destroy);
#else
ISC_REFCOUNT_IMPL(dns_request, req_destroy);
#endif
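
/*
 * The ISC_REFCOUNT_IMPL() macro is assumed here to generate the
 * reference-counting entry points used throughout this file
 * (dns_request_ref(), dns_request_unref() and friends), calling
 * req_destroy() once the last reference is released; the _TRACE
 * variant additionally logs every reference-count change when
 * DNS_REQUEST_TRACE is enabled.
 */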

static void
req_log(int level, const char *fmt, ...) {
	va_list ap;

	va_start(ap, fmt);
	isc_log_vwrite(DNS_LOGCATEGORY_GENERAL, DNS_LOGMODULE_REQUEST, level,
		       fmt, ap);
	va_end(ap);
}