/*
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
*
* SPDX-License-Identifier: MPL-2.0
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, you can obtain one at https://mozilla.org/MPL/2.0/.
*
* See the COPYRIGHT file distributed with this work for additional
* information regarding copyright ownership.
*/
/*! \file */
#include <inttypes.h>
#include <stdbool.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
#include <isc/result.h>
#include <isc/task.h>
#include <isc/util.h>
#include <dns/acl.h>
#include <dns/compress.h>
#include <dns/dispatch.h>
#include <dns/events.h>
#include <dns/log.h>
#include <dns/message.h>
#include <dns/rdata.h>
#include <dns/rdatastruct.h>
#include <dns/request.h>
#include <dns/tsig.h>
#define REQUESTMGR_MAGIC ISC_MAGIC('R', 'q', 'u', 'M')
#define VALID_REQUESTMGR(mgr) ISC_MAGIC_VALID(mgr, REQUESTMGR_MAGIC)
#define REQUEST_MAGIC ISC_MAGIC('R', 'q', 'u', '!')
#define VALID_REQUEST(request) ISC_MAGIC_VALID(request, REQUEST_MAGIC)
typedef ISC_LIST(dns_request_t) dns_requestlist_t;
#define DNS_REQUEST_NLOCKS 7
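/*
 * Requests are distributed across a small array of bucket locks in the
 * manager: mgr_gethash() below assigns each new request to one of the
 * DNS_REQUEST_NLOCKS buckets in round-robin fashion, so per-request
 * locking does not all contend on the single manager lock.
 */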
struct dns_requestmgr {
unsigned int magic;
isc_refcount_t references;
isc_mutex_t lock;
isc_mem_t *mctx;
/* locked */
isc_taskmgr_t *taskmgr;
dns_dispatchmgr_t *dispatchmgr;
dns_dispatch_t *dispatchv4;
dns_dispatch_t *dispatchv6;
atomic_bool exiting;
isc_eventlist_t whenshutdown;
unsigned int hash;
isc_mutex_t locks[DNS_REQUEST_NLOCKS];
dns_requestlist_t requests;
};
struct dns_request {
unsigned int magic;
isc_refcount_t references;
unsigned int hash;
isc_mem_t *mctx;
int32_t flags;
ISC_LINK(dns_request_t) link;
isc_buffer_t *query;
isc_buffer_t *answer;
dns_requestevent_t *event;
dns_dispatch_t *dispatch;
dns_dispentry_t *dispentry;
dns_requestmgr_t *requestmgr;
isc_buffer_t *tsig;
dns_tsigkey_t *tsigkey;
isc_sockaddr_t destaddr;
unsigned int timeout;
unsigned int udpcount;
isc_dscp_t dscp;
};
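/*
 * Request state flags: a request may be waiting for a connection
 * (CONNECTING), waiting for a send to complete (SENDING), or already
 * CANCELED; TCP marks a query carried over a TCP dispatch.
 */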
#define DNS_REQUEST_F_CONNECTING 0x0001
#define DNS_REQUEST_F_SENDING 0x0002
#define DNS_REQUEST_F_CANCELED 0x0004
#define DNS_REQUEST_F_TCP 0x0010
#define DNS_REQUEST_CANCELED(r) (((r)->flags & DNS_REQUEST_F_CANCELED) != 0)
#define DNS_REQUEST_CONNECTING(r) (((r)->flags & DNS_REQUEST_F_CONNECTING) != 0)
#define DNS_REQUEST_SENDING(r) (((r)->flags & DNS_REQUEST_F_SENDING) != 0)
/***
*** Forward
***/
static void
mgr_destroy(dns_requestmgr_t *requestmgr);
static unsigned int
mgr_gethash(dns_requestmgr_t *requestmgr);
static void
send_shutdown_events(dns_requestmgr_t *requestmgr);
static isc_result_t
req_render(dns_message_t *message, isc_buffer_t **buffer, unsigned int options,
isc_mem_t *mctx);
static void
req_response(isc_result_t result, isc_region_t *region, void *arg);
static void
req_senddone(isc_result_t eresult, isc_region_t *region, void *arg);
static void
req_sendevent(dns_request_t *request, isc_result_t result);
static void
req_connected(isc_result_t eresult, isc_region_t *region, void *arg);
static void
req_attach(dns_request_t *source, dns_request_t **targetp);
static void
req_detach(dns_request_t **requestp);
static void
req_destroy(dns_request_t *request);
static void
req_log(int level, const char *fmt, ...) ISC_FORMAT_PRINTF(2, 3);
static void
request_cancel(dns_request_t *request);
/***
*** Public
***/
isc_result_t
dns_requestmgr_create(isc_mem_t *mctx, isc_taskmgr_t *taskmgr,
dns_dispatchmgr_t *dispatchmgr,
dns_dispatch_t *dispatchv4, dns_dispatch_t *dispatchv6,
dns_requestmgr_t **requestmgrp) {
dns_requestmgr_t *requestmgr;
int i;
req_log(ISC_LOG_DEBUG(3), "dns_requestmgr_create");
REQUIRE(requestmgrp != NULL && *requestmgrp == NULL);
REQUIRE(taskmgr != NULL);
REQUIRE(dispatchmgr != NULL);
requestmgr = isc_mem_get(mctx, sizeof(*requestmgr));
*requestmgr = (dns_requestmgr_t){ 0 };
isc_taskmgr_attach(taskmgr, &requestmgr->taskmgr);
dns_dispatchmgr_attach(dispatchmgr, &requestmgr->dispatchmgr);
isc_mutex_init(&requestmgr->lock);
for (i = 0; i < DNS_REQUEST_NLOCKS; i++) {
isc_mutex_init(&requestmgr->locks[i]);
}
if (dispatchv4 != NULL) {
dns_dispatch_attach(dispatchv4, &requestmgr->dispatchv4);
}
if (dispatchv6 != NULL) {
dns_dispatch_attach(dispatchv6, &requestmgr->dispatchv6);
}
isc_mem_attach(mctx, &requestmgr->mctx);
isc_refcount_init(&requestmgr->references, 1);
ISC_LIST_INIT(requestmgr->whenshutdown);
ISC_LIST_INIT(requestmgr->requests);
atomic_init(&requestmgr->exiting, false);
requestmgr->magic = REQUESTMGR_MAGIC;
req_log(ISC_LOG_DEBUG(3), "dns_requestmgr_create: %p", requestmgr);
*requestmgrp = requestmgr;
return (ISC_R_SUCCESS);
}
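/*
 * Illustrative call sequence for this module (a sketch, not taken from
 * this file; dns_request_destroy() is assumed from <dns/request.h>):
 *
 *	dns_requestmgr_t *mgr = NULL;
 *	dns_request_t *req = NULL;
 *
 *	dns_requestmgr_create(mctx, taskmgr, dispatchmgr,
 *			      dispv4, dispv6, &mgr);
 *	dns_request_createvia(mgr, message, NULL, &destaddr, -1, 0,
 *			      NULL, 30, 0, 0, task, done_cb, NULL, &req);
 *	...
 *	done_cb() receives a DNS_EVENT_REQUESTDONE event and typically
 *	calls dns_request_getresponse() and dns_request_destroy(&req).
 *	...
 *	dns_requestmgr_shutdown(mgr);
 *	dns_requestmgr_detach(&mgr);
 */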
void
dns_requestmgr_whenshutdown(dns_requestmgr_t *requestmgr, isc_task_t *task,
isc_event_t **eventp) {
isc_task_t *tclone;
isc_event_t *event;
req_log(ISC_LOG_DEBUG(3), "dns_requestmgr_whenshutdown");
REQUIRE(VALID_REQUESTMGR(requestmgr));
REQUIRE(eventp != NULL);
event = *eventp;
*eventp = NULL;
LOCK(&requestmgr->lock);
if (atomic_load_acquire(&requestmgr->exiting)) {
/*
* We're already shut down. Send the event.
*/
event->ev_sender = requestmgr;
isc_task_send(task, &event);
} else {
tclone = NULL;
isc_task_attach(task, &tclone);
event->ev_sender = tclone;
ISC_LIST_APPEND(requestmgr->whenshutdown, event, ev_link);
}
UNLOCK(&requestmgr->lock);
}
void
dns_requestmgr_shutdown(dns_requestmgr_t *requestmgr) {
dns_request_t *request;
REQUIRE(VALID_REQUESTMGR(requestmgr));
req_log(ISC_LOG_DEBUG(3), "dns_requestmgr_shutdown: %p", requestmgr);
if (!atomic_compare_exchange_strong(&requestmgr->exiting,
&(bool){ false }, true))
{
return;
}
LOCK(&requestmgr->lock);
for (request = ISC_LIST_HEAD(requestmgr->requests); request != NULL;
request = ISC_LIST_NEXT(request, link))
{
dns_request_cancel(request);
}
if (ISC_LIST_EMPTY(requestmgr->requests)) {
send_shutdown_events(requestmgr);
}
UNLOCK(&requestmgr->lock);
}
void
dns_requestmgr_attach(dns_requestmgr_t *source, dns_requestmgr_t **targetp) {
uint_fast32_t ref;
REQUIRE(VALID_REQUESTMGR(source));
REQUIRE(targetp != NULL && *targetp == NULL);
REQUIRE(!atomic_load_acquire(&source->exiting));
ref = isc_refcount_increment(&source->references);
req_log(ISC_LOG_DEBUG(3),
"dns_requestmgr_attach: %p: references = %" PRIuFAST32, source,
ref + 1);
*targetp = source;
}
void
dns_requestmgr_detach(dns_requestmgr_t **requestmgrp) {
dns_requestmgr_t *requestmgr = NULL;
uint_fast32_t ref;
REQUIRE(requestmgrp != NULL && VALID_REQUESTMGR(*requestmgrp));
requestmgr = *requestmgrp;
*requestmgrp = NULL;
ref = isc_refcount_decrement(&requestmgr->references);
req_log(ISC_LOG_DEBUG(3),
"dns_requestmgr_detach: %p: references = %" PRIuFAST32,
requestmgr, ref - 1);
if (ref == 1) {
INSIST(ISC_LIST_EMPTY(requestmgr->requests));
mgr_destroy(requestmgr);
}
}
/* FIXME */
static void
send_shutdown_events(dns_requestmgr_t *requestmgr) {
isc_event_t *event, *next_event;
isc_task_t *etask;
req_log(ISC_LOG_DEBUG(3), "send_shutdown_events: %p", requestmgr);
/*
* Caller must be holding the manager lock.
*/
for (event = ISC_LIST_HEAD(requestmgr->whenshutdown); event != NULL;
event = next_event)
{
next_event = ISC_LIST_NEXT(event, ev_link);
ISC_LIST_UNLINK(requestmgr->whenshutdown, event, ev_link);
etask = event->ev_sender;
event->ev_sender = requestmgr;
isc_task_sendanddetach(&etask, &event);
}
}
static void
mgr_destroy(dns_requestmgr_t *requestmgr) {
int i;
req_log(ISC_LOG_DEBUG(3), "mgr_destroy");
isc_refcount_destroy(&requestmgr->references);
isc_mutex_destroy(&requestmgr->lock);
for (i = 0; i < DNS_REQUEST_NLOCKS; i++) {
isc_mutex_destroy(&requestmgr->locks[i]);
}
if (requestmgr->dispatchv4 != NULL) {
dns_dispatch_detach(&requestmgr->dispatchv4);
}
if (requestmgr->dispatchv6 != NULL) {
dns_dispatch_detach(&requestmgr->dispatchv6);
}
if (requestmgr->dispatchmgr != NULL) {
dns_dispatchmgr_detach(&requestmgr->dispatchmgr);
}
if (requestmgr->taskmgr != NULL) {
isc_taskmgr_detach(&requestmgr->taskmgr);
}
requestmgr->magic = 0;
isc_mem_putanddetach(&requestmgr->mctx, requestmgr,
sizeof(*requestmgr));
}
static unsigned int
mgr_gethash(dns_requestmgr_t *requestmgr) {
req_log(ISC_LOG_DEBUG(3), "mgr_gethash");
/*
* Locked by caller.
*/
requestmgr->hash++;
return (requestmgr->hash % DNS_REQUEST_NLOCKS);
}
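/*
 * Mark the request as sending and hand the rendered query buffer to
 * the dispatch entry; req_senddone(), registered via dns_dispatch_add(),
 * is invoked when the send completes.
 */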
static inline void
req_send(dns_request_t *request) {
isc_region_t r;
req_log(ISC_LOG_DEBUG(3), "req_send: request %p", request);
REQUIRE(VALID_REQUEST(request));
isc_buffer_usedregion(request->query, &r);
request->flags |= DNS_REQUEST_F_SENDING;
dns_dispatch_send(request->dispentry, &r, request->dscp);
}
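/*
 * Allocate and initialize a bare request object holding a single
 * reference; the caller fills in the query buffer, dispatch entry and
 * completion event.
 */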
static isc_result_t
new_request(isc_mem_t *mctx, dns_request_t **requestp) {
dns_request_t *request = NULL;
request = isc_mem_get(mctx, sizeof(*request));
*request = (dns_request_t){ .dscp = -1 };
ISC_LINK_INIT(request, link);
isc_refcount_init(&request->references, 1);
isc_mem_attach(mctx, &request->mctx);
request->magic = REQUEST_MAGIC;
*requestp = request;
return (ISC_R_SUCCESS);
}
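/*
 * Check the destination address against the dispatch manager's
 * blackhole ACL; blackholed servers are never queried.
 */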
static bool
isblackholed(dns_dispatchmgr_t *dispatchmgr, const isc_sockaddr_t *destaddr) {
dns_acl_t *blackhole;
isc_netaddr_t netaddr;
char netaddrstr[ISC_NETADDR_FORMATSIZE];
int match;
isc_result_t result;
blackhole = dns_dispatchmgr_getblackhole(dispatchmgr);
if (blackhole == NULL) {
return (false);
}
isc_netaddr_fromsockaddr(&netaddr, destaddr);
result = dns_acl_match(&netaddr, NULL, blackhole, NULL, &match, NULL);
if (result != ISC_R_SUCCESS || match <= 0) {
return (false);
}
isc_netaddr_format(&netaddr, netaddrstr, sizeof(netaddrstr));
req_log(ISC_LOG_DEBUG(10), "blackholed address %s", netaddrstr);
return (true);
}
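/*
 * Find a TCP dispatch for 'destaddr'.  Unless 'newtcp' is set, first
 * try to share an existing (or still-connecting) TCP connection to the
 * same peer via dns_dispatch_gettcp(); otherwise create a new one.
 */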
static isc_result_t
tcp_dispatch(bool newtcp, dns_requestmgr_t *requestmgr,
const isc_sockaddr_t *srcaddr, const isc_sockaddr_t *destaddr,
isc_dscp_t dscp, bool *connected, dns_dispatch_t **dispatchp) {
isc_result_t result;
if (!newtcp) {
result = dns_dispatch_gettcp(requestmgr->dispatchmgr, destaddr,
srcaddr, connected, dispatchp);
if (result == ISC_R_SUCCESS) {
char peer[ISC_SOCKADDR_FORMATSIZE];
isc_sockaddr_format(destaddr, peer, sizeof(peer));
req_log(ISC_LOG_DEBUG(1),
"attached to %s TCP "
"connection to %s",
*connected ? "existing" : "pending", peer);
return (result);
}
}
result = dns_dispatch_createtcp(requestmgr->dispatchmgr, srcaddr,
destaddr, dscp, dispatchp);
2001-01-23 07:36:06 +00:00
return (result);
}
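/*
 * Find a UDP dispatch: with no source address, reuse the manager's
 * shared IPv4 or IPv6 dispatch; otherwise create a dispatch bound to
 * 'srcaddr'.
 */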
static isc_result_t
udp_dispatch(dns_requestmgr_t *requestmgr, const isc_sockaddr_t *srcaddr,
const isc_sockaddr_t *destaddr, dns_dispatch_t **dispatchp) {
dns_dispatch_t *disp = NULL;
if (srcaddr == NULL) {
switch (isc_sockaddr_pf(destaddr)) {
case PF_INET:
disp = requestmgr->dispatchv4;
break;
case PF_INET6:
disp = requestmgr->dispatchv6;
break;
default:
return (ISC_R_NOTIMPLEMENTED);
}
if (disp == NULL) {
return (ISC_R_FAMILYNOSUPPORT);
}
dns_dispatch_attach(disp, dispatchp);
return (ISC_R_SUCCESS);
}
return (dns_dispatch_createudp(requestmgr->dispatchmgr, srcaddr,
dispatchp));
}
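/*
 * Select or create the dispatch (TCP or UDP) used to send a request.
 */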
static isc_result_t
get_dispatch(bool tcp, bool newtcp, dns_requestmgr_t *requestmgr,
const isc_sockaddr_t *srcaddr, const isc_sockaddr_t *destaddr,
isc_dscp_t dscp, bool *connected, dns_dispatch_t **dispatchp) {
isc_result_t result;
if (tcp) {
result = tcp_dispatch(newtcp, requestmgr, srcaddr, destaddr,
dscp, connected, dispatchp);
} else {
result = udp_dispatch(requestmgr, srcaddr, destaddr, dispatchp);
}
return (result);
}
isc_result_t
dns_request_createraw(dns_requestmgr_t *requestmgr, isc_buffer_t *msgbuf,
const isc_sockaddr_t *srcaddr,
const isc_sockaddr_t *destaddr, isc_dscp_t dscp,
unsigned int options, unsigned int timeout,
unsigned int udptimeout, unsigned int udpretries,
isc_task_t *task, isc_taskaction_t action, void *arg,
dns_request_t **requestp) {
dns_request_t *request = NULL;
isc_task_t *tclone = NULL;
dns_request_t *rclone = NULL;
isc_result_t result;
isc_mem_t *mctx = NULL;
dns_messageid_t id;
bool tcp = false;
bool newtcp = false;
isc_region_t r;
bool connected = false;
unsigned int dispopt = 0;
REQUIRE(VALID_REQUESTMGR(requestmgr));
REQUIRE(msgbuf != NULL);
REQUIRE(destaddr != NULL);
REQUIRE(task != NULL);
REQUIRE(action != NULL);
REQUIRE(requestp != NULL && *requestp == NULL);
REQUIRE(timeout > 0);
if (srcaddr != NULL) {
REQUIRE(isc_sockaddr_pf(srcaddr) == isc_sockaddr_pf(destaddr));
}
mctx = requestmgr->mctx;
req_log(ISC_LOG_DEBUG(3), "dns_request_createraw");
if (atomic_load_acquire(&requestmgr->exiting)) {
return (ISC_R_SHUTTINGDOWN);
}
if (isblackholed(requestmgr->dispatchmgr, destaddr)) {
return (DNS_R_BLACKHOLED);
}
result = new_request(mctx, &request);
if (result != ISC_R_SUCCESS) {
return (result);
}
request->udpcount = udpretries;
request->dscp = dscp;
request->event = (dns_requestevent_t *)isc_event_allocate(
mctx, task, DNS_EVENT_REQUESTDONE, action, arg,
sizeof(dns_requestevent_t));
isc_task_attach(task, &tclone);
request->event->ev_sender = task;
request->event->request = request;
request->event->result = ISC_R_FAILURE;
isc_buffer_usedregion(msgbuf, &r);
if (r.length < DNS_MESSAGE_HEADERLEN || r.length > 65535) {
result = DNS_R_FORMERR;
goto cleanup;
}
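/*
 * Dispatch timeouts are in milliseconds.  TCP requests get the full
 * 'timeout'; UDP requests get a per-try timeout which defaults to an
 * even share of 'timeout' across the initial send and its retries.
 */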
if ((options & DNS_REQUESTOPT_TCP) != 0 || r.length > 512) {
tcp = true;
request->timeout = timeout * 1000;
} else {
if (udptimeout == 0 && udpretries != 0) {
udptimeout = timeout / (udpretries + 1);
}
if (udptimeout == 0) {
udptimeout = 1;
}
request->timeout = udptimeout * 1000;
}
isc_buffer_allocate(mctx, &request->query, r.length + (tcp ? 2 : 0));
result = isc_buffer_copyregion(request->query, &r);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
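/*
 * If a fixed message ID was requested and dns_dispatch_add() fails
 * (e.g. the ID is already in use on a shared TCP connection), retry
 * once with a dedicated TCP connection ('newtcp').
 */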
again:
result = get_dispatch(tcp, newtcp, requestmgr, srcaddr, destaddr, dscp,
&connected, &request->dispatch);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
if ((options & DNS_REQUESTOPT_FIXEDID) != 0) {
id = (r.base[0] << 8) | r.base[1];
dispopt |= DNS_DISPATCHOPT_FIXEDID;
}
req_attach(request, &rclone);
result = dns_dispatch_add(request->dispatch, dispopt, request->timeout,
destaddr, req_connected, req_senddone,
req_response, request, &id,
&request->dispentry);
if (result != ISC_R_SUCCESS) {
if ((options & DNS_REQUESTOPT_FIXEDID) != 0 && !newtcp) {
newtcp = true;
connected = false;
req_detach(&rclone);
dns_dispatch_detach(&request->dispatch);
goto again;
}
goto cleanup;
}
/* Add message ID. */
isc_buffer_usedregion(request->query, &r);
r.base[0] = (id >> 8) & 0xff;
r.base[1] = id & 0xff;
LOCK(&requestmgr->lock);
dns_requestmgr_attach(requestmgr, &request->requestmgr);
request->hash = mgr_gethash(requestmgr);
ISC_LIST_APPEND(requestmgr->requests, request, link);
UNLOCK(&requestmgr->lock);
request->destaddr = *destaddr;
if (tcp && connected) {
req_send(request);
req_detach(&rclone);
} else {
request->flags |= DNS_REQUEST_F_CONNECTING;
if (tcp) {
request->flags |= DNS_REQUEST_F_TCP;
}
result = dns_dispatch_connect(request->dispentry);
if (result != ISC_R_SUCCESS) {
goto unlink;
}
}
req_log(ISC_LOG_DEBUG(3), "dns_request_createraw: request %p", request);
*requestp = request;
return (ISC_R_SUCCESS);
unlink:
LOCK(&requestmgr->lock);
ISC_LIST_UNLINK(requestmgr->requests, request, link);
UNLOCK(&requestmgr->lock);
cleanup:
if (tclone != NULL) {
isc_task_detach(&tclone);
}
if (rclone != NULL) {
req_detach(&rclone);
}
req_detach(&request);
req_log(ISC_LOG_DEBUG(3), "dns_request_createraw: failed %s",
isc_result_totext(result));
return (result);
}
isc_result_t
dns_request_create(dns_requestmgr_t *requestmgr, dns_message_t *message,
const isc_sockaddr_t *address, unsigned int options,
dns_tsigkey_t *key, unsigned int timeout, isc_task_t *task,
isc_taskaction_t action, void *arg,
dns_request_t **requestp) {
return (dns_request_createvia(requestmgr, message, NULL, address, -1,
options, key, timeout, 0, 0, task, action,
arg, requestp));
}
isc_result_t
dns_request_createvia(dns_requestmgr_t *requestmgr, dns_message_t *message,
const isc_sockaddr_t *srcaddr,
const isc_sockaddr_t *destaddr, isc_dscp_t dscp,
unsigned int options, dns_tsigkey_t *key,
unsigned int timeout, unsigned int udptimeout,
unsigned int udpretries, isc_task_t *task,
isc_taskaction_t action, void *arg,
dns_request_t **requestp) {
dns_request_t *request = NULL;
isc_task_t *tclone = NULL;
dns_request_t *rclone = NULL;
isc_result_t result;
isc_mem_t *mctx = NULL;
dns_messageid_t id;
bool tcp = false;
bool connected = false;
REQUIRE(VALID_REQUESTMGR(requestmgr));
REQUIRE(message != NULL);
REQUIRE(destaddr != NULL);
REQUIRE(task != NULL);
REQUIRE(action != NULL);
REQUIRE(requestp != NULL && *requestp == NULL);
REQUIRE(timeout > 0);
mctx = requestmgr->mctx;
req_log(ISC_LOG_DEBUG(3), "dns_request_createvia");
if (atomic_load_acquire(&requestmgr->exiting)) {
return (ISC_R_SHUTTINGDOWN);
}
if (srcaddr != NULL &&
isc_sockaddr_pf(srcaddr) != isc_sockaddr_pf(destaddr))
{
return (ISC_R_FAMILYMISMATCH);
}
if (isblackholed(requestmgr->dispatchmgr, destaddr)) {
return (DNS_R_BLACKHOLED);
}
result = new_request(mctx, &request);
if (result != ISC_R_SUCCESS) {
return (result);
}
request->udpcount = udpretries;
request->dscp = dscp;
request->event = (dns_requestevent_t *)isc_event_allocate(
mctx, task, DNS_EVENT_REQUESTDONE, action, arg,
sizeof(dns_requestevent_t));
isc_task_attach(task, &tclone);
request->event->ev_sender = task;
request->event->request = request;
request->event->result = ISC_R_FAILURE;
if (key != NULL) {
dns_tsigkey_attach(key, &request->tsigkey);
}
result = dns_message_settsigkey(message, request->tsigkey);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
if ((options & DNS_REQUESTOPT_TCP) != 0) {
tcp = true;
request->timeout = timeout * 1000;
} else {
if (udptimeout == 0 && udpretries != 0) {
udptimeout = timeout / (udpretries + 1);
}
if (udptimeout == 0) {
udptimeout = 1;
}
request->timeout = udptimeout * 1000;
}
use_tcp:
result = get_dispatch(tcp, false, requestmgr, srcaddr, destaddr, dscp,
&connected, &request->dispatch);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
req_attach(request, &rclone);
result = dns_dispatch_add(
request->dispatch, 0, request->timeout, destaddr, req_connected,
req_senddone, req_response, request, &id, &request->dispentry);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
message->id = id;
result = req_render(message, &request->query, options, mctx);
if (result == DNS_R_USETCP && !tcp) {
/*
* Try again using TCP.
*/
req_detach(&rclone);
dns_message_renderreset(message);
dns_dispatch_done(&request->dispentry);
dns_dispatch_detach(&request->dispatch);
options |= DNS_REQUESTOPT_TCP;
tcp = true;
goto use_tcp;
}
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_getquerytsig(message, mctx, &request->tsig);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
LOCK(&requestmgr->lock);
dns_requestmgr_attach(requestmgr, &request->requestmgr);
request->hash = mgr_gethash(requestmgr);
ISC_LIST_APPEND(requestmgr->requests, request, link);
UNLOCK(&requestmgr->lock);
request->destaddr = *destaddr;
if (tcp && connected) {
req_send(request);
req_detach(&rclone);
} else {
request->flags |= DNS_REQUEST_F_CONNECTING;
if (tcp) {
request->flags |= DNS_REQUEST_F_TCP;
}
result = dns_dispatch_connect(request->dispentry);
if (result != ISC_R_SUCCESS) {
goto unlink;
}
}
req_log(ISC_LOG_DEBUG(3), "dns_request_createvia: request %p", request);
*requestp = request;
return (ISC_R_SUCCESS);
unlink:
LOCK(&requestmgr->lock);
ISC_LIST_UNLINK(requestmgr->requests, request, link);
UNLOCK(&requestmgr->lock);
cleanup:
if (tclone != NULL) {
isc_task_detach(&tclone);
}
if (rclone != NULL) {
req_detach(&rclone);
}
req_detach(&request);
req_log(ISC_LOG_DEBUG(3), "dns_request_createvia: failed %s",
isc_result_totext(result));
return (result);
}
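/*
 * Render 'message' into a new exact-sized buffer.  Returns DNS_R_USETCP
 * when the rendered message exceeds 512 bytes and TCP was not
 * requested, so the caller can retry over TCP.
 */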
static isc_result_t
req_render(dns_message_t *message, isc_buffer_t **bufferp, unsigned int options,
isc_mem_t *mctx) {
isc_buffer_t *buf1 = NULL;
isc_buffer_t *buf2 = NULL;
isc_result_t result;
isc_region_t r;
dns_compress_t cctx;
bool cleanup_cctx = false;
REQUIRE(bufferp != NULL && *bufferp == NULL);
req_log(ISC_LOG_DEBUG(3), "request_render");
/*
* Create buffer able to hold largest possible message.
*/
isc_buffer_allocate(mctx, &buf1, 65535);
result = dns_compress_init(&cctx, -1, mctx);
if (result != ISC_R_SUCCESS) {
return (result);
}
cleanup_cctx = true;
if ((options & DNS_REQUESTOPT_CASE) != 0) {
dns_compress_setsensitive(&cctx, true);
}
/*
* Render message.
*/
result = dns_message_renderbegin(message, &cctx, buf1);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_rendersection(message, DNS_SECTION_QUESTION, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_rendersection(message, DNS_SECTION_ANSWER, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_rendersection(message, DNS_SECTION_AUTHORITY, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_rendersection(message, DNS_SECTION_ADDITIONAL, 0);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = dns_message_renderend(message);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
dns_compress_invalidate(&cctx);
cleanup_cctx = false;
/*
* Copy rendered message to exact sized buffer.
*/
isc_buffer_usedregion(buf1, &r);
if ((options & DNS_REQUESTOPT_TCP) == 0 && r.length > 512) {
result = DNS_R_USETCP;
goto cleanup;
}
isc_buffer_allocate(mctx, &buf2, r.length);
result = isc_buffer_copyregion(buf2, &r);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
/*
* Cleanup and return.
*/
isc_buffer_free(&buf1);
*bufferp = buf2;
return (ISC_R_SUCCESS);
cleanup:
dns_message_renderreset(message);
if (buf1 != NULL) {
isc_buffer_free(&buf1);
}
if (buf2 != NULL) {
isc_buffer_free(&buf2);
}
if (cleanup_cctx) {
dns_compress_invalidate(&cctx);
}
return (result);
}
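
/*
 * Mark 'request' as canceled: clear the CONNECTING flag, cancel any
 * pending dispatch entry, and detach from the dispatch.  The caller
 * must hold the request's hash lock.
 */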
static void
request_cancel(dns_request_t *request) {
if (!DNS_REQUEST_CANCELED(request)) {
req_log(ISC_LOG_DEBUG(3), "request_cancel: request %p",
request);
request->flags |= DNS_REQUEST_F_CANCELED;
request->flags &= ~DNS_REQUEST_F_CONNECTING;
if (request->dispentry != NULL) {
dns_dispatch_cancel(&request->dispentry);
}
dns_dispatch_detach(&request->dispatch);
}
}
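
/*
 * Cancel an outstanding request and post ISC_R_CANCELED to the
 * caller's completion event.
 */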
void
dns_request_cancel(dns_request_t *request) {
REQUIRE(VALID_REQUEST(request));
req_log(ISC_LOG_DEBUG(3), "dns_request_cancel: request %p", request);
LOCK(&request->requestmgr->locks[request->hash]);
request_cancel(request);
req_sendevent(request, ISC_R_CANCELED);
UNLOCK(&request->requestmgr->locks[request->hash]);
}
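
/*
 * Parse the answer held by 'request' into 'message', restoring the
 * query TSIG context and verifying the response signature when a
 * TSIG key was used.
 */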
isc_result_t
dns_request_getresponse(dns_request_t *request, dns_message_t *message,
unsigned int options) {
isc_result_t result;
REQUIRE(VALID_REQUEST(request));
REQUIRE(request->answer != NULL);
req_log(ISC_LOG_DEBUG(3), "dns_request_getresponse: request %p",
request);
result = dns_message_setquerytsig(message, request->tsig);
if (result != ISC_R_SUCCESS) {
return (result);
}
result = dns_message_settsigkey(message, request->tsigkey);
if (result != ISC_R_SUCCESS) {
return (result);
}
result = dns_message_parse(message, request->answer, options);
if (result != ISC_R_SUCCESS) {
return (result);
}
if (request->tsigkey != NULL) {
result = dns_tsig_verify(request->answer, message, NULL, NULL);
}
return (result);
}
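
/*
 * Return the raw (unparsed) answer buffer held by 'request'.
 */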
isc_buffer_t *
dns_request_getanswer(dns_request_t *request) {
REQUIRE(VALID_REQUEST(request));
return (request->answer);
}
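
/*
 * Return true if the request was sent over TCP.
 */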
bool
dns_request_usedtcp(dns_request_t *request) {
REQUIRE(VALID_REQUEST(request));
return ((request->flags & DNS_REQUEST_F_TCP) != 0);
}
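
/*
 * Unlink 'request' from the request manager and release the caller's
 * reference.
 */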
void
dns_request_destroy(dns_request_t **requestp) {
dns_request_t *request;
REQUIRE(requestp != NULL && VALID_REQUEST(*requestp));
request = *requestp;
*requestp = NULL;
req_log(ISC_LOG_DEBUG(3), "dns_request_destroy: request %p", request);
LOCK(&request->requestmgr->lock);
LOCK(&request->requestmgr->locks[request->hash]);
ISC_LIST_UNLINK(request->requestmgr->requests, request, link);
UNLOCK(&request->requestmgr->locks[request->hash]);
UNLOCK(&request->requestmgr->lock);
/*
* These should have been cleaned up before the completion
* event was sent.
*/
INSIST(request->dispentry == NULL);
INSIST(request->dispatch == NULL);
req_detach(&request);
}
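
/*
 * Netmgr connect callback: the outgoing connection for 'request' has
 * either been established or failed.  On success the query is sent;
 * on timeout the dispatch state is torn down and the timeout is
 * reported; otherwise the request is canceled and ISC_R_CANCELED is
 * reported to the caller.
 */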
static void
req_connected(isc_result_t eresult, isc_region_t *region, void *arg) {
dns_request_t *request = (dns_request_t *)arg;
UNUSED(region);
req_log(ISC_LOG_DEBUG(3), "req_connected: request %p: %s", request,
isc_result_totext(eresult));
REQUIRE(VALID_REQUEST(request));
REQUIRE(DNS_REQUEST_CONNECTING(request) ||
DNS_REQUEST_CANCELED(request));
LOCK(&request->requestmgr->locks[request->hash]);
request->flags &= ~DNS_REQUEST_F_CONNECTING;
if (eresult == ISC_R_TIMEDOUT) {
dns_dispatch_done(&request->dispentry);
dns_dispatch_detach(&request->dispatch);
req_sendevent(request, eresult);
} else if (DNS_REQUEST_CANCELED(request)) {
req_sendevent(request, ISC_R_CANCELED);
} else if (eresult == ISC_R_SUCCESS) {
req_send(request);
} else {
request_cancel(request);
req_sendevent(request, ISC_R_CANCELED);
}
UNLOCK(&request->requestmgr->locks[request->hash]);
req_detach(&request);
}
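
/*
 * Netmgr send callback: the query has been passed to the network
 * manager (or the send failed).  The SENDING flag is cleared; a
 * canceled or failed send posts the completion event.
 */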
static void
req_senddone(isc_result_t eresult, isc_region_t *region, void *arg) {
dns_request_t *request = (dns_request_t *)arg;
REQUIRE(VALID_REQUEST(request));
REQUIRE(DNS_REQUEST_SENDING(request));
UNUSED(region);
req_log(ISC_LOG_DEBUG(3), "req_senddone: request %p", request);
LOCK(&request->requestmgr->locks[request->hash]);
request->flags &= ~DNS_REQUEST_F_SENDING;
if (DNS_REQUEST_CANCELED(request)) {
if (eresult == ISC_R_TIMEDOUT) {
req_sendevent(request, eresult);
} else {
req_sendevent(request, ISC_R_CANCELED);
}
} else if (eresult != ISC_R_SUCCESS) {
request_cancel(request);
req_sendevent(request, ISC_R_CANCELED);
}
UNLOCK(&request->requestmgr->locks[request->hash]);
}
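
/*
 * Dispatch response callback.  A canceled read is ignored; a timeout
 * is retried until 'udpcount' is exhausted.  Otherwise the response
 * region is copied into the request's answer buffer, the dispatch
 * state is cleaned up, and the completion event is posted with the
 * final result.
 */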
static void
req_response(isc_result_t result, isc_region_t *region, void *arg) {
dns_request_t *request = (dns_request_t *)arg;
req_log(ISC_LOG_DEBUG(3), "req_response: request %p: %s", request,
isc_result_totext(result));
if (result == ISC_R_CANCELED) {
return;
}
if (result == ISC_R_TIMEDOUT) {
LOCK(&request->requestmgr->locks[request->hash]);
if (--request->udpcount != 0) {
dns_dispatch_resume(request->dispentry,
request->timeout);
if (!DNS_REQUEST_SENDING(request)) {
req_send(request);
}
UNLOCK(&request->requestmgr->locks[request->hash]);
return;
}
/* The lock is unlocked below */
goto done;
}
REQUIRE(VALID_REQUEST(request));
LOCK(&request->requestmgr->locks[request->hash]);
if (result != ISC_R_SUCCESS) {
goto done;
}
/*
* Copy region to request.
*/
isc_buffer_allocate(request->mctx, &request->answer, region->length);
result = isc_buffer_copyregion(request->answer, region);
if (result != ISC_R_SUCCESS) {
isc_buffer_free(&request->answer);
}
done:
/*
* Cleanup.
*/
if (request->dispentry != NULL) {
dns_dispatch_done(&request->dispentry);
}
request_cancel(request);
/*
* Send completion event.
*/
req_sendevent(request, result);
UNLOCK(&request->requestmgr->locks[request->hash]);
}
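
/*
 * Post the completion event for 'request' (if one is pending) to the
 * caller's task.  The request's hash lock must be held by the caller.
 */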
static void
req_sendevent(dns_request_t *request, isc_result_t result) {
isc_task_t *task = NULL;
REQUIRE(VALID_REQUEST(request));
if (request->event == NULL) {
return;
}
req_log(ISC_LOG_DEBUG(3), "req_sendevent: request %p", request);
/*
* Lock held by caller.
*/
task = request->event->ev_sender;
request->event->ev_sender = request;
request->event->result = result;
isc_task_sendanddetach(&task, (isc_event_t **)&request->event);
}
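
/*
 * Attach '*targetp' to 'source', incrementing its reference count.
 */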
static void
req_attach(dns_request_t *source, dns_request_t **targetp) {
REQUIRE(VALID_REQUEST(source));
REQUIRE(targetp != NULL && *targetp == NULL);
isc_refcount_increment(&source->references);
*targetp = source;
}
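
/*
 * Release a reference to '*requestp'.  If the request manager is
 * shutting down and no requests remain, the pending shutdown events
 * are delivered; the request itself is destroyed when the last
 * reference is dropped.
 */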
static void
req_detach(dns_request_t **requestp) {
dns_request_t *request = NULL;
uint_fast32_t ref;
REQUIRE(requestp != NULL && VALID_REQUEST(*requestp));
request = *requestp;
*requestp = NULL;
ref = isc_refcount_decrement(&request->references);
if (request->requestmgr != NULL &&
atomic_load_acquire(&request->requestmgr->exiting))
{
		/*
		 * The manager is shutting down; if no requests remain,
		 * deliver the pending shutdown events.
		 */
LOCK(&request->requestmgr->lock);
if (ISC_LIST_EMPTY(request->requestmgr->requests)) {
send_shutdown_events(request->requestmgr);
}
UNLOCK(&request->requestmgr->lock);
}
if (ref == 1) {
req_destroy(request);
}
}
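
/*
 * Free all resources held by 'request' once the last reference has
 * been released.
 */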
static void
req_destroy(dns_request_t *request) {
REQUIRE(VALID_REQUEST(request));
req_log(ISC_LOG_DEBUG(3), "req_destroy: request %p", request);
isc_refcount_destroy(&request->references);
request->magic = 0;
if (request->query != NULL) {
isc_buffer_free(&request->query);
}
if (request->answer != NULL) {
isc_buffer_free(&request->answer);
}
if (request->event != NULL) {
isc_event_free((isc_event_t **)&request->event);
}
if (request->dispentry != NULL) {
dns_dispatch_done(&request->dispentry);
}
if (request->dispatch != NULL) {
dns_dispatch_detach(&request->dispatch);
}
if (request->tsig != NULL) {
isc_buffer_free(&request->tsig);
}
if (request->tsigkey != NULL) {
dns_tsigkey_detach(&request->tsigkey);
}
if (request->requestmgr != NULL) {
dns_requestmgr_detach(&request->requestmgr);
}
isc_mem_putanddetach(&request->mctx, request, sizeof(*request));
}
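
/*
 * Write a formatted log message for the request module at 'level'.
 */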
static void
req_log(int level, const char *fmt, ...) {
va_list ap;
va_start(ap, fmt);
isc_log_vwrite(dns_lctx, DNS_LOGCATEGORY_GENERAL, DNS_LOGMODULE_REQUEST,
level, fmt, ap);
va_end(ap);
}