/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * SPDX-License-Identifier: MPL-2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

/*! \file */

#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

#include <isc/atomic.h>
#include <isc/loop.h>
#include <isc/mem.h>
#include <isc/mutex.h>
#include <isc/net.h>
#include <isc/netmgr.h>
#include <isc/portset.h>
#include <isc/random.h>
#include <isc/stats.h>
#include <isc/string.h>
#include <isc/tid.h>
#include <isc/time.h>
#include <isc/tls.h>
#include <isc/util.h>

#include <dns/acl.h>
#include <dns/dispatch.h>
#include <dns/log.h>
#include <dns/message.h>
#include <dns/stats.h>
#include <dns/transport.h>
#include <dns/types.h>

typedef ISC_LIST(dns_dispentry_t) dns_displist_t;
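
/*%
 * Caller-side flow for an outgoing query, as described in the netmgr
 * conversion notes. This is a sketch only, not authoritative; some of
 * these names were later reworked (e.g. dns_dispatch_cancel() and
 * dns_dispatch_done() were merged into dns_dispatch_done()):
 *
 *	1. dns_dispatch_addresponse(): assign a query ID and register
 *	   connected/sent/response callbacks; on success a dispatch
 *	   entry (dns_dispentry_t) is instantiated.
 *	2. dns_dispatch_connect(): run isc_nm_udpconnect() or
 *	   isc_nm_tcpdnsconnect() as appropriate, then begin listening
 *	   for responses.
 *	3. dns_dispatch_send(): run isc_nm_send() to send the request.
 *	4. dns_dispatch_removeresponse(): stop listening and close the
 *	   connection.
 */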

typedef struct dns_qid {
        unsigned int magic;
        isc_mutex_t lock;
        unsigned int qid_nbuckets;  /*%< hash table size */
        unsigned int qid_increment; /*%< id increment on collision */
        dns_displist_t *qid_table;  /*%< the table itself */
} dns_qid_t;

struct dns_dispatchmgr {
        /* Unlocked. */
        unsigned int magic;
        isc_refcount_t references;
        isc_mem_t *mctx;
        dns_acl_t *blackhole;
        isc_stats_t *stats;
        isc_nm_t *nm;

        /* Locked by "lock". */
        isc_mutex_t lock;
        ISC_LIST(dns_dispatch_t) list;

        dns_qid_t *qid;

        in_port_t *v4ports;    /*%< available ports for IPv4 */
        unsigned int nv4ports; /*%< # of available ports for IPv4 */
        in_port_t *v6ports;    /*%< available ports for IPv6 */
        unsigned int nv6ports; /*%< # of available ports for IPv6 */
};

typedef enum {
        DNS_DISPATCHSTATE_NONE = 0UL,
        DNS_DISPATCHSTATE_CONNECTING,
        DNS_DISPATCHSTATE_CONNECTED,
        DNS_DISPATCHSTATE_CANCELED,
} dns_dispatchstate_t;
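
/*%
 * Both dns_dispatch_t and dns_dispentry_t track their lifecycle with
 * this state explicitly, rather than inferring it from other struct
 * members (see the thread-safety refactoring notes). Roughly: NONE
 * until a connect is issued, CONNECTING until the netmgr connect
 * callback fires, CONNECTED while the object is usable, and CANCELED
 * once the operation has been canceled.
 */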

struct dns_dispentry {
        unsigned int magic;
        isc_refcount_t references;

        dns_dispatch_t *disp;
        isc_loop_t *loop;
        isc_nmhandle_t *handle; /*%< netmgr handle for UDP connection */

        dns_dispatchstate_t state;
        dns_transport_t *transport;
        isc_tlsctx_cache_t *tlsctx_cache;

        unsigned int bucket;
        unsigned int retries;
        unsigned int timeout;
        isc_time_t start;

        isc_sockaddr_t local;
        isc_sockaddr_t peer;
        in_port_t port;
        dns_messageid_t id;

        dispatch_cb_t connected;
        dispatch_cb_t sent;
        dispatch_cb_t response;
        void *arg;

        bool reading;
        isc_result_t result;

        ISC_LINK(dns_dispentry_t) link;
        ISC_LINK(dns_dispentry_t) alink;
        ISC_LINK(dns_dispentry_t) plink;
        ISC_LINK(dns_dispentry_t) rlink;
};

struct dns_dispatch {
        /* Unlocked. */
        unsigned int magic; /*%< magic */
        uint32_t tid;
        dns_dispatchmgr_t *mgr; /*%< dispatch manager */

        isc_nmhandle_t *handle; /*%< netmgr handle for TCP connection */
        isc_sockaddr_t local;   /*%< local address */
        in_port_t localport;    /*%< local UDP port */
        isc_sockaddr_t peer;    /*%< peer address (TCP) */

        /*% Locked by mgr->lock. */
        ISC_LINK(dns_dispatch_t) link;

        /* Locked by "lock". */
        isc_mutex_t lock; /*%< locks all below */

        isc_socktype_t socktype;
        dns_dispatchstate_t state;
        isc_refcount_t references;
        bool reading;

        dns_displist_t pending;
        dns_displist_t active;

        unsigned int requests; /*%< how many requests we have */
        unsigned int timedout;
};
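
/*%
 * Note the asymmetry (per the thread-safety refactoring notes): a
 * connected UDP socket is handled from dns_dispentry_t, with
 * dns_dispatch_t acting as a broker, while a connected TCP socket is
 * handled from dns_dispatch_t, with dns_dispatchmgr_t acting as a
 * broker. Hence the per-entry UDP handle in dns_dispentry above and
 * the per-dispatch TCP handle here.
 */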

#define QID_MAGIC    ISC_MAGIC('Q', 'i', 'd', ' ')
#define VALID_QID(e) ISC_MAGIC_VALID((e), QID_MAGIC)

#define RESPONSE_MAGIC    ISC_MAGIC('D', 'r', 's', 'p')
#define VALID_RESPONSE(e) ISC_MAGIC_VALID((e), RESPONSE_MAGIC)

#define DISPSOCK_MAGIC    ISC_MAGIC('D', 's', 'o', 'c')
#define VALID_DISPSOCK(e) ISC_MAGIC_VALID((e), DISPSOCK_MAGIC)

#define DISPATCH_MAGIC    ISC_MAGIC('D', 'i', 's', 'p')
#define VALID_DISPATCH(e) ISC_MAGIC_VALID((e), DISPATCH_MAGIC)

#define DNS_DISPATCHMGR_MAGIC ISC_MAGIC('D', 'M', 'g', 'r')
#define VALID_DISPATCHMGR(e)  ISC_MAGIC_VALID((e), DNS_DISPATCHMGR_MAGIC)

/*%
 * Number of buckets in the QID hash table, and the value to
 * increment the QID by when attempting to avoid collisions.
 * The number of buckets should be prime, and the increment
 * should be the next higher prime number.
 */
#ifndef DNS_QID_BUCKETS
#define DNS_QID_BUCKETS 16411
#endif /* ifndef DNS_QID_BUCKETS */
#ifndef DNS_QID_INCREMENT
#define DNS_QID_INCREMENT 16433
#endif /* ifndef DNS_QID_INCREMENT */
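
/*
 * Illustration only (a sketch, not necessarily the exact code used
 * below): because the increment is odd, and therefore coprime to the
 * 65536-value ID space, stepping a colliding 16-bit query ID by
 * DNS_QID_INCREMENT visits every possible ID before repeating, e.g.:
 *
 *	id = (id + DNS_QID_INCREMENT) & 0xffff;
 */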

#if DNS_DISPATCH_TRACE
#define dns_dispentry_ref(ptr) \
        dns_dispentry__ref(ptr, __func__, __FILE__, __LINE__)
#define dns_dispentry_unref(ptr) \
        dns_dispentry__unref(ptr, __func__, __FILE__, __LINE__)
#define dns_dispentry_attach(ptr, ptrp) \
        dns_dispentry__attach(ptr, ptrp, __func__, __FILE__, __LINE__)
#define dns_dispentry_detach(ptrp) \
        dns_dispentry__detach(ptrp, __func__, __FILE__, __LINE__)
ISC_REFCOUNT_TRACE_DECL(dns_dispentry);
#else
ISC_REFCOUNT_DECL(dns_dispentry);
#endif
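
/*
 * When DNS_DISPATCH_TRACE is set, the macros above route
 * dns_dispentry_(ref|unref|attach|detach)() through variants that take
 * the call site (__func__, __FILE__, __LINE__) so reference-count
 * changes can be traced; otherwise ISC_REFCOUNT_DECL() declares the
 * plain reference-counting functions.
 */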
/*
2000-05-13 21:57:02 +00:00
* Statics.
*/
static void
dispatchmgr_destroy(dns_dispatchmgr_t *mgr);
static dns_dispentry_t *
entry_search(dns_qid_t *, const isc_sockaddr_t *, dns_messageid_t, in_port_t,
unsigned int);
static void
Convert dispatch to netmgr The flow of operations in dispatch is changing and will now be similar for both UDP and TCP queries: 1) Call dns_dispatch_addresponse() to assign a query ID and register that we'll be listening for a response with that ID soon. the parameters for this function include callback functions to inform the caller when the socket is connected and when the message has been sent, as well as a task action that will be sent when the response arrives. (later this could become a netmgr callback, but at this stage to minimize disruption to the calling code, we continue to use isc_task for the response event.) on successful completion of this function, a dispatch entry object will be instantiated. 2) Call dns_dispatch_connect() on the dispatch entry. this runs isc_nm_udpconnect() or isc_nm_tcpdnsconnect(), as needed, and begins listening for responses. the caller is informed via a callback function when the connection is established. 3) Call dns_dispatch_send() on the dispatch entry. this runs isc_nm_send() to send a request. 4) Call dns_dispatch_removeresponse() to terminate listening and close the connection. Implementation comments below: - As we will be using netmgr buffers now. code to send the length in TCP queries has also been removed as that is handled by the netmgr. - TCP dispatches can be used by multiple simultaneous queries, so dns_dispatch_connect() now checks whether the dispatch is already connected before calling isc_nm_tcpdnsconnect() again. - Running dns_dispatch_getnext() from a non-network thread caused a crash due to assertions in the netmgr read functions that appear to be unnecessary now. the assertions have been removed. - fctx->nqueries was formerly incremented when the connection was successful, but is now incremented when the query is started and decremented if the connection fails. - It's no longer necessary for each dispatch to have a pool of tasks, so there's now a single task per dispatch. - Dispatch code to avoid UDP ports already in use has been removed. - dns_resolver and dns_request have been modified to use netmgr callback functions instead of task events. some additional changes were needed to handle shutdown processing correctly. - Timeout processing is not yet fully converted to use netmgr timeouts. - Fixed a lock order cycle reported by TSAN (view -> zone-> adb -> view) by by calling dns_zt functions without holding the view lock.
2021-01-14 13:02:57 -08:00
udp_recv(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region,
void *arg);
static void
Convert dispatch to netmgr The flow of operations in dispatch is changing and will now be similar for both UDP and TCP queries: 1) Call dns_dispatch_addresponse() to assign a query ID and register that we'll be listening for a response with that ID soon. the parameters for this function include callback functions to inform the caller when the socket is connected and when the message has been sent, as well as a task action that will be sent when the response arrives. (later this could become a netmgr callback, but at this stage to minimize disruption to the calling code, we continue to use isc_task for the response event.) on successful completion of this function, a dispatch entry object will be instantiated. 2) Call dns_dispatch_connect() on the dispatch entry. this runs isc_nm_udpconnect() or isc_nm_tcpdnsconnect(), as needed, and begins listening for responses. the caller is informed via a callback function when the connection is established. 3) Call dns_dispatch_send() on the dispatch entry. this runs isc_nm_send() to send a request. 4) Call dns_dispatch_removeresponse() to terminate listening and close the connection. Implementation comments below: - As we will be using netmgr buffers now. code to send the length in TCP queries has also been removed as that is handled by the netmgr. - TCP dispatches can be used by multiple simultaneous queries, so dns_dispatch_connect() now checks whether the dispatch is already connected before calling isc_nm_tcpdnsconnect() again. - Running dns_dispatch_getnext() from a non-network thread caused a crash due to assertions in the netmgr read functions that appear to be unnecessary now. the assertions have been removed. - fctx->nqueries was formerly incremented when the connection was successful, but is now incremented when the query is started and decremented if the connection fails. - It's no longer necessary for each dispatch to have a pool of tasks, so there's now a single task per dispatch. - Dispatch code to avoid UDP ports already in use has been removed. - dns_resolver and dns_request have been modified to use netmgr callback functions instead of task events. some additional changes were needed to handle shutdown processing correctly. - Timeout processing is not yet fully converted to use netmgr timeouts. - Fixed a lock order cycle reported by TSAN (view -> zone-> adb -> view) by by calling dns_zt functions without holding the view lock.
2021-01-14 13:02:57 -08:00
tcp_recv(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region,
void *arg);
static uint32_t
dns_hash(dns_qid_t *, const isc_sockaddr_t *, dns_messageid_t, in_port_t);
static void
Fix the thread safety in the dns_dispatch unit The dispatches are not thread-bound, and used freely between various threads (see the dns_resolver and dns_request units for details). This refactoring make sure that all non-const dns_dispatch_t and dns_dispentry_t members are accessed under a lock, and both object now track their internal state (NONE, CONNECTING, CONNECTED, CANCELED) instead of guessing the state from the state of various struct members. During the refactoring, the artificial limit DNS_DISPATCH_SOCKSQUOTA on UDP sockets per dispatch was removed as the limiting needs to happen and happens on in dns_resolver and limiting the number of UDP sockets artificially in dispatch could lead to unpredictable behaviour in case one dispatch has the limit exhausted by others are idle. The TCP artificial limit of DNS_DISPATCH_MAXREQUESTS makes even less sense as the TCP connections are only reused in the dns_request API that's not a heavy user of the outgoing connections. As a side note, the fact that UDP and TCP dispatch pretends to be same thing, but in fact the connected UDP is handled from dns_dispentry_t and dns_dispatch_t acts as a broker, but connected TCP is handled from dns_dispatch_t and dns_dispatchmgr_t acts as a broker doesn't really help the clarity of this unit. This refactoring kept to API almost same - only dns_dispatch_cancel() and dns_dispatch_done() were merged into dns_dispatch_done() as we need to cancel active netmgr handles in any case to not leave dangling connections around. The functions handling UDP and TCP have been mostly split to their matching counterparts and the dns_dispatch_<function> functions are now thing wrappers that call <udp|tcp>_dispatch_<function> based on the socket type. More debugging-level logging was added to the unit to accomodate for this fact.
2022-11-30 17:58:35 +01:00
dispentry_cancel(dns_dispentry_t *resp, isc_result_t result);
static isc_result_t
dispatch_createudp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
dns_dispatch_t **dispp);
Dispatch API simplification - Many dispatch attributes can be set implicitly instead of being passed in. we can infer whether to set DNS_DISPATCHATTR_TCP or _UDP from whether we're calling dns_dispatch_createtcp() or _createudp(). we can also infer DNS_DISPATCHATTR_IPV4 or _IPV6 from the addresses or the socket that were passed in. - We no longer use dup'd sockets in UDP dispatches, so the 'dup_socket' parameter has been removed from dns_dispatch_createudp(), along with the code implementing it. also removed isc_socket_dup() since it no longer has any callers. - The 'buffersize' parameter was ignored and has now been removed; buffersize is now fixed at 4096. - Maxbuffers and maxrequests don't need to be passed in on every call to dns_dispatch_createtcp() and _createudp(). In all current uses, the value for mgr->maxbuffers will either be raised once from its default of 20000 to 32768, or else left alone. (passing in a value lower than 20000 does not lower it.) there isn't enough difference between these values for there to be any need to configure this. The value for disp->maxrequests controls both the quota of concurrent requests for a dispatch and also the size of the dispatch socket memory pool. it's not clear that this quota is necessary at all. the memory pool size currently starts at 32768, but is sometimes lowered to 4096, which is definitely unnecessary. This commit sets both values permanently to 32768. - Previously TCP dispatches allocated their own separate QID table, which didn't incorporate a port table. this commit removes per-dispatch QID tables and shares the same table between all dispatches. since dispatches are created for each TCP socket, this may speed up the dispatch allocation process. there may be a slight increase in lock contention since all dispatches are sharing a single QID table, but since TCP sockets are used less often than UDP sockets (which were already sharing a QID table), it should not be a substantial change. - The dispatch port table was being used to determine whether a port was already in use; if so, then a UDP socket would be bound with REUSEADDR. this commit removes the port table, and always binds UDP sockets that way.
2020-12-17 00:43:00 -08:00
static void
qid_allocate(dns_dispatchmgr_t *mgr, dns_qid_t **qidp);
static void
qid_destroy(isc_mem_t *mctx, dns_qid_t **qidp);
static void
udp_startrecv(isc_nmhandle_t *handle, dns_dispentry_t *resp);
static void
udp_dispatch_connect(dns_dispatch_t *disp, dns_dispentry_t *resp);
static void
tcp_startrecv(dns_dispatch_t *disp, dns_dispentry_t *resp);
static void
tcp_dispatch_getnext(dns_dispatch_t *disp, dns_dispentry_t *resp,
int32_t timeout);
static void
udp_dispatch_getnext(dns_dispentry_t *resp, int32_t timeout);
#define LVL(x) ISC_LOG_DEBUG(x)
static const char *
socktype2str(dns_dispentry_t *resp) {
dns_transport_type_t transport_type = DNS_TRANSPORT_UDP;
dns_dispatch_t *disp = resp->disp;
if (disp->socktype == isc_socktype_tcp) {
if (resp->transport != NULL) {
transport_type =
dns_transport_get_type(resp->transport);
} else {
transport_type = DNS_TRANSPORT_TCP;
}
}
switch (transport_type) {
case DNS_TRANSPORT_UDP:
return ("UDP");
case DNS_TRANSPORT_TCP:
return ("TCP");
case DNS_TRANSPORT_TLS:
return ("TLS");
case DNS_TRANSPORT_HTTP:
return ("HTTP");
default:
return ("<unexpected>");
}
}
static const char *
state2str(dns_dispatchstate_t state) {
switch (state) {
case DNS_DISPATCHSTATE_NONE:
return ("none");
case DNS_DISPATCHSTATE_CONNECTING:
return ("connecting");
case DNS_DISPATCHSTATE_CONNECTED:
return ("connected");
case DNS_DISPATCHSTATE_CANCELED:
return ("canceled");
default:
return ("<unexpected>");
}
}
static void
mgr_log(dns_dispatchmgr_t *mgr, int level, const char *fmt, ...)
ISC_FORMAT_PRINTF(3, 4);
static void
mgr_log(dns_dispatchmgr_t *mgr, int level, const char *fmt, ...) {
char msgbuf[2048];
va_list ap;
if (!isc_log_wouldlog(dns_lctx, level)) {
return;
}
va_start(ap, fmt);
vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
va_end(ap);
isc_log_write(dns_lctx, DNS_LOGCATEGORY_DISPATCH,
DNS_LOGMODULE_DISPATCH, level, "dispatchmgr %p: %s", mgr,
msgbuf);
}
static void
inc_stats(dns_dispatchmgr_t *mgr, isc_statscounter_t counter) {
if (mgr->stats != NULL) {
isc_stats_increment(mgr->stats, counter);
}
}
static void
dec_stats(dns_dispatchmgr_t *mgr, isc_statscounter_t counter) {
if (mgr->stats != NULL) {
isc_stats_decrement(mgr->stats, counter);
}
}
static void
dispatch_log(dns_dispatch_t *disp, int level, const char *fmt, ...)
ISC_FORMAT_PRINTF(3, 4);
static void
dispatch_log(dns_dispatch_t *disp, int level, const char *fmt, ...) {
char msgbuf[2048];
va_list ap;
int r;
if (!isc_log_wouldlog(dns_lctx, level)) {
return;
}
va_start(ap, fmt);
r = vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
if (r < 0) {
msgbuf[0] = '\0';
} else if ((unsigned int)r >= sizeof(msgbuf)) {
/* Truncated */
msgbuf[sizeof(msgbuf) - 1] = '\0';
}
va_end(ap);
isc_log_write(dns_lctx, DNS_LOGCATEGORY_DISPATCH,
DNS_LOGMODULE_DISPATCH, level, "dispatch %p: %s", disp,
msgbuf);
}
static void
dispentry_log(dns_dispentry_t *resp, int level, const char *fmt, ...)
ISC_FORMAT_PRINTF(3, 4);
static void
dispentry_log(dns_dispentry_t *resp, int level, const char *fmt, ...) {
char msgbuf[2048];
va_list ap;
int r;
if (!isc_log_wouldlog(dns_lctx, level)) {
return;
}
va_start(ap, fmt);
r = vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
if (r < 0) {
msgbuf[0] = '\0';
} else if ((unsigned int)r >= sizeof(msgbuf)) {
/* Truncated */
msgbuf[sizeof(msgbuf) - 1] = '\0';
}
va_end(ap);
dispatch_log(resp->disp, level, "%s response %p: %s",
socktype2str(resp), resp, msgbuf);
}
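/*%
 * A minimal usage sketch of the three logging helpers above (illustrative
 * only; the messages and values are hypothetical):
 *
 * \code
 *	mgr_log(mgr, LVL(90), "creating dispatch pool");
 *	dispatch_log(disp, LVL(90), "connected");
 *	dispentry_log(resp, LVL(90), "read callback:%s",
 *		      isc_result_totext(eresult));
 * \endcode
 *
 * Each helper returns early when isc_log_wouldlog() says the level is
 * filtered out, so the vsnprintf() formatting cost is only paid for
 * messages that will actually be written.
 */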
/*
* Return a hash of the destination and message id.
*/
static uint32_t
dns_hash(dns_qid_t *qid, const isc_sockaddr_t *dest, dns_messageid_t id,
in_port_t port) {
uint32_t ret;
ret = isc_sockaddr_hash(dest, true);
ret ^= ((uint32_t)id << 16) | port;
ret %= qid->qid_nbuckets;
INSIST(ret < qid->qid_nbuckets);
return (ret);
}
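/*%
 * Worked example (hypothetical values): with id = 0x1234 and port = 53,
 * ((uint32_t)id << 16) | port is 0x12340035; that value is XORed into
 * the address hash and reduced modulo qid_nbuckets, so the result can
 * index qid->qid_table[] directly:
 *
 * \code
 *	uint32_t bucket = dns_hash(qid, &peer, id, port);
 *	INSIST(bucket < qid->qid_nbuckets);
 * \endcode
 */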
/*%
* Choose a random port number for a dispatch entry.
* The caller must hold the disp->lock.
*/
static isc_result_t
setup_socket(dns_dispatch_t *disp, dns_dispentry_t *resp,
const isc_sockaddr_t *dest, in_port_t *portp) {
dns_dispatchmgr_t *mgr = disp->mgr;
unsigned int nports;
in_port_t *ports = NULL;
in_port_t port = *portp;
if (resp->retries++ > 5) {
return (ISC_R_FAILURE);
}
if (isc_sockaddr_pf(&disp->local) == AF_INET) {
nports = mgr->nv4ports;
ports = mgr->v4ports;
} else {
nports = mgr->nv6ports;
ports = mgr->v6ports;
}
if (nports == 0) {
return (ISC_R_ADDRNOTAVAIL);
}
resp->local = disp->local;
resp->peer = *dest;
if (port == 0) {
port = ports[isc_random_uniform(nports)];
isc_sockaddr_setport(&resp->local, port);
*portp = port;
}
resp->port = port;
return (ISC_R_SUCCESS);
}
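/*%
 * Sketch of the intended calling pattern (hypothetical caller code; the
 * real retry logic lives in the UDP connect path).  Passing *portp == 0
 * draws a random port from the configured v4/v6 port sets; a nonzero
 * *portp binds that same port again.  Each call increments resp->retries,
 * and once the count exceeds five the function fails, which is what
 * bounds EADDRINUSE retries:
 *
 * \code
 *	in_port_t port = 0;
 *	isc_result_t result = setup_socket(disp, resp, &resp->peer, &port);
 *	if (result == ISC_R_FAILURE) {
 *		return (result);	 too many retries for this dispentry
 *	}
 * \endcode
 */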
/*
* Find an entry for query ID 'id', socket address 'dest', and port number
* 'port'.
* Return NULL if no such entry exists.
*/
static dns_dispentry_t *
entry_search(dns_qid_t *qid, const isc_sockaddr_t *dest, dns_messageid_t id,
in_port_t port, unsigned int bucket) {
dns_dispentry_t *res = NULL;
REQUIRE(VALID_QID(qid));
REQUIRE(bucket < qid->qid_nbuckets);
res = ISC_LIST_HEAD(qid->qid_table[bucket]);
while (res != NULL) {
if (res->id == id && isc_sockaddr_equal(dest, &res->peer) &&
res->port == port)
{
return (res);
}
res = ISC_LIST_NEXT(res, link);
}
return (NULL);
}
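/*%
 * Lookup sketch combining dns_hash() and entry_search() (illustrative;
 * assumes the caller serializes access to the shared qid table):
 *
 * \code
 *	uint32_t bucket = dns_hash(qid, &resp->peer, resp->id, resp->port);
 *	dns_dispentry_t *match = entry_search(qid, &resp->peer, resp->id,
 *					      resp->port, bucket);
 * \endcode
 *
 * A NULL result means no outstanding query matches the response.
 */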
static void
dispentry_destroy(dns_dispentry_t *resp) {
dns_dispatch_t *disp = resp->disp;
/*
* We need to call this from here in case there's an external event that
* shuts down our dispatch (like ISC_R_SHUTTINGDOWN).
*/
dispentry_cancel(resp, ISC_R_CANCELED);
LOCK(&disp->lock);
INSIST(disp->requests > 0);
disp->requests--;
UNLOCK(&disp->lock);
isc_refcount_destroy(&resp->references);
resp->magic = 0;
INSIST(!ISC_LINK_LINKED(resp, link));
INSIST(!ISC_LINK_LINKED(resp, plink));
INSIST(!ISC_LINK_LINKED(resp, alink));
INSIST(!ISC_LINK_LINKED(resp, rlink));
dispentry_log(resp, LVL(90), "destroying");
if (resp->handle != NULL) {
dispentry_log(resp, LVL(90), "detaching handle %p from %p",
resp->handle, &resp->handle);
isc_nmhandle_detach(&resp->handle);
}
if (resp->tlsctx_cache != NULL) {
isc_tlsctx_cache_detach(&resp->tlsctx_cache);
}
if (resp->transport != NULL) {
dns_transport_detach(&resp->transport);
}
isc_mem_put(disp->mgr->mctx, resp, sizeof(*resp));
dns_dispatch_detach(&disp); /* DISPATCH001 */
}
#if DNS_DISPATCH_TRACE
ISC_REFCOUNT_TRACE_IMPL(dns_dispentry, dispentry_destroy);
#else
ISC_REFCOUNT_IMPL(dns_dispentry, dispentry_destroy);
#endif
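/*%
 * ISC_REFCOUNT_IMPL() generates the reference-counting entry points
 * (dns_dispentry_attach(), dns_dispentry_detach(), and friends), wiring
 * dispentry_destroy() up as the function to run when the last reference
 * is released.  A sketch of typical use:
 *
 * \code
 *	dns_dispentry_t *copy = NULL;
 *	dns_dispentry_attach(resp, &copy);
 *	...
 *	dns_dispentry_detach(&copy);
 * \endcode
 *
 * The final detach anywhere in the program is what triggers
 * dispentry_destroy() above.
 */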
/*
* How long in milliseconds has it been since this dispentry
* started reading?
*/
static unsigned int
dispentry_runtime(dns_dispentry_t *resp, const isc_time_t *now) {
if (isc_time_isepoch(&resp->start)) {
return (0);
}
return (isc_time_microdiff(now, &resp->start) / 1000);
}
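/*%
 * For example (sketch; 'resp->timeout' is an assumed field holding the
 * total read timeout in milliseconds), a caller that already has the
 * current time in 'now' could compute the time remaining as:
 *
 * \code
 *	int32_t timeout = resp->timeout - dispentry_runtime(resp, &now);
 * \endcode
 */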
/*
* General flow:
*
* If I/O result == CANCELED or error, free the buffer.
*
* If query, free the buffer, restart.
*
* If response:
* Allocate event, fill in details.
* If cannot allocate, free buffer, restart.
* Find target. If not found, free buffer, restart.
* If event queue is not empty, queue. Else, send.
* restart.
*/
static void
udp_recv(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region,
void *arg) {
dns_dispentry_t *resp = (dns_dispentry_t *)arg;
dns_dispatch_t *disp = NULL;
dns_messageid_t id;
isc_result_t dres;
isc_buffer_t source;
unsigned int flags;
isc_sockaddr_t peer;
isc_netaddr_t netaddr;
int match, timeout = 0;
bool respond = true;
isc_time_t now;
REQUIRE(VALID_RESPONSE(resp));
REQUIRE(VALID_DISPATCH(resp->disp));
disp = resp->disp;
LOCK(&disp->lock);
INSIST(resp->reading);
resp->reading = false;
if (resp->state == DNS_DISPATCHSTATE_CANCELED) {
/*
* Nobody is interested in the callback if the response
* has been canceled already. Detach from the response
* and the handle.
*/
respond = false;
eresult = ISC_R_CANCELED;
}
dispentry_log(resp, LVL(90), "read callback:%s, requests %d",
isc_result_totext(eresult), disp->requests);
if (eresult != ISC_R_SUCCESS) {
/*
* This is most likely a network error on a connected
* socket, a timeout, or the query has been canceled.
* It makes no sense to check the address or parse the
* packet, but we can return the error to the caller.
*/
goto done;
}
peer = isc_nmhandle_peeraddr(handle);
isc_netaddr_fromsockaddr(&netaddr, &peer);
/*
* If this is from a blackholed address, drop it.
*/
if (disp->mgr->blackhole != NULL &&
dns_acl_match(&netaddr, NULL, disp->mgr->blackhole, NULL, &match,
NULL) == ISC_R_SUCCESS &&
match > 0)
{
if (isc_log_wouldlog(dns_lctx, LVL(10))) {
char netaddrstr[ISC_NETADDR_FORMATSIZE];
isc_netaddr_format(&netaddr, netaddrstr,
sizeof(netaddrstr));
dispentry_log(resp, LVL(10),
"blackholed packet from %s", netaddrstr);
}
goto next;
}
/*
* Peek into the buffer to see what we can see.
*/
id = resp->id;
isc_buffer_init(&source, region->base, region->length);
isc_buffer_add(&source, region->length);
dres = dns_message_peekheader(&source, &id, &flags);
if (dres != ISC_R_SUCCESS) {
char netaddrstr[ISC_NETADDR_FORMATSIZE];
isc_netaddr_format(&netaddr, netaddrstr, sizeof(netaddrstr));
dispentry_log(resp, LVL(10), "got garbage packet from %s",
netaddrstr);
goto next;
}
dispentry_log(resp, LVL(92),
"got valid DNS message header, /QR %c, id %u",
(((flags & DNS_MESSAGEFLAG_QR) != 0) ? '1' : '0'), id);
/*
* Look at the message flags. If it's a query, ignore it.
*/
if ((flags & DNS_MESSAGEFLAG_QR) == 0) {
goto next;
}
/*
* The QID and the address must match the expected ones.
*/
if (resp->id != id || !isc_sockaddr_equal(&peer, &resp->peer)) {
dispentry_log(resp, LVL(90), "response doesn't match");
inc_stats(disp->mgr, dns_resstatscounter_mismatch);
goto next;
}
/*
* We have the right resp, so call the caller back.
*/
goto done;
next:
/*
* This is the wrong response. Check whether there is still enough
* time to wait for the correct one to arrive before the timeout fires.
*/
now = isc_loop_now(resp->loop);
timeout = resp->timeout - dispentry_runtime(resp, &now);
if (timeout <= 0) {
/*
* The time window for receiving the correct response is
* already closed, libuv has just not processed the socket
* timer yet. Invoke the read callback, indicating a timeout.
*/
eresult = ISC_R_TIMEDOUT;
goto done;
}
/*
* Do not invoke the read callback just yet and instead wait for the
* proper response to arrive until the original timeout fires.
*/
respond = false;
udp_dispatch_getnext(resp, timeout);
done:
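/*
 * Common exit: the dispatch lock is dropped first, then the caller's
 * response callback is invoked (unless we decided to keep waiting for
 * another packet), and finally the read reference on the dispentry is
 * released.
 */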
UNLOCK(&disp->lock);
if (respond) {
dispentry_log(resp, LVL(90), "UDP read callback on %p: %s",
handle, isc_result_totext(eresult));
resp->response(eresult, region, resp->arg);
}
dns_dispentry_detach(&resp); /* DISPENTRY003 */
}
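/*
 * Pick the oldest active dispentry as the one to receive a TCP read
 * timeout (on a shared TCP connection there is no query ID to match
 * against), bump disp->timedout, and return ISC_R_TIMEDOUT with
 * '*respp' set; return ISC_R_NOTFOUND if no response is waiting.
 * Presumably invoked from the TCP read callback when the read timer
 * fires.
 */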
static isc_result_t
tcp_recv_oldest(dns_dispatch_t *disp, dns_dispentry_t **respp) {
dns_dispentry_t *resp = ISC_LIST_HEAD(disp->active);
if (resp != NULL) {
disp->timedout++;
*respp = resp;
return (ISC_R_TIMEDOUT);
}
return (ISC_R_NOTFOUND);
}
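/*
 * A DNS message arrived on the TCP connection: peek at its header and
 * look up the matching dispentry by (peer, id, localport) in the QID
 * table.  On a match that is still reading, set '*respp' and return
 * ISC_R_SUCCESS; garbage, queries, and a QID we have already answered
 * yield ISC_R_UNEXPECTED, and an unknown QID yields ISC_R_NOTFOUND.
 */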
static isc_result_t
tcp_recv_success(dns_dispatch_t *disp, isc_region_t *region, dns_qid_t *qid,
isc_sockaddr_t *peer, dns_dispentry_t **respp) {
isc_buffer_t source;
dns_messageid_t id;
unsigned int flags;
unsigned int bucket;
isc_result_t result = ISC_R_SUCCESS;
dns_dispentry_t *resp = NULL;
dispatch_log(disp, LVL(90), "TCP read success, length == %d, addr = %p",
region->length, region->base);
/*
* Peek into the buffer to see what we can see.
*/
isc_buffer_init(&source, region->base, region->length);
isc_buffer_add(&source, region->length);
result = dns_message_peekheader(&source, &id, &flags);
if (result != ISC_R_SUCCESS) {
dispatch_log(disp, LVL(10), "got garbage packet");
return (ISC_R_UNEXPECTED);
}
dispatch_log(disp, LVL(92),
"got valid DNS message header, /QR %c, id %u",
(((flags & DNS_MESSAGEFLAG_QR) != 0) ? '1' : '0'), id);
/*
* Look at the message flags. If it's a query, ignore it and keep
* reading.
*/
if ((flags & DNS_MESSAGEFLAG_QR) == 0) {
dispatch_log(disp, LVL(10), "got DNS query instead of answer");
return (ISC_R_UNEXPECTED);
}
/*
* We have a valid response; find the associated dispentry object
* and call the caller back.
*/
bucket = dns_hash(qid, peer, id, disp->localport);
LOCK(&qid->lock);
resp = entry_search(qid, peer, id, disp->localport, bucket);
if (resp != NULL) {
if (resp->reading) {
*respp = resp;
} else {
/*
* We already got a message for this QID and weren't
* expecting any more.
*/
result = ISC_R_UNEXPECTED;
}
} else {
/* We are not expecting this DNS message */
result = ISC_R_NOTFOUND;
}
dispatch_log(disp, LVL(90), "search for response in bucket %d: %s",
bucket, isc_result_totext(result));
UNLOCK(&qid->lock);
return (result);
}
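/*
 * Move a dispentry from the dispatch's active list to the caller's
 * local 'resps' list, taking a reference (DISPENTRY009) and recording
 * 'result' for the response callback.  The caller is expected to walk
 * the list once the dispatch lock has been dropped, along the lines of
 * this sketch (the exact cleanup lives in the TCP read callback):
 *
 *	for (resp = ISC_LIST_HEAD(resps); resp != NULL; resp = next) {
 *		next = ISC_LIST_NEXT(resp, rlink);
 *		ISC_LIST_UNLINK(resps, resp, rlink);
 *		resp->response(resp->result, region, resp->arg);
 *		dns_dispentry_detach(&resp);
 *	}
 */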
static void
tcp_recv_add(dns_displist_t *resps, dns_dispentry_t *resp,
isc_result_t result) {
dns_dispentry_ref(resp); /* DISPENTRY009 */
ISC_LIST_UNLINK(resp->disp->active, resp, alink);
ISC_LIST_APPEND(*resps, resp, rlink);
INSIST(resp->reading);
resp->reading = false;
resp->result = result;
}
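
/*
 * Queue every remaining active response for completion with the given
 * result and mark the whole dispatch as canceled.
 */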
static void
tcp_recv_shutdown(dns_dispatch_t *disp, dns_displist_t *resps,
		  isc_result_t result) {
	dns_dispentry_t *resp = NULL, *next = NULL;

	/*
	 * If there are any active responses, shut them all down.
	 */
	for (resp = ISC_LIST_HEAD(disp->active); resp != NULL; resp = next) {
		next = ISC_LIST_NEXT(resp, alink);
		tcp_recv_add(resps, resp, result);
	}

	disp->state = DNS_DISPATCHSTATE_CANCELED;
}
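
/*
 * Invoke the read callback of every response collected on the local
 * list and drop the reference taken in tcp_recv_add(); this runs
 * after disp->lock has been released.
 */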
static void
tcp_recv_processall(dns_displist_t *resps, isc_region_t *region) {
	dns_dispentry_t *resp = NULL, *next = NULL;

	for (resp = ISC_LIST_HEAD(*resps); resp != NULL; resp = next) {
		next = ISC_LIST_NEXT(resp, rlink);
		ISC_LIST_UNLINK(*resps, resp, rlink);

		dispentry_log(resp, LVL(90), "read callback: %s",
			      isc_result_totext(resp->result));
		resp->response(resp->result, region, resp->arg);

		dns_dispentry_detach(&resp); /* DISPENTRY009 */
	}
}
/*
 * General flow:
 *
 * If I/O result == CANCELED, EOF, or error, notify everyone as the
 * various queues drain.
 *
 * If response:
 *	Find the matching response.  If not found, treat the read as
 *	unexpected, or as the late answer to a query that has already
 *	timed out.
 *	Queue the matched response's callback, resume reading, and run
 *	all queued callbacks once the lock has been dropped.
 */
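
/*
 * tcp_recv() is the netmgr read callback for a connected TCP dispatch.
 * Reading is re-armed in Phase 5 below via tcp_startrecv(), with the
 * handle's timeout derived from the oldest active response.
 */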
static void
tcp_recv(isc_nmhandle_t *handle, isc_result_t result, isc_region_t *region,
	 void *arg) {
	dns_dispatch_t *disp = (dns_dispatch_t *)arg;
	dns_dispentry_t *resp = NULL;
	dns_qid_t *qid = NULL;
	char buf[ISC_SOCKADDR_FORMATSIZE];
	isc_sockaddr_t peer;
	dns_displist_t resps = ISC_LIST_INITIALIZER;
	isc_time_t now;
	int timeout;

	REQUIRE(VALID_DISPATCH(disp));

	qid = disp->mgr->qid;

	LOCK(&disp->lock);

	INSIST(disp->reading);
	disp->reading = false;

	dispatch_log(disp, LVL(90), "TCP read:%s:requests %u",
		     isc_result_totext(result), disp->requests);

	peer = isc_nmhandle_peeraddr(handle);

	/*
	 * Phase 1: Process timeout and success.
	 */
	switch (result) {
	case ISC_R_TIMEDOUT:
		/*
		 * Time out the oldest response in the active queue.
		 */
		result = tcp_recv_oldest(disp, &resp);
		break;
	case ISC_R_SUCCESS:
		/* We got an answer */
		result = tcp_recv_success(disp, region, qid, &peer, &resp);
		break;
	default:
		break;
	}

	if (resp != NULL) {
		tcp_recv_add(&resps, resp, result);
	}
	/*
	 * Phase 2: Check whether an earlier timeout accounts for this
	 * otherwise unmatched read.
	 */
	if (result == ISC_R_NOTFOUND) {
		if (disp->timedout > 0) {
			/* An active query timed out earlier */
			disp->timedout--;
		} else {
			result = ISC_R_UNEXPECTED;
		}
	}
	/*
	 * Phase 3: Trigger timeouts.  It's possible that some responses
	 * would have timed out already, but non-matching TCP reads have
	 * prevented this.
	 */
	resp = ISC_LIST_HEAD(disp->active);
	if (resp != NULL) {
		now = isc_loop_now(resp->loop);
	}
	while (resp != NULL) {
		dns_dispentry_t *next = ISC_LIST_NEXT(resp, alink);

		timeout = resp->timeout - dispentry_runtime(resp, &now);
		if (timeout <= 0) {
			tcp_recv_add(&resps, resp, ISC_R_TIMEDOUT);
		}
		resp = next;
	}
	/*
	 * Phase 4: Log if we errored out.
	 */
	switch (result) {
	case ISC_R_SUCCESS:
	case ISC_R_TIMEDOUT:
	case ISC_R_NOTFOUND:
		break;
	case ISC_R_SHUTTINGDOWN:
	case ISC_R_CANCELED:
	case ISC_R_EOF:
	case ISC_R_CONNECTIONRESET:
		isc_sockaddr_format(&peer, buf, sizeof(buf));
		dispatch_log(disp, LVL(90), "shutting down TCP: %s: %s", buf,
			     isc_result_totext(result));
		tcp_recv_shutdown(disp, &resps, result);
		break;
	default:
		isc_sockaddr_format(&peer, buf, sizeof(buf));
		dispatch_log(disp, ISC_LOG_ERROR,
			     "shutting down due to TCP "
			     "receive error: %s: %s",
			     buf, isc_result_totext(result));
		tcp_recv_shutdown(disp, &resps, result);
		break;
	}
	/*
	 * Phase 5: Resume reading if there are still active responses.
	 */
	resp = ISC_LIST_HEAD(disp->active);
	if (resp != NULL) {
		timeout = resp->timeout - dispentry_runtime(resp, &now);
		INSIST(timeout > 0);
		tcp_startrecv(disp, resp);
		isc_nmhandle_settimeout(handle, timeout);
	}

	UNLOCK(&disp->lock);

	/*
	 * Phase 6: Process all scheduled callbacks.
	 */
	tcp_recv_processall(&resps, region);

	dns_dispatch_detach(&disp); /* DISPATCH002 */
}
/*%
* Create a temporary port list to set the initial default set of dispatch
* ephemeral ports. This is almost meaningless as the application will
* normally set the ports explicitly, but is provided to fill some minor corner
* cases.
*/
static void
create_default_portset(isc_mem_t *mctx, int family, isc_portset_t **portsetp) {
	in_port_t low, high;

	isc_net_getudpportrange(family, &low, &high);

	isc_portset_create(mctx, portsetp);
	isc_portset_addrange(*portsetp, low, high);
}
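
/*
 * Flatten the given port sets into the arrays of available ephemeral
 * ports (mgr->v4ports and mgr->v6ports) that can be indexed directly
 * when picking a source port, replacing any arrays installed earlier.
 */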
static isc_result_t
setavailports(dns_dispatchmgr_t *mgr, isc_portset_t *v4portset,
	      isc_portset_t *v6portset) {
	in_port_t *v4ports, *v6ports, p = 0;
	unsigned int nv4ports, nv6ports, i4 = 0, i6 = 0;

	nv4ports = isc_portset_nports(v4portset);
	nv6ports = isc_portset_nports(v6portset);

	v4ports = NULL;
	if (nv4ports != 0) {
		v4ports = isc_mem_cget(mgr->mctx, nv4ports, sizeof(in_port_t));
	}
	v6ports = NULL;
	if (nv6ports != 0) {
		v6ports = isc_mem_cget(mgr->mctx, nv6ports, sizeof(in_port_t));
	}

	do {
		if (isc_portset_isset(v4portset, p)) {
			INSIST(i4 < nv4ports);
			v4ports[i4++] = p;
		}
		if (isc_portset_isset(v6portset, p)) {
			INSIST(i6 < nv6ports);
			v6ports[i6++] = p;
		}
	} while (p++ < 65535);
	INSIST(i4 == nv4ports && i6 == nv6ports);

	if (mgr->v4ports != NULL) {
		isc_mem_cput(mgr->mctx, mgr->v4ports, mgr->nv4ports,
			     sizeof(in_port_t));
	}
	mgr->v4ports = v4ports;
	mgr->nv4ports = nv4ports;

	if (mgr->v6ports != NULL) {
		isc_mem_cput(mgr->mctx, mgr->v6ports, mgr->nv6ports,
			     sizeof(in_port_t));
	}
	mgr->v6ports = v6ports;
	mgr->nv6ports = nv6ports;

	return (ISC_R_SUCCESS);
}
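
/*
 * A minimal usage sketch (assuming "mgr" is a valid dispatch manager;
 * this mirrors what dns_dispatchmgr_create() below presumably does
 * with the default sets):
 *
 *	isc_portset_t *v4portset = NULL, *v6portset = NULL;
 *
 *	create_default_portset(mgr->mctx, AF_INET, &v4portset);
 *	create_default_portset(mgr->mctx, AF_INET6, &v6portset);
 *	setavailports(mgr, v4portset, v6portset);
 *	isc_portset_destroy(mgr->mctx, &v4portset);
 *	isc_portset_destroy(mgr->mctx, &v6portset);
 */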
/*
* Publics.
*/
isc_result_t
Convert dispatch to netmgr The flow of operations in dispatch is changing and will now be similar for both UDP and TCP queries: 1) Call dns_dispatch_addresponse() to assign a query ID and register that we'll be listening for a response with that ID soon. the parameters for this function include callback functions to inform the caller when the socket is connected and when the message has been sent, as well as a task action that will be sent when the response arrives. (later this could become a netmgr callback, but at this stage to minimize disruption to the calling code, we continue to use isc_task for the response event.) on successful completion of this function, a dispatch entry object will be instantiated. 2) Call dns_dispatch_connect() on the dispatch entry. this runs isc_nm_udpconnect() or isc_nm_tcpdnsconnect(), as needed, and begins listening for responses. the caller is informed via a callback function when the connection is established. 3) Call dns_dispatch_send() on the dispatch entry. this runs isc_nm_send() to send a request. 4) Call dns_dispatch_removeresponse() to terminate listening and close the connection. Implementation comments below: - As we will be using netmgr buffers now. code to send the length in TCP queries has also been removed as that is handled by the netmgr. - TCP dispatches can be used by multiple simultaneous queries, so dns_dispatch_connect() now checks whether the dispatch is already connected before calling isc_nm_tcpdnsconnect() again. - Running dns_dispatch_getnext() from a non-network thread caused a crash due to assertions in the netmgr read functions that appear to be unnecessary now. the assertions have been removed. - fctx->nqueries was formerly incremented when the connection was successful, but is now incremented when the query is started and decremented if the connection fails. - It's no longer necessary for each dispatch to have a pool of tasks, so there's now a single task per dispatch. - Dispatch code to avoid UDP ports already in use has been removed. - dns_resolver and dns_request have been modified to use netmgr callback functions instead of task events. some additional changes were needed to handle shutdown processing correctly. - Timeout processing is not yet fully converted to use netmgr timeouts. - Fixed a lock order cycle reported by TSAN (view -> zone-> adb -> view) by by calling dns_zt functions without holding the view lock.
2021-01-14 13:02:57 -08:00
dns_dispatchmgr_create(isc_mem_t *mctx, isc_nm_t *nm,
dns_dispatchmgr_t **mgrp) {
dns_dispatchmgr_t *mgr = NULL;
isc_portset_t *v4portset = NULL;
isc_portset_t *v6portset = NULL;
REQUIRE(mctx != NULL);
REQUIRE(mgrp != NULL && *mgrp == NULL);
mgr = isc_mem_get(mctx, sizeof(dns_dispatchmgr_t));
	*mgr = (dns_dispatchmgr_t){ .magic = 0 };
#if DNS_DISPATCH_TRACE
	fprintf(stderr, "dns_dispatchmgr__init:%s:%s:%d:%p->references = 1\n",
		__func__, __FILE__, __LINE__, mgr);
#endif
	isc_refcount_init(&mgr->references, 1);
	isc_mem_attach(mctx, &mgr->mctx);
	isc_nm_attach(nm, &mgr->nm);
	isc_mutex_init(&mgr->lock);

	ISC_LIST_INIT(mgr->list);

	create_default_portset(mctx, AF_INET, &v4portset);
	create_default_portset(mctx, AF_INET6, &v6portset);
	setavailports(mgr, v4portset, v6portset);
	isc_portset_destroy(mctx, &v4portset);
	isc_portset_destroy(mctx, &v6portset);
	qid_allocate(mgr, &mgr->qid);

	mgr->magic = DNS_DISPATCHMGR_MAGIC;
	*mgrp = mgr;
	return (ISC_R_SUCCESS);
}
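
/*
 * A minimal usage sketch from a hypothetical caller (not taken from
 * this file; error handling and the netmgr setup are omitted):
 *
 *	dns_dispatchmgr_t *dispmgr = NULL;
 *
 *	result = dns_dispatchmgr_create(mctx, netmgr, &dispmgr);
 *	...
 *	dns_dispatchmgr_detach(&dispmgr);
 */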
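
/*
 * ISC_REFCOUNT_IMPL() (or its tracing variant) generates the public
 * dns_dispatchmgr_attach() and dns_dispatchmgr_detach() functions and
 * arranges for dispatchmgr_destroy() to run when the last reference
 * is released.
 */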
#if DNS_DISPATCH_TRACE
ISC_REFCOUNT_TRACE_IMPL(dns_dispatchmgr, dispatchmgr_destroy);
#else
ISC_REFCOUNT_IMPL(dns_dispatchmgr, dispatchmgr_destroy);
#endif

void
dns_dispatchmgr_setblackhole(dns_dispatchmgr_t *mgr, dns_acl_t *blackhole) {
	REQUIRE(VALID_DISPATCHMGR(mgr));

	if (mgr->blackhole != NULL) {
		dns_acl_detach(&mgr->blackhole);
	}
	dns_acl_attach(blackhole, &mgr->blackhole);
}

dns_acl_t *
dns_dispatchmgr_getblackhole(dns_dispatchmgr_t *mgr) {
	REQUIRE(VALID_DISPATCHMGR(mgr));

	return (mgr->blackhole);
}

isc_result_t
dns_dispatchmgr_setavailports(dns_dispatchmgr_t *mgr, isc_portset_t *v4portset,
			      isc_portset_t *v6portset) {
	REQUIRE(VALID_DISPATCHMGR(mgr));

	return (setavailports(mgr, v4portset, v6portset));
}
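
/*
 * Tear down the manager once its reference count has dropped to zero;
 * called via the ISC_REFCOUNT_IMPL() destructor hook above.
 */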
static void
dispatchmgr_destroy(dns_dispatchmgr_t *mgr) {
	REQUIRE(VALID_DISPATCHMGR(mgr));

	isc_refcount_destroy(&mgr->references);

	mgr->magic = 0;
	isc_mutex_destroy(&mgr->lock);
	qid_destroy(mgr->mctx, &mgr->qid);

	if (mgr->blackhole != NULL) {
		dns_acl_detach(&mgr->blackhole);
	}

	if (mgr->stats != NULL) {
		isc_stats_detach(&mgr->stats);
	}

	if (mgr->v4ports != NULL) {
		isc_mem_cput(mgr->mctx, mgr->v4ports, mgr->nv4ports,
			     sizeof(in_port_t));
	}
	if (mgr->v6ports != NULL) {
		isc_mem_cput(mgr->mctx, mgr->v6ports, mgr->nv6ports,
			     sizeof(in_port_t));
	}
	isc_nm_detach(&mgr->nm);
	isc_mem_putanddetach(&mgr->mctx, mgr, sizeof(dns_dispatchmgr_t));
}
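
/*
 * The statistics counter set can be installed only once, and only
 * while no dispatches exist (hence the REQUIREs below).
 */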
void
dns_dispatchmgr_setstats(dns_dispatchmgr_t *mgr, isc_stats_t *stats) {
	REQUIRE(VALID_DISPATCHMGR(mgr));
	REQUIRE(ISC_LIST_EMPTY(mgr->list));
	REQUIRE(mgr->stats == NULL);

	isc_stats_attach(stats, &mgr->stats);
}
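
/*
 * Allocate the QID table that maps outstanding query IDs to dispatch
 * entries; a single table is shared by all dispatches in the manager.
 */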
static void
qid_allocate(dns_dispatchmgr_t *mgr, dns_qid_t **qidp) {
	dns_qid_t *qid = NULL;
	unsigned int i;

	REQUIRE(qidp != NULL && *qidp == NULL);

	qid = isc_mem_get(mgr->mctx, sizeof(*qid));
	*qid = (dns_qid_t){ .qid_nbuckets = DNS_QID_BUCKETS,
			    .qid_increment = DNS_QID_INCREMENT };
	qid->qid_table = isc_mem_cget(mgr->mctx, DNS_QID_BUCKETS,
				      sizeof(dns_displist_t));
	for (i = 0; i < qid->qid_nbuckets; i++) {
		ISC_LIST_INIT(qid->qid_table[i]);
	}
	isc_mutex_init(&qid->lock);
	qid->magic = QID_MAGIC;
	*qidp = qid;
}

static void
qid_destroy(isc_mem_t *mctx, dns_qid_t **qidp) {
	dns_qid_t *qid = NULL;

	REQUIRE(qidp != NULL);

	qid = *qidp;
	*qidp = NULL;

	REQUIRE(VALID_QID(qid));

	qid->magic = 0;
	isc_mem_cput(mctx, qid->qid_table, qid->qid_nbuckets,
		     sizeof(dns_displist_t));
	isc_mutex_destroy(&qid->lock);
	isc_mem_put(mctx, qid, sizeof(*qid));
}

/*
 * Allocate a dispatch object and initialize the fields that are
 * common to both TCP and UDP dispatches.
 */
static void
dispatch_allocate(dns_dispatchmgr_t *mgr, isc_socktype_t type,
		  dns_dispatch_t **dispp) {
	dns_dispatch_t *disp = NULL;

	REQUIRE(VALID_DISPATCHMGR(mgr));
	REQUIRE(dispp != NULL && *dispp == NULL);

	/*
	 * Set up the dispatcher, mostly. Don't bother setting some of
	 * the options that are controlled by tcp vs. udp, etc.
	 */
	disp = isc_mem_get(mgr->mctx, sizeof(*disp));
	*disp = (dns_dispatch_t){
		.socktype = type,
		.link = ISC_LINK_INITIALIZER,
		.active = ISC_LIST_INITIALIZER,
		.pending = ISC_LIST_INITIALIZER,
		.tid = isc_tid(),
		.magic = DISPATCH_MAGIC,
	};
	dns_dispatchmgr_attach(mgr, &disp->mgr);
#if DNS_DISPATCH_TRACE
	fprintf(stderr, "dns_dispatch__init:%s:%s:%d:%p->references = 1\n",
		__func__, __FILE__, __LINE__, disp);
#endif
	isc_refcount_init(&disp->references, 1); /* DISPATCH000 */
	isc_mutex_init(&disp->lock);

	*dispp = disp;
}
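
/*
 * Create a TCP dispatch for queries to 'destaddr', bound to 'localaddr'
 * when one is given, or to the wildcard address (with an ephemeral
 * port) otherwise.
 */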
isc_result_t
dns_dispatch_createtcp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
		       const isc_sockaddr_t *destaddr, dns_dispatch_t **dispp) {
	dns_dispatch_t *disp = NULL;

	REQUIRE(VALID_DISPATCHMGR(mgr));
	REQUIRE(destaddr != NULL);

	LOCK(&mgr->lock);

	dispatch_allocate(mgr, isc_socktype_tcp, &disp);

	disp->peer = *destaddr;

	if (localaddr != NULL) {
		disp->local = *localaddr;
	} else {
		int pf;

		pf = isc_sockaddr_pf(destaddr);
		isc_sockaddr_anyofpf(&disp->local, pf);
		isc_sockaddr_setport(&disp->local, 0);
	}

	/*
	 * Append it to the dispatcher list.
	 */

	/* FIXME: There should be a lookup hashtable here */
	ISC_LIST_APPEND(mgr->list, disp, link);
	UNLOCK(&mgr->lock);

	if (isc_log_wouldlog(dns_lctx, 90)) {
		char addrbuf[ISC_SOCKADDR_FORMATSIZE];

		isc_sockaddr_format(&disp->local, addrbuf,
				    ISC_SOCKADDR_FORMATSIZE);
		mgr_log(mgr, LVL(90),
			"dns_dispatch_createtcp: created TCP dispatch %p for "
			"%s",
			disp, addrbuf);
	}
	*dispp = disp;

	return (ISC_R_SUCCESS);
}
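
/*
 * A minimal usage sketch from a hypothetical caller (not taken from
 * this file; error handling omitted):
 *
 *	dns_dispatch_t *disp = NULL;
 *
 *	result = dns_dispatch_createtcp(dispmgr, NULL, &peeraddr, &disp);
 *	...
 *	dns_dispatch_detach(&disp);
 */

/*
 * Look for an existing TCP dispatch to 'destaddr' (and, if set, from
 * 'localaddr') that can be shared; a dispatch that is already connected
 * is preferred, with one that is still connecting kept as a fallback.
 */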
isc_result_t
dns_dispatch_gettcp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *destaddr,
		    const isc_sockaddr_t *localaddr, dns_dispatch_t **dispp) {
	dns_dispatch_t *disp_connected = NULL;
	dns_dispatch_t *disp_fallback = NULL;
	isc_result_t result = ISC_R_NOTFOUND;

	REQUIRE(VALID_DISPATCHMGR(mgr));
	REQUIRE(destaddr != NULL);
	REQUIRE(dispp != NULL && *dispp == NULL);

	LOCK(&mgr->lock);
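
	/*
	 * Walk the manager's dispatch list for a TCP dispatch to the same
	 * destination owned by this thread; one that is already connected
	 * is used right away, while the first dispatch found still
	 * connecting is remembered as a fallback.
	 */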
	for (dns_dispatch_t *disp = ISC_LIST_HEAD(mgr->list); disp != NULL;
	     disp = ISC_LIST_NEXT(disp, link))
	{
		isc_sockaddr_t sockname;
		isc_sockaddr_t peeraddr;

		LOCK(&disp->lock);

		if (disp->tid != isc_tid()) {
			UNLOCK(&disp->lock);
			continue;
		}

		if (disp->handle != NULL) {
			sockname = isc_nmhandle_localaddr(disp->handle);
			peeraddr = isc_nmhandle_peeraddr(disp->handle);
		} else {
			sockname = disp->local;
			peeraddr = disp->peer;
		}

		/*
		 * A dispatch matches if:
		 * 1. the socktype is TCP,
		 * 2. the destination address is the same, and
		 * 3. the local address is either unspecified or the same.
		 */
		if (disp->socktype != isc_socktype_tcp ||
		    !isc_sockaddr_equal(destaddr, &peeraddr) ||
		    (localaddr != NULL &&
		     !isc_sockaddr_eqaddr(localaddr, &sockname)))
		{
			UNLOCK(&disp->lock);
			continue;
		}

		switch (disp->state) {
		case DNS_DISPATCHSTATE_NONE:
			/* A dispatch in an indeterminate state; skip it. */
			break;

		case DNS_DISPATCHSTATE_CONNECTED:
			if (ISC_LIST_EMPTY(disp->active)) {
				/* Ignore a dispatch with no responses. */
				break;
			}

			/* We found a connected dispatch. */
			dns_dispatch_attach(disp, &disp_connected);
			break;

		case DNS_DISPATCHSTATE_CONNECTING:
			if (ISC_LIST_EMPTY(disp->pending)) {
				/* Ignore a dispatch with no responses. */
				break;
			}

			/* We found "a" dispatch; store it for later. */
			if (disp_fallback == NULL) {
				dns_dispatch_attach(disp, &disp_fallback);
			}
			break;

		case DNS_DISPATCHSTATE_CANCELED:
			/* A canceled dispatch; skip it. */
			break;

		default:
			UNREACHABLE();
		}
		UNLOCK(&disp->lock);
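
		/* Stop scanning once a connected dispatch has been found. */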
		if (disp_connected != NULL) {
			break;
		}
	}

	if (disp_connected != NULL) {
		/* We found a connected dispatch. */
		INSIST(disp_connected->handle != NULL);
		*dispp = disp_connected;
		disp_connected = NULL;
result = ISC_R_SUCCESS;
if (disp_fallback != NULL) {
dns_dispatch_detach(&disp_fallback);
}
} else if (disp_fallback != NULL) {
*dispp = disp_fallback;
result = ISC_R_SUCCESS;
}
UNLOCK(&mgr->lock);
return (result);
}
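/*
 * Public entry point: create a UDP dispatch bound to 'localaddr'.
 * This is a thin wrapper that takes the manager lock and delegates
 * to dispatch_createudp() below.
 */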
isc_result_t
dns_dispatch_createudp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
dns_dispatch_t **dispp) {
isc_result_t result;
dns_dispatch_t *disp = NULL;
REQUIRE(VALID_DISPATCHMGR(mgr));
REQUIRE(localaddr != NULL);
REQUIRE(dispp != NULL && *dispp == NULL);
LOCK(&mgr->lock);
result = dispatch_createudp(mgr, localaddr, &disp);
if (result == ISC_R_SUCCESS) {
*dispp = disp;
}
UNLOCK(&mgr->lock);
return (result);
}
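/*
 * Create a UDP dispatch for 'localaddr': check that the address is
 * usable locally (unless it is the wildcard address), allocate the
 * dispatch object, and record the local address.  Called with the
 * manager lock held.
 */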
static isc_result_t
dispatch_createudp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
dns_dispatch_t **dispp) {
isc_result_t result = ISC_R_SUCCESS;
dns_dispatch_t *disp = NULL;
isc_sockaddr_t sa_any;
/*
* Check whether this address/port is available locally.
*/
isc_sockaddr_anyofpf(&sa_any, isc_sockaddr_pf(localaddr));
if (!isc_sockaddr_eqaddr(&sa_any, localaddr)) {
result = isc_nm_checkaddr(localaddr, isc_socktype_udp);
if (result != ISC_R_SUCCESS) {
return (result);
}
}
dispatch_allocate(mgr, isc_socktype_udp, &disp);
if (isc_log_wouldlog(dns_lctx, 90)) {
char addrbuf[ISC_SOCKADDR_FORMATSIZE];
isc_sockaddr_format(localaddr, addrbuf,
ISC_SOCKADDR_FORMATSIZE);
mgr_log(mgr, LVL(90),
"dispatch_createudp: created UDP dispatch %p for %s",
disp, addrbuf);
}
disp->local = *localaddr;
/*
* Don't append this dispatch to the manager's list: only TCP
* dispatches need to be found there, so UDP dispatches are left
* unlinked.
*
* ISC_LIST_APPEND(mgr->list, disp, link);
*/
*dispp = disp;
return (result);
}
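/*
 * Final destructor, run when the last reference to the dispatch is
 * detached: unlink the dispatch from the manager's list, verify that
 * no requests or dispatch entries are still outstanding, detach the
 * TCP handle if one is attached, and free the dispatch.
 */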
static void
dispatch_destroy(dns_dispatch_t *disp) {
dns_dispatchmgr_t *mgr = disp->mgr;
isc_refcount_destroy(&disp->references);
disp->magic = 0;
LOCK(&mgr->lock);
if (ISC_LINK_LINKED(disp, link)) {
ISC_LIST_UNLINK(disp->mgr->list, disp, link);
}
UNLOCK(&mgr->lock);
INSIST(disp->requests == 0);
INSIST(ISC_LIST_EMPTY(disp->pending));
INSIST(ISC_LIST_EMPTY(disp->active));
INSIST(!ISC_LINK_LINKED(disp, link));
dispatch_log(disp, LVL(90), "destroying dispatch %p", disp);
if (disp->handle != NULL) {
dispatch_log(disp, LVL(90), "detaching TCP handle %p from %p",
disp->handle, &disp->handle);
isc_nmhandle_detach(&disp->handle);
}
isc_mutex_destroy(&disp->lock);
isc_mem_put(mgr->mctx, disp, sizeof(*disp));
/*
* Because dispatch uses mgr->mctx, we must detach after freeing
* dispatch, not before.
*/
dns_dispatchmgr_detach(&mgr);
}
#if DNS_DISPATCH_TRACE
ISC_REFCOUNT_TRACE_IMPL(dns_dispatch, dispatch_destroy);
#else
ISC_REFCOUNT_IMPL(dns_dispatch, dispatch_destroy);
#endif
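/*
 * Allocate a dispatch entry ('resp') for a query to 'dest', assign it
 * a query ID, and register the caller's callbacks.  A minimal usage
 * sketch (the real callers are dns_resolver and dns_request;
 * connected_cb, sent_cb, response_cb and 'arg' are assumed to be
 * supplied by the caller):
 *
 *	dns_dispentry_t *resp = NULL;
 *	dns_messageid_t id;
 *	result = dns_dispatch_add(disp, loop, 0, timeout, &peer,
 *				  NULL, NULL, connected_cb, sent_cb,
 *				  response_cb, arg, &id, &resp);
 *	if (result == ISC_R_SUCCESS) {
 *		dns_dispatch_connect(resp);
 *		... later, dns_dispatch_send() and dns_dispatch_done() ...
 *	}
 */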
isc_result_t
dns_dispatch_add(dns_dispatch_t *disp, isc_loop_t *loop, unsigned int options,
unsigned int timeout, const isc_sockaddr_t *dest,
dns_transport_t *transport, isc_tlsctx_cache_t *tlsctx_cache,
dispatch_cb_t connected, dispatch_cb_t sent,
dispatch_cb_t response, void *arg, dns_messageid_t *idp,
dns_dispentry_t **respp) {
dns_dispentry_t *resp = NULL;
dns_qid_t *qid = NULL;
in_port_t localport;
dns_messageid_t id;
unsigned int bucket;
bool ok = false;
int i = 0;
REQUIRE(VALID_DISPATCH(disp));
REQUIRE(dest != NULL);
REQUIRE(respp != NULL && *respp == NULL);
REQUIRE(idp != NULL);
REQUIRE(disp->socktype == isc_socktype_tcp ||
disp->socktype == isc_socktype_udp);
REQUIRE(connected != NULL);
REQUIRE(response != NULL);
REQUIRE(sent != NULL);
REQUIRE(loop != NULL);
LOCK(&disp->lock);
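/* A canceled dispatch cannot accept new dispatch entries. */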
if (disp->state == DNS_DISPATCHSTATE_CANCELED) {
UNLOCK(&disp->lock);
return (ISC_R_CANCELED);
}
qid = disp->mgr->qid;
localport = isc_sockaddr_getport(&disp->local);
resp = isc_mem_get(disp->mgr->mctx, sizeof(*resp));
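/*
 * Bind the new entry to the caller's loop, peer address, timeout,
 * and callbacks; the local port is inherited from the dispatch.
 */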
*resp = (dns_dispentry_t){
.port = localport,
.timeout = timeout,
.peer = *dest,
.loop = loop,
.connected = connected,
.sent = sent,
.response = response,
.arg = arg,
.link = ISC_LINK_INITIALIZER,
.alink = ISC_LINK_INITIALIZER,
.plink = ISC_LINK_INITIALIZER,
.rlink = ISC_LINK_INITIALIZER,
.magic = RESPONSE_MAGIC,
};
#if DNS_DISPATCH_TRACE
fprintf(stderr, "dns_dispentry__init:%s:%s:%d:%p->references = 1\n",
__func__, __FILE__, __LINE__, resp);
#endif
isc_refcount_init(&resp->references, 1); /* DISPENTRY000 */
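	/*
	 * Only a UDP entry gets its own socket at this point:
	 * setup_socket() picks the local port (returned in
	 * 'localport') that feeds into the QID hash below.  A TCP
	 * entry shares the dispatch's single connection, which is
	 * only established later by dns_dispatch_connect().
	 */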
if (disp->socktype == isc_socktype_udp) {
isc_result_t result = setup_socket(disp, resp, dest,
&localport);
if (result != ISC_R_SUCCESS) {
isc_mem_put(disp->mgr->mctx, resp, sizeof(*resp));
UNLOCK(&disp->lock);
inc_stats(disp->mgr, dns_resstatscounter_dispsockfail);
return (result);
}
}
/*
* Try somewhat hard to find a unique ID. Start with
* a random number unless DNS_DISPATCHOPT_FIXEDID is set,
* in which case we start with the ID passed in via *idp.
*/
if ((options & DNS_DISPATCHOPT_FIXEDID) != 0) {
id = *idp;
} else {
id = (dns_messageid_t)isc_random16();
}
LOCK(&qid->lock);
do {
dns_dispentry_t *entry = NULL;
bucket = dns_hash(qid, dest, id, localport);
entry = entry_search(qid, dest, id, localport, bucket);
if (entry == NULL) {
ok = true;
break;
}
if ((options & DNS_DISPATCHOPT_FIXEDID) != 0) {
		/* When using a fixed ID we must either use it or fail. */
break;
}
id += qid->qid_increment;
id &= 0x0000ffff;
} while (i++ < 64);
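
	/*
	 * At this point either a free (dest, id, port) slot was found
	 * within the loop's bounded number of probes ('ok' is true and
	 * 'bucket' is valid), or every candidate ID was taken (just one
	 * candidate when DNS_DISPATCHOPT_FIXEDID is set) and we fail
	 * with ISC_R_NOMORE below.
	 */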
if (ok) {
resp->id = id;
resp->bucket = bucket;
ISC_LIST_APPEND(qid->qid_table[bucket], resp, link);
}
UNLOCK(&qid->lock);
if (!ok) {
isc_mem_put(disp->mgr->mctx, resp, sizeof(*resp));
UNLOCK(&disp->lock);
return (ISC_R_NOMORE);
}
if (transport != NULL) {
dns_transport_attach(transport, &resp->transport);
}
if (tlsctx_cache != NULL) {
isc_tlsctx_cache_attach(tlsctx_cache, &resp->tlsctx_cache);
}
dns_dispatch_attach(disp, &resp->disp); /* DISPATCH001 */
disp->requests++;
inc_stats(disp->mgr, (disp->socktype == isc_socktype_udp)
? dns_resstatscounter_disprequdp
: dns_resstatscounter_dispreqtcp);
UNLOCK(&disp->lock);
*idp = id;
*respp = resp;
return (ISC_R_SUCCESS);
}
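
/*
 * Rough sketch of the intended calling sequence (callback and variable
 * names are illustrative and the argument list is abbreviated; see the
 * public header for the exact prototypes):
 *
 *	dns_dispentry_t *resp = NULL;
 *	result = dns_dispatch_add(disp, loop, options, timeout, &peer,
 *				  transport, tlsctx_cache, connected_cb,
 *				  sent_cb, response_cb, arg, &id, &resp);
 *	dns_dispatch_connect(resp);	  connected_cb runs when the
 *					  socket is ready
 *	dns_dispatch_send(resp, &region); sent_cb runs once the query
 *					  has been written
 *	dns_dispatch_done(&resp);	  cancels outstanding work and
 *					  releases the entry
 */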
isc_result_t
dns_dispatch_getnext(dns_dispentry_t *resp) {
REQUIRE(VALID_RESPONSE(resp));
REQUIRE(VALID_DISPATCH(resp->disp));
dns_dispatch_t *disp = resp->disp;
isc_result_t result = ISC_R_SUCCESS;
int32_t timeout = -1;
dispentry_log(resp, LVL(90), "getnext for QID %d", resp->id);
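
	/*
	 * The remaining budget is the configured timeout minus the
	 * time this entry has already spent running; with nothing
	 * left, resuming the read is reported as a timeout right away.
	 */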
isc_time_t now = isc_loop_now(resp->loop);
timeout = resp->timeout - dispentry_runtime(resp, &now);
if (timeout <= 0) {
return (ISC_R_TIMEDOUT);
}
LOCK(&disp->lock);
switch (disp->socktype) {
case isc_socktype_udp:
udp_dispatch_getnext(resp, timeout);
break;
case isc_socktype_tcp:
tcp_dispatch_getnext(disp, resp, timeout);
break;
default:
UNREACHABLE();
}
UNLOCK(&disp->lock);
return (result);
}
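
/*
 * dns_dispatch_getnext() is typically called from a response callback
 * when an answer had to be discarded (e.g. on a QID or question
 * mismatch) and the caller wants to keep listening within the original
 * timeout budget.  A rough sketch with illustrative names:
 *
 *	static void
 *	response_cb(isc_result_t eresult, isc_region_t *region, void *arg) {
 *		...
 *		if (mismatch) {
 *			result = dns_dispatch_getnext(resp);
 *		}
 *	}
 */
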
static void
udp_dispentry_cancel(dns_dispentry_t *resp, isc_result_t result) {
REQUIRE(VALID_RESPONSE(resp));
REQUIRE(VALID_DISPATCH(resp->disp));
REQUIRE(VALID_DISPATCHMGR(resp->disp->mgr));
dns_dispatch_t *disp = resp->disp;
dns_dispatchmgr_t *mgr = disp->mgr;
dns_qid_t *qid = mgr->qid;
bool respond = false;
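
	/*
	 * 'respond' records whether a read was still outstanding; if
	 * so, the caller's response callback is expected to be invoked
	 * with the cancellation result once the lock is dropped.
	 */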
LOCK(&disp->lock);
dispentry_log(resp, LVL(90),
"canceling response: %s, %s/%s (%s/%s), "
"requests %u",
isc_result_totext(result), state2str(resp->state),
resp->reading ? "reading" : "not reading",
state2str(disp->state),
disp->reading ? "reading" : "not reading",
disp->requests);
if (ISC_LINK_LINKED(resp, alink)) {
ISC_LIST_UNLINK(disp->active, resp, alink);
}
switch (resp->state) {
case DNS_DISPATCHSTATE_NONE:
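		/* Never connected, so there is no read to cancel. */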
break;
case DNS_DISPATCHSTATE_CONNECTING:
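		/*
		 * Reads have not started yet; the pending connect
		 * callback will observe the cancellation.
		 */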
break;

	case DNS_DISPATCHSTATE_CONNECTED:
		if (resp->reading) {
			respond = true;

			dispentry_log(resp, LVL(90), "canceling read on %p",
				      resp->handle);
			isc_nm_cancelread(resp->handle);
		}
		break;
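
	/*
	 * An earlier cancel has already unlinked the entry and updated
	 * the stats, so skip straight to the unlock.
	 */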
	case DNS_DISPATCHSTATE_CANCELED:
		goto unlock;

	default:
		UNREACHABLE();
	}

	dec_stats(disp->mgr, dns_resstatscounter_disprequdp);
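
	/*
	 * Unlink the entry from the qid table under the qid lock so that
	 * no further incoming response can be matched against it.
	 */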
	LOCK(&qid->lock);
	ISC_LIST_UNLINK(qid->qid_table[resp->bucket], resp, link);
	UNLOCK(&qid->lock);

	resp->state = DNS_DISPATCHSTATE_CANCELED;

unlock:
	UNLOCK(&disp->lock);

	if (respond) {
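		/*
		 * Report the cancellation to the caller; this runs after
		 * disp->lock has been dropped, so the callback cannot
		 * deadlock against the dispatch lock.
		 */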
		dispentry_log(resp, LVL(90), "read callback: %s",
			      isc_result_totext(result));
		resp->response(result, NULL, resp->arg);
	}
}
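
/*
 * Cancel an outstanding TCP response.  Unlike UDP, where each dispentry
 * owns its own socket, TCP entries share the connection owned by the
 * dispatch, so the read callback is deferred onto a local list and the
 * shared connection is only shut down (or, with DISPATCH_TCP_KEEPALIVE,
 * parked briefly for reuse) when the last active entry goes away.
 */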
static void
tcp_dispentry_cancel(dns_dispentry_t *resp, isc_result_t result) {
	REQUIRE(VALID_RESPONSE(resp));
	REQUIRE(VALID_DISPATCH(resp->disp));
	REQUIRE(VALID_DISPATCHMGR(resp->disp->mgr));

	dns_dispatch_t *disp = resp->disp;
	dns_dispatchmgr_t *mgr = disp->mgr;
	dns_qid_t *qid = mgr->qid;
	dns_displist_t resps = ISC_LIST_INITIALIZER;

	LOCK(&disp->lock);
	dispentry_log(resp, LVL(90),
		      "canceling response: %s, %s/%s (%s/%s), "
		      "requests %u",
		      isc_result_totext(result), state2str(resp->state),
		      resp->reading ? "reading" : "not reading",
		      state2str(disp->state),
		      disp->reading ? "reading" : "not reading",
		      disp->requests);

	switch (resp->state) {
	case DNS_DISPATCHSTATE_NONE:
		break;

	case DNS_DISPATCHSTATE_CONNECTING:
		break;
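
	/*
	 * A read canceled here is not reported directly; the entry is
	 * queued on the local "resps" list and its callback is invoked
	 * by tcp_recv_processall() after disp->lock has been dropped.
	 */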
	case DNS_DISPATCHSTATE_CONNECTED:
		if (resp->reading) {
			tcp_recv_add(&resps, resp, ISC_R_CANCELED);
		}

		INSIST(!ISC_LINK_LINKED(resp, alink));
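
		/*
		 * If this was the last active entry on the shared TCP
		 * connection, decide what to do with the connection
		 * itself.
		 */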
		if (ISC_LIST_EMPTY(disp->active)) {
			INSIST(disp->handle != NULL);
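
			/*
			 * When DISPATCH_TCP_KEEPALIVE is enabled below, the
			 * idle connection is parked for one second so that
			 * dns_request can join it.  A hypothetical
			 * caller-side sketch (the exact signatures are
			 * assumed here, not taken from the headers) might
			 * look like:
			 *
			 *	dns_dispatch_t *disp = NULL;
			 *	if (dns_dispatch_gettcp(mgr, &peer, &local,
			 *				&disp) != ISC_R_SUCCESS)
			 *	{
			 *		result = dns_dispatch_createtcp(
			 *			mgr, &local, &peer, &disp);
			 *	}
			 */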
#if DISPATCH_TCP_KEEPALIVE
			/*
			 * This is experimental code that keeps the TCP
			 * connection open for 1 second before it is finally
			 * closed.  Keeping the TCP connection open allows it
			 * to be reused by dns_request, which uses
			 * dns_dispatch_gettcp() to join existing TCP
			 * connections.
			 *
			 * It is disabled for now because it changes the
			 * behaviour; the code is kept here for future
			 * reference, for when we improve dns_dispatch to
			 * reuse TCP connections in the resolver as well.
			 *
			 * The TCP connection reuse should be seamless,
			 * though, and should not require any extra handling
			 * on the client side.
			 */
			isc_nmhandle_cleartimeout(disp->handle);
			isc_nmhandle_settimeout(disp->handle, 1000);
			if (!disp->reading) {
				dispentry_log(resp, LVL(90),
					      "final 1 second timeout on %p",
					      disp->handle);
				tcp_startrecv(disp, NULL);
			}
#else
			if (disp->reading) {
				dispentry_log(resp, LVL(90),
					      "canceling read on %p",
					      disp->handle);
				isc_nm_cancelread(disp->handle);
			}
#endif
		}
		break;

	case DNS_DISPATCHSTATE_CANCELED:
		goto unlock;

	default:
		UNREACHABLE();
	}

	dec_stats(disp->mgr, dns_resstatscounter_dispreqtcp);
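
	/*
	 * As in the UDP path, unlink the entry from the qid table under
	 * the qid lock so that no further response can be matched
	 * against it.
	 */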
	LOCK(&qid->lock);
	ISC_LIST_UNLINK(qid->qid_table[resp->bucket], resp, link);
	UNLOCK(&qid->lock);

	resp->state = DNS_DISPATCHSTATE_CANCELED;

unlock:
	UNLOCK(&disp->lock);

	/*
	 * NOTE: Calling the response callbacks directly from here would
	 * ideally be done asynchronously: dns_dispatch_done() is usually
	 * called directly from a response callback, so there is a slight
	 * chance that the call stack grows deeper here, but this is
	 * mitigated by the ".reading" flag, so we can never end up in a
	 * loop.
	 */
	tcp_recv_processall(&resps, NULL);
}
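
/*
 * Thin wrapper that calls the UDP or TCP cancel implementation above,
 * depending on the socket type of the dispatch.
 */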
static void
dispentry_cancel(dns_dispentry_t *resp, isc_result_t result) {
	REQUIRE(VALID_RESPONSE(resp));
	REQUIRE(VALID_DISPATCH(resp->disp));

	dns_dispatch_t *disp = resp->disp;

	switch (disp->socktype) {
	case isc_socktype_udp:
		udp_dispentry_cancel(resp, result);
		break;
	case isc_socktype_tcp:
		tcp_dispentry_cancel(resp, result);
		break;
	default:
		UNREACHABLE();
	}
}
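
/*
 * Cancel any outstanding activity on the response and drop the
 * caller's reference to the dispatch entry.
 */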
void
dns_dispatch_done(dns_dispentry_t **respp) {
	REQUIRE(VALID_RESPONSE(*respp));

	dns_dispentry_t *resp = *respp;
	*respp = NULL;

	dispentry_cancel(resp, ISC_R_CANCELED);
	dns_dispentry_detach(&resp); /* DISPENTRY000 */
}
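
/*
 * Attach the connected UDP handle to the response entry and start
 * reading from it; a reference to the entry is held while the read
 * is pending.
 */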
static void
udp_startrecv(isc_nmhandle_t *handle, dns_dispentry_t *resp) {
	REQUIRE(VALID_RESPONSE(resp));

	dispentry_log(resp, LVL(90), "attaching handle %p to %p", handle,
		      &resp->handle);
	isc_nmhandle_attach(handle, &resp->handle);

	dns_dispentry_ref(resp); /* DISPENTRY003 */

	dispentry_log(resp, LVL(90), "reading");
	isc_nm_read(resp->handle, udp_recv, resp);
	resp->reading = true;
}
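
/*
 * Start reading on a connected TCP dispatch. 'resp' may be NULL when
 * no specific response entry is waiting; a dispatch reference is held
 * while the read is pending.
 */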
static void
tcp_startrecv(dns_dispatch_t *disp, dns_dispentry_t *resp) {
	REQUIRE(VALID_DISPATCH(disp));
	REQUIRE(disp->socktype == isc_socktype_tcp);

	dns_dispatch_ref(disp); /* DISPATCH002 */

	if (resp != NULL) {
		dispentry_log(resp, LVL(90), "reading from %p", disp->handle);
		INSIST(!isc_time_isepoch(&resp->start));
	} else {
		dispatch_log(disp, LVL(90),
			     "TCP reading without response from %p",
			     disp->handle);
	}

	isc_nm_read(disp->handle, tcp_recv, disp);
	disp->reading = true;
}
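
/*
 * Connect callback for outgoing TCP. Under the dispatch lock, the
 * connect result is recorded for every response pending on this
 * dispatch; the connect callbacks are then run with the lock released.
 */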
static void
tcp_connected(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) {
	dns_dispatch_t *disp = (dns_dispatch_t *)arg;
	dns_dispentry_t *resp = NULL;
	dns_dispentry_t *next = NULL;
	dns_displist_t resps = ISC_LIST_INITIALIZER;

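	/* At debug level 90, log the local and peer addresses. */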
	if (isc_log_wouldlog(dns_lctx, 90)) {
		char localbuf[ISC_SOCKADDR_FORMATSIZE];
		char peerbuf[ISC_SOCKADDR_FORMATSIZE];

		if (handle != NULL) {
			isc_sockaddr_t local = isc_nmhandle_localaddr(handle);
			isc_sockaddr_t peer = isc_nmhandle_peeraddr(handle);

			isc_sockaddr_format(&local, localbuf,
					    ISC_SOCKADDR_FORMATSIZE);
			isc_sockaddr_format(&peer, peerbuf,
					    ISC_SOCKADDR_FORMATSIZE);
		} else {
			isc_sockaddr_format(&disp->local, localbuf,
					    ISC_SOCKADDR_FORMATSIZE);
			isc_sockaddr_format(&disp->peer, peerbuf,
					    ISC_SOCKADDR_FORMATSIZE);
		}
dispatch_log(disp, LVL(90), "connected from %s to %s: %s",
localbuf, peerbuf, isc_result_totext(eresult));
}
LOCK(&disp->lock);
INSIST(disp->state == DNS_DISPATCHSTATE_CONNECTING);
	/*
	 * If there are pending responses, call the connect
	 * callbacks for all of them.
	 */
	for (resp = ISC_LIST_HEAD(disp->pending); resp != NULL; resp = next) {
		next = ISC_LIST_NEXT(resp, plink);
		ISC_LIST_UNLINK(disp->pending, resp, plink);
		ISC_LIST_APPEND(resps, resp, rlink);
		resp->result = eresult;

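		/*
		 * A response canceled while the connection was pending
		 * reports ISC_R_CANCELED; on success it moves to the
		 * active list and is marked as reading; on failure it
		 * drops back to the NONE state.
		 */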
		if (resp->state == DNS_DISPATCHSTATE_CANCELED) {
			resp->result = ISC_R_CANCELED;
		} else if (eresult == ISC_R_SUCCESS) {
			resp->state = DNS_DISPATCHSTATE_CONNECTED;
			ISC_LIST_APPEND(disp->active, resp, alink);
			resp->reading = true;
			dispentry_log(resp, LVL(90), "start reading");
		} else {
			resp->state = DNS_DISPATCHSTATE_NONE;
		}
	}

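	/*
	 * Settle the state of the dispatch itself: if no response is
	 * left on the active list the dispatch is canceled; otherwise
	 * it becomes connected on success or reverts to NONE on failure.
	 */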
	if (ISC_LIST_EMPTY(disp->active)) {
		/* All responses have been canceled. */
		disp->state = DNS_DISPATCHSTATE_CANCELED;
	} else if (eresult == ISC_R_SUCCESS) {
		disp->state = DNS_DISPATCHSTATE_CONNECTED;
		isc_nmhandle_attach(handle, &disp->handle);
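		/*
		 * 'resp' is NULL after the loop above, so this starts
		 * a read that is not tied to a specific response.
		 */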
		tcp_startrecv(disp, resp);
	} else {
		disp->state = DNS_DISPATCHSTATE_NONE;
	}

UNLOCK(&disp->lock);
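	/* Run the connect callbacks now that the lock is released. */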
	for (resp = ISC_LIST_HEAD(resps); resp != NULL; resp = next) {
		next = ISC_LIST_NEXT(resp, rlink);
		ISC_LIST_UNLINK(resps, resp, rlink);

dispentry_log(resp, LVL(90), "connect callback: %s",
isc_result_totext(resp->result));
resp->connected(resp->result, NULL, resp->arg);
dns_dispentry_detach(&resp); /* DISPENTRY005 */
	}
	dns_dispatch_detach(&disp); /* DISPATCH003 */
}
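
/*
 * Called by the netmgr when the UDP socket for 'arg' (a dns_dispentry_t)
 * has been connected, or when the connection attempt failed.  This is the
 * callback passed to isc_nm_udpconnect() by udp_dispatch_connect() below.
 */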
static void
udp_connected(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) {
	dns_dispentry_t *resp = (dns_dispentry_t *)arg;
	dns_dispatch_t *disp = resp->disp;
	dispentry_log(resp, LVL(90), "connected: %s",
		      isc_result_totext(eresult));
	LOCK(&disp->lock);
	switch (resp->state) {
	case DNS_DISPATCHSTATE_CANCELED:
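		/*
		 * The entry was canceled while the connect was still
		 * pending; report ISC_R_CANCELED to the caller's connect
		 * callback rather than the netmgr result.
		 */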
		eresult = ISC_R_CANCELED;
		ISC_LIST_UNLINK(disp->pending, resp, plink);
		goto unlock;
	case DNS_DISPATCHSTATE_CONNECTING:
		ISC_LIST_UNLINK(disp->pending, resp, plink);
		break;
	default:
		UNREACHABLE();
	}

	switch (eresult) {
	case ISC_R_CANCELED:
		break;
	case ISC_R_SUCCESS:
		resp->state = DNS_DISPATCHSTATE_CONNECTED;
		udp_startrecv(handle, resp);
		break;

	case ISC_R_NOPERM:
	case ISC_R_ADDRINUSE: {
		in_port_t localport = isc_sockaddr_getport(&disp->local);
		isc_result_t result;

		/* probably a port collision; try a different one */
		result = setup_socket(disp, resp, &resp->peer, &localport);
		if (result == ISC_R_SUCCESS) {
			UNLOCK(&disp->lock);
			udp_dispatch_connect(disp, resp);
			goto detach;
		}
		resp->state = DNS_DISPATCHSTATE_NONE;
		break;
	}
	default:
		resp->state = DNS_DISPATCHSTATE_NONE;
		break;
	}

unlock:
	UNLOCK(&disp->lock);

	dispentry_log(resp, LVL(90), "connect callback: %s",
		      isc_result_totext(eresult));
	resp->connected(eresult, NULL, resp->arg);
detach:
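	/*
	 * Drop the DISPENTRY004 reference acquired in
	 * udp_dispatch_connect().
	 */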
	dns_dispentry_detach(&resp); /* DISPENTRY004 */
}
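
/*
 * Kick off a connected UDP socket for 'resp': mark the entry as
 * CONNECTING, append it to the dispatch's pending list (taking the
 * DISPENTRY004 reference that udp_connected() drops), and hand the
 * actual connect off to the netmgr.
 */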
static void
udp_dispatch_connect(dns_dispatch_t *disp, dns_dispentry_t *resp) {
	LOCK(&disp->lock);
	resp->state = DNS_DISPATCHSTATE_CONNECTING;
	resp->start = isc_loop_now(resp->loop);
	dns_dispentry_ref(resp); /* DISPENTRY004 */
	ISC_LIST_APPEND(disp->pending, resp, plink);
	UNLOCK(&disp->lock);
	isc_nm_udpconnect(disp->mgr->nm, &resp->local, &resp->peer,
			  udp_connected, resp, resp->timeout);
}
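
/*
 * Connect a TCP (or TLS) dispatch.  Unlike UDP, a single stream
 * connection can be shared by several dispatch entries, so the
 * connection state lives on the dispatch itself: the first entry
 * starts the connect, and entries arriving while it is still pending
 * simply join the pending list.
 */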
static isc_result_t
tcp_dispatch_connect(dns_dispatch_t *disp, dns_dispentry_t *resp) {
	dns_transport_type_t transport_type = DNS_TRANSPORT_TCP;
	isc_tlsctx_t *tlsctx = NULL;
	isc_tlsctx_client_session_cache_t *sess_cache = NULL;

	if (resp->transport != NULL) {
		transport_type = dns_transport_get_type(resp->transport);
	}
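
	/*
	 * A TLS transport needs a client TLS context (and possibly a
	 * session cache) before the connection can be started.
	 */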
	if (transport_type == DNS_TRANSPORT_TLS) {
		isc_result_t result;

		result = dns_transport_get_tlsctx(
			resp->transport, &resp->peer, resp->tlsctx_cache,
			resp->disp->mgr->mctx, &tlsctx, &sess_cache);
		if (result != ISC_R_SUCCESS) {
			return (result);
		}
		INSIST(tlsctx != NULL);
	}
	/* Check whether the dispatch is already connecting or connected. */
	LOCK(&disp->lock);
	switch (disp->state) {
	case DNS_DISPATCHSTATE_NONE:
		/* First connection, continue with connecting */
		disp->state = DNS_DISPATCHSTATE_CONNECTING;
		resp->state = DNS_DISPATCHSTATE_CONNECTING;
		resp->start = isc_loop_now(resp->loop);
		dns_dispentry_ref(resp); /* DISPENTRY005 */
		ISC_LIST_APPEND(disp->pending, resp, plink);
		UNLOCK(&disp->lock);
		char localbuf[ISC_SOCKADDR_FORMATSIZE];
		char peerbuf[ISC_SOCKADDR_FORMATSIZE];
		isc_sockaddr_format(&disp->local, localbuf,
				    ISC_SOCKADDR_FORMATSIZE);
		isc_sockaddr_format(&disp->peer, peerbuf,
				    ISC_SOCKADDR_FORMATSIZE);
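		/*
		 * Reference the dispatch before connecting: if the
		 * connection is canceled while still pending, the connect
		 * timeout may still fire, so the dispatch must stay alive
		 * until the TCP connect callback drops this DISPATCH003
		 * reference.
		 */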
		dns_dispatch_ref(disp); /* DISPATCH003 */
		dispentry_log(resp, LVL(90),
			      "connecting from %s to %s, timeout %u", localbuf,
			      peerbuf, resp->timeout);
		isc_nm_streamdnsconnect(disp->mgr->nm, &disp->local,
					&disp->peer, tcp_connected, disp,
					resp->timeout, tlsctx, sess_cache);
		break;
	case DNS_DISPATCHSTATE_CONNECTING:
		/* Connection pending; add resp to the list */
		resp->state = DNS_DISPATCHSTATE_CONNECTING;
		resp->start = isc_loop_now(resp->loop);
		dns_dispentry_ref(resp); /* DISPENTRY005 */
		ISC_LIST_APPEND(disp->pending, resp, plink);
		UNLOCK(&disp->lock);
		break;
	case DNS_DISPATCHSTATE_CONNECTED:
		resp->state = DNS_DISPATCHSTATE_CONNECTED;
		resp->start = isc_loop_now(resp->loop);
		/* Add the resp to the reading list */
		ISC_LIST_APPEND(disp->active, resp, alink);
		dispentry_log(resp, LVL(90), "already connected; attaching");
		resp->reading = true;
		if (!disp->reading) {
			/* Restart the reading */
			tcp_startrecv(disp, resp);
		}
		UNLOCK(&disp->lock);

		/* We are already connected; call the connected cb */
		dispentry_log(resp, LVL(90), "connect callback: %s",
			      isc_result_totext(ISC_R_SUCCESS));
		resp->connected(ISC_R_SUCCESS, NULL, resp->arg);
		break;
	default:
		UNREACHABLE();
	}
	return (ISC_R_SUCCESS);
}
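/*
 * Start the outgoing connection for 'resp'.  The TCP variant may
 * return a result immediately (for example when an established
 * connection is being reused); the UDP variant always returns
 * ISC_R_SUCCESS here and reports the real connect result through the
 * entry's connect callback.
 */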
isc_result_t
dns_dispatch_connect(dns_dispentry_t *resp) {
	REQUIRE(VALID_RESPONSE(resp));
	REQUIRE(VALID_DISPATCH(resp->disp));

	dns_dispatch_t *disp = resp->disp;

	switch (disp->socktype) {
	case isc_socktype_tcp:
		return (tcp_dispatch_connect(disp, resp));
	case isc_socktype_udp:
		udp_dispatch_connect(disp, resp);
		return (ISC_R_SUCCESS);
	default:
		UNREACHABLE();
	}
}
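/*
 * Completion callback for isc_nm_send(): log the result, hand it to
 * the caller via resp->sent(), cancel the entry on failure, and then
 * drop the references taken in dns_dispatch_send() (DISPENTRY007 and
 * the send handle).
 */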
static void
send_done(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
	dns_dispentry_t *resp = (dns_dispentry_t *)cbarg;

	REQUIRE(VALID_RESPONSE(resp));
	dns_dispatch_t *disp = resp->disp;
	REQUIRE(VALID_DISPATCH(disp));

	dispentry_log(resp, LVL(90), "sent: %s", isc_result_totext(result));
	resp->sent(result, NULL, resp->arg);

	if (result != ISC_R_SUCCESS) {
		dispentry_cancel(resp, result);
	}
	dns_dispentry_detach(&resp); /* DISPENTRY007 */
	isc_nmhandle_detach(&handle);
}
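/*
 * Resume reading for 'resp' on a shared TCP connection: the entry is
 * put (back) on the active list, and the single read on the dispatch
 * handle is restarted unless it is already running.  Called with
 * disp->lock held.
 */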
static void
tcp_dispatch_getnext(dns_dispatch_t *disp, dns_dispentry_t *resp,
		     int32_t timeout) {
	REQUIRE(timeout <= INT16_MAX);

	dispentry_log(resp, LVL(90), "continue reading");

	if (!resp->reading) {
		ISC_LIST_APPEND(disp->active, resp, alink);
		resp->reading = true;
	}
	if (disp->reading) {
		return;
	}

	if (timeout > 0) {
		isc_nmhandle_settimeout(disp->handle, timeout);
	}

	dns_dispatch_ref(disp); /* DISPATCH002 */
	isc_nm_read(disp->handle, tcp_recv, disp);
	disp->reading = true;
}
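/*
 * Resume reading for a UDP entry: each entry owns its own handle, so
 * the read is restarted directly on resp->handle unless one is already
 * pending.
 */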
static void
udp_dispatch_getnext(dns_dispentry_t *resp, int32_t timeout) {
	REQUIRE(timeout <= INT16_MAX);

	if (resp->reading) {
		return;
	}

	if (timeout > 0) {
		isc_nmhandle_settimeout(resp->handle, timeout);
	}

	dispentry_log(resp, LVL(90), "continue reading");

	dns_dispentry_ref(resp); /* DISPENTRY003 */
	isc_nm_read(resp->handle, udp_recv, resp);
	resp->reading = true;
}
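/*
 * Restart listening for a response after a timeout was reported to the
 * caller; a 'timeout' greater than zero resets the read timer.  For
 * TCP, the dispatch's count of timed-out reads is decremented before
 * reading resumes.
 */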
void
dns_dispatch_resume(dns_dispentry_t *resp, uint16_t timeout) {
	REQUIRE(VALID_RESPONSE(resp));
	REQUIRE(VALID_DISPATCH(resp->disp));
	dns_dispatch_t *disp = resp->disp;

	dispentry_log(resp, LVL(90), "resume");
	LOCK(&disp->lock);
	switch (disp->socktype) {
	case isc_socktype_udp: {
		udp_dispatch_getnext(resp, timeout);
		break;
	}
	case isc_socktype_tcp:
		INSIST(disp->timedout > 0);
		disp->timedout--;

		tcp_dispatch_getnext(disp, resp, timeout);
		break;
	default:
		UNREACHABLE();
	}
	UNLOCK(&disp->lock);
}
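/*
 * Send the request in 'r' for 'resp', using the per-entry handle for
 * UDP or the shared dispatch handle for TCP.  References to the send
 * handle and the entry are held until send_done() runs.
 */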
void
dns_dispatch_send(dns_dispentry_t *resp, isc_region_t *r) {
	REQUIRE(VALID_RESPONSE(resp));
	REQUIRE(VALID_DISPATCH(resp->disp));
	dns_dispatch_t *disp = resp->disp;
	isc_nmhandle_t *sendhandle = NULL;

	dispentry_log(resp, LVL(90), "sending");

	switch (disp->socktype) {
	case isc_socktype_udp:
		isc_nmhandle_attach(resp->handle, &sendhandle);
		break;
	case isc_socktype_tcp:
		isc_nmhandle_attach(disp->handle, &sendhandle);
		break;
	default:
		UNREACHABLE();
	}
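/*
 * Take a reference on the dispatch entry for the in-flight send; the
 * matching detach is expected in the send_done() callback
 * (DISPENTRY007).
 */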
dns_dispentry_ref(resp); /* DISPENTRY007 */
isc_nm_send(sendhandle, r, send_done, resp);
}
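/*
 * Report the local address a UDP dispatch is bound to.  TCP dispatches
 * are not handled here; for a per-query local address (including the
 * ephemeral port of a connected UDP socket) use
 * dns_dispentry_getlocaladdress() instead.
 */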
isc_result_t
dns_dispatch_getlocaladdress(dns_dispatch_t *disp, isc_sockaddr_t *addrp) {
REQUIRE(VALID_DISPATCH(disp));
REQUIRE(addrp != NULL);
if (disp->socktype == isc_socktype_udp) {
*addrp = disp->local;
return (ISC_R_SUCCESS);
}
return (ISC_R_NOTIMPLEMENTED);
}
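
/*
 * Report the local address of an individual response entry: for TCP
 * this is the dispatch-wide local address, while for UDP it is taken
 * from the entry's own connected handle, which carries the ephemeral
 * source port chosen for that query.
 */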
isc_result_t
dns_dispentry_getlocaladdress(dns_dispentry_t *resp, isc_sockaddr_t *addrp) {
REQUIRE(VALID_RESPONSE(resp));
REQUIRE(VALID_DISPATCH(resp->disp));
REQUIRE(addrp != NULL);
dns_dispatch_t *disp = resp->disp;
switch (disp->socktype) {
case isc_socktype_tcp:
*addrp = disp->local;
return (ISC_R_SUCCESS);
case isc_socktype_udp:
*addrp = isc_nmhandle_localaddr(resp->handle);
return (ISC_R_SUCCESS);
default:
UNREACHABLE();
}
}
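
/*
 * Round-robin selection: hand out the dispatches in the set one after
 * another, wrapping the cursor back to the first entry once all have
 * been used.  The cursor is protected by the set's lock, so concurrent
 * callers each get a (possibly different) dispatch.
 */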
dns_dispatch_t *
dns_dispatchset_get(dns_dispatchset_t *dset) {
dns_dispatch_t *disp = NULL;

/* Check that the dispatch set is configured and non-empty. */
if (dset == NULL || dset->ndisp == 0) {
return (NULL);
}
LOCK(&dset->lock);
disp = dset->dispatches[dset->cur];
dset->cur++;
if (dset->cur == dset->ndisp) {
dset->cur = 0;
}
UNLOCK(&dset->lock);

return (disp);
}
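
/*
 * Create a set of 'n' UDP dispatches sharing the local address of
 * 'source': slot 0 attaches to 'source' itself and the remaining
 * slots are freshly created via dispatch_createudp().  On failure,
 * the already-created slots are detached again and the set is freed.
 */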
isc_result_t
dns_dispatchset_create(isc_mem_t *mctx, dns_dispatch_t *source,
dns_dispatchset_t **dsetp, int n) {
isc_result_t result;
dns_dispatchset_t *dset = NULL;
dns_dispatchmgr_t *mgr = NULL;
int i, j;

REQUIRE(VALID_DISPATCH(source));
REQUIRE(source->socktype == isc_socktype_udp);
REQUIRE(dsetp != NULL && *dsetp == NULL);

mgr = source->mgr;

dset = isc_mem_get(mctx, sizeof(dns_dispatchset_t));
*dset = (dns_dispatchset_t){ .ndisp = n };
isc_mutex_init(&dset->lock);
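/*
 * Allocate the array of dispatch pointers; every slot is explicitly
 * NULLed below before a dispatch is attached to it.
 */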
dset->dispatches = isc_mem_cget(mctx, n, sizeof(dns_dispatch_t *));
isc_mem_attach(mctx, &dset->mctx);
dset->dispatches[0] = NULL;
dns_dispatch_attach(source, &dset->dispatches[0]); /* DISPATCH004 */
LOCK(&mgr->lock);
for (i = 1; i < n; i++) {
dset->dispatches[i] = NULL;
result = dispatch_createudp(mgr, &source->local,
&dset->dispatches[i]);
if (result != ISC_R_SUCCESS) {
goto fail;
}
}
UNLOCK(&mgr->lock);
*dsetp = dset;

return (ISC_R_SUCCESS);

fail:
UNLOCK(&mgr->lock);
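/*
 * Roll back: only slots [0, i) were attached or created, so detach
 * exactly those before releasing the array itself.
 */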
for (j = 0; j < i; j++) {
dns_dispatch_detach(&(dset->dispatches[j])); /* DISPATCH004 */
}
isc_mem_cput(mctx, dset->dispatches, n, sizeof(dns_dispatch_t *));
if (dset->mctx == mctx) {
isc_mem_detach(&dset->mctx);
}
isc_mutex_destroy(&dset->lock);
isc_mem_put(mctx, dset, sizeof(dns_dispatchset_t));
return (result);
}
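
/*
 * Tear down a dispatch set: detach every dispatch in the array (the
 * reverse of the attachments made in dns_dispatchset_create()), then
 * release the array, the lock, and the set structure itself.
 */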
void
dns_dispatchset_destroy(dns_dispatchset_t **dsetp) {
dns_dispatchset_t *dset = NULL;
int i;
REQUIRE(dsetp != NULL && *dsetp != NULL);

dset = *dsetp;
*dsetp = NULL;
for (i = 0; i < dset->ndisp; i++) {
dns_dispatch_detach(&(dset->dispatches[i])); /* DISPATCH004 */
}
isc_mem_cput(dset->mctx, dset->dispatches, dset->ndisp,
sizeof(dns_dispatch_t *));
isc_mutex_destroy(&dset->lock);
isc_mem_putanddetach(&dset->mctx, dset, sizeof(dns_dispatchset_t));
}
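
/*
 * Zone-transfer permission check: only meaningful on a connected TCP
 * dispatch, so anything without a handle, or using UDP, is refused
 * outright; otherwise the decision is deferred to the netmgr via
 * isc_nm_xfr_checkperm().
 */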
isc_result_t
dns_dispatch_checkperm(dns_dispatch_t *disp) {
REQUIRE(VALID_DISPATCH(disp));

if (disp->handle == NULL || disp->socktype == isc_socktype_udp) {
return (ISC_R_NOPERM);
}
return (isc_nm_xfr_checkperm(disp->handle));
}