/*
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
*
* SPDX-License-Identifier: MPL-2.0
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, you can obtain one at https://mozilla.org/MPL/2.0/.
*
* See the COPYRIGHT file distributed with this work for additional
* information regarding copyright ownership.
*/
/*! \file */
/*
* FIXME: Might need dns_dispatch_shuttingdown()
*/
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <isc/async.h>
#include <isc/hash.h>
#include <isc/hashmap.h>
#include <isc/log.h>
#include <isc/loop.h>
#include <isc/mem.h>
#include <isc/mutex.h>
#include <isc/net.h>
#include <isc/netmgr.h>
#include <isc/portset.h>
#include <isc/random.h>
#include <isc/stats.h>
#include <isc/string.h>
#include <isc/tid.h>
#include <isc/time.h>
#include <isc/tls.h>
#include <isc/urcu.h>
#include <isc/util.h>
#include <dns/acl.h>
#include <dns/dispatch.h>
#include <dns/message.h>
#include <dns/stats.h>
#include <dns/transport.h>
#include <dns/types.h>
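/*
 * The general flow of an outgoing query through this unit: a dispatch
 * entry is registered under an <address, port, query ID> tuple, the
 * entry is connected (UDP connections are handled per-entry, while a
 * TCP connection may be shared by multiple entries on one dispatch),
 * the request is sent, and the caller is notified of progress through
 * the entry's "connected", "sent" and "response" callbacks.
 */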
typedef ISC_LIST(dns_dispentry_t) dns_displist_t;
struct dns_dispatchmgr {
/* Unlocked. */
unsigned int magic;
isc_refcount_t references;
isc_mem_t *mctx;
dns_acl_t *blackhole;
isc_stats_t *stats;
uint32_t nloops;
struct cds_lfht **tcps;
struct cds_lfht *qids;
in_port_t *v4ports; /*%< available ports for IPv4 */
unsigned int nv4ports; /*%< # of available ports for IPv4 */
in_port_t *v6ports; /*%< available ports for IPv6 */
unsigned int nv6ports; /*%< # of available ports for IPv6 */
};
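/*%
 * Connection state, tracked explicitly by both dns_dispatch_t (TCP)
 * and dns_dispentry_t (UDP) rather than inferred from other struct
 * members; an object moves from NONE through CONNECTING to CONNECTED,
 * and to CANCELED when a query is canceled.
 */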
typedef enum {
DNS_DISPATCHSTATE_NONE = 0UL,
DNS_DISPATCHSTATE_CONNECTING,
DNS_DISPATCHSTATE_CONNECTED,
DNS_DISPATCHSTATE_CANCELED,
} dns_dispatchstate_t;
struct dns_dispentry {
unsigned int magic;
isc_refcount_t references;
isc_mem_t *mctx;
dns_dispatch_t *disp;
isc_loop_t *loop;
isc_nmhandle_t *handle; /*%< netmgr handle for UDP connection */
dns_dispatchstate_t state;
dns_transport_t *transport;
isc_tlsctx_cache_t *tlsctx_cache;
unsigned int retries;
unsigned int connect_timeout;
unsigned int timeout;
isc_time_t start;
isc_sockaddr_t local;
isc_sockaddr_t peer;
in_port_t port;
dns_messageid_t id;
dispatch_cb_t connected;
dispatch_cb_t sent;
dispatch_cb_t response;
void *arg;
bool reading;
isc_result_t result;
ISC_LINK(dns_dispentry_t) alink;
ISC_LINK(dns_dispentry_t) plink;
ISC_LINK(dns_dispentry_t) rlink;
struct cds_lfht_node ht_node;
struct rcu_head rcu_head;
};
struct dns_dispatch {
/* Unlocked. */
unsigned int magic; /*%< magic */
isc_tid_t tid;
isc_socktype_t socktype;
isc_refcount_t references;
isc_mem_t *mctx;
dns_dispatchmgr_t *mgr; /*%< dispatch manager */
isc_nmhandle_t *handle; /*%< netmgr handle for TCP connection */
isc_sockaddr_t local; /*%< local address */
isc_sockaddr_t peer; /*%< peer address (TCP) */
dns_transport_t *transport; /*%< TCP transport parameters */
dns_dispatchopt_t options;
dns_dispatchstate_t state;
bool reading;
dns_displist_t pending;
dns_displist_t active;
uint_fast32_t requests; /*%< how many requests we have */
unsigned int timedout;
struct cds_lfht_node ht_node;
struct rcu_head rcu_head;
};
#define RESPONSE_MAGIC ISC_MAGIC('D', 'r', 's', 'p')
#define VALID_RESPONSE(e) ISC_MAGIC_VALID((e), RESPONSE_MAGIC)
#define DISPATCH_MAGIC ISC_MAGIC('D', 'i', 's', 'p')
#define VALID_DISPATCH(e) ISC_MAGIC_VALID((e), DISPATCH_MAGIC)
#define DNS_DISPATCHMGR_MAGIC ISC_MAGIC('D', 'M', 'g', 'r')
#define VALID_DISPATCHMGR(e) ISC_MAGIC_VALID((e), DNS_DISPATCHMGR_MAGIC)
#if DNS_DISPATCH_TRACE
#define dns_dispentry_ref(ptr) \
dns_dispentry__ref(ptr, __func__, __FILE__, __LINE__)
#define dns_dispentry_unref(ptr) \
dns_dispentry__unref(ptr, __func__, __FILE__, __LINE__)
#define dns_dispentry_attach(ptr, ptrp) \
dns_dispentry__attach(ptr, ptrp, __func__, __FILE__, __LINE__)
#define dns_dispentry_detach(ptrp) \
dns_dispentry__detach(ptrp, __func__, __FILE__, __LINE__)
ISC_REFCOUNT_TRACE_DECL(dns_dispentry);
#else
ISC_REFCOUNT_DECL(dns_dispentry);
#endif
/*
* The number of attempts to find a unique <addr, port, query_id> combination.
*/
#define QID_MAX_TRIES 64
/*
* Initial and minimum QID table sizes.
*/
#define QIDS_INIT_SIZE (1 << 4) /* Must be power of 2 */
#define QIDS_MIN_SIZE (1 << 4) /* Must be power of 2 */
/*
* Statics.
*/
static void
dispatchmgr_destroy(dns_dispatchmgr_t *mgr);
static void
udp_recv(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region,
void *arg);
static void
tcp_recv(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region,
void *arg);
static void
dispentry_cancel(dns_dispentry_t *resp, isc_result_t result);
static isc_result_t
dispatch_createudp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
isc_tid_t tid, dns_dispatch_t **dispp);
static void
udp_startrecv(isc_nmhandle_t *handle, dns_dispentry_t *resp);
static void
udp_dispatch_connect(dns_dispatch_t *disp, dns_dispentry_t *resp);
static void
tcp_startrecv(dns_dispatch_t *disp, dns_dispentry_t *resp);
static void
tcp_dispatch_getnext(dns_dispatch_t *disp, dns_dispentry_t *resp,
int64_t timeout);
static void
udp_dispatch_getnext(dns_dispentry_t *resp, int64_t timeout);
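/*%
 * Return a printable name for the transport used by a response: "UDP"
 * for UDP dispatches, and "TCP", "TLS" or "HTTP" for TCP dispatches,
 * depending on the entry's transport parameters.
 */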
static const char *
socktype2str(dns_dispentry_t *resp) {
dns_transport_type_t transport_type = DNS_TRANSPORT_UDP;
dns_dispatch_t *disp = resp->disp;
if (disp->socktype == isc_socktype_tcp) {
if (resp->transport != NULL) {
transport_type =
dns_transport_get_type(resp->transport);
} else {
transport_type = DNS_TRANSPORT_TCP;
}
}
switch (transport_type) {
case DNS_TRANSPORT_UDP:
return "UDP";
case DNS_TRANSPORT_TCP:
return "TCP";
case DNS_TRANSPORT_TLS:
return "TLS";
case DNS_TRANSPORT_HTTP:
return "HTTP";
default:
return "<unexpected>";
}
}
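/*%
 * Return a printable name for a dispatch state, for logging.
 */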
static const char *
state2str(dns_dispatchstate_t state) {
switch (state) {
case DNS_DISPATCHSTATE_NONE:
return "none";
case DNS_DISPATCHSTATE_CONNECTING:
return "connecting";
case DNS_DISPATCHSTATE_CONNECTED:
return "connected";
case DNS_DISPATCHSTATE_CANCELED:
return "canceled";
default:
return "<unexpected>";
}
}
static void
mgr_log(dns_dispatchmgr_t *mgr, int level, const char *fmt, ...)
ISC_FORMAT_PRINTF(3, 4);
static void
mgr_log(dns_dispatchmgr_t *mgr, int level, const char *fmt, ...) {
char msgbuf[2048];
va_list ap;
if (!isc_log_wouldlog(level)) {
return;
}
va_start(ap, fmt);
vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
va_end(ap);
isc_log_write(DNS_LOGCATEGORY_DISPATCH, DNS_LOGMODULE_DISPATCH, level,
"dispatchmgr %p: %s", mgr, msgbuf);
}
static void
inc_stats(dns_dispatchmgr_t *mgr, isc_statscounter_t counter) {
if (mgr->stats != NULL) {
isc_stats_increment(mgr->stats, counter);
}
}
static void
dec_stats(dns_dispatchmgr_t *mgr, isc_statscounter_t counter) {
if (mgr->stats != NULL) {
isc_stats_decrement(mgr->stats, counter);
}
}
static void
dispatch_log(dns_dispatch_t *disp, int level, const char *fmt, ...)
ISC_FORMAT_PRINTF(3, 4);
static void
dispatch_log(dns_dispatch_t *disp, int level, const char *fmt, ...) {
char msgbuf[2048];
va_list ap;
int r;
if (!isc_log_wouldlog(level)) {
return;
}
va_start(ap, fmt);
r = vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
if (r < 0) {
msgbuf[0] = '\0';
} else if ((unsigned int)r >= sizeof(msgbuf)) {
/* Truncated */
msgbuf[sizeof(msgbuf) - 1] = '\0';
}
va_end(ap);
isc_log_write(DNS_LOGCATEGORY_DISPATCH, DNS_LOGMODULE_DISPATCH, level,
"dispatch %p: %s", disp, msgbuf);
2000-04-29 00:45:26 +00:00
}
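/*%
 * Log a message for a dispatch entry, prefixed with the transport name
 * and the response pointer.
 */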
static void
dispentry_log(dns_dispentry_t *resp, int level, const char *fmt, ...)
ISC_FORMAT_PRINTF(3, 4);
static void
dispentry_log(dns_dispentry_t *resp, int level, const char *fmt, ...) {
char msgbuf[2048];
va_list ap;
int r;
if (!isc_log_wouldlog(level)) {
return;
}
va_start(ap, fmt);
r = vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
if (r < 0) {
msgbuf[0] = '\0';
} else if ((unsigned int)r >= sizeof(msgbuf)) {
/* Truncated */
msgbuf[sizeof(msgbuf) - 1] = '\0';
}
va_end(ap);
dispatch_log(resp->disp, level, "%s response %p: %s",
socktype2str(resp), resp, msgbuf);
}
/*%
 * Set up the local and peer addresses for a dispatch entry, choosing a
 * random local port from the configured range if none was specified.
 * Fails once the entry has exceeded its retry limit, or when no ports
 * are available for the address family.
 */
static isc_result_t
setup_socket(dns_dispatch_t *disp, dns_dispentry_t *resp,
const isc_sockaddr_t *dest, in_port_t *portp) {
dns_dispatchmgr_t *mgr = disp->mgr;
unsigned int nports;
in_port_t *ports = NULL;
in_port_t port = *portp;
if (resp->retries++ > 5) {
return ISC_R_FAILURE;
}
if (isc_sockaddr_pf(&disp->local) == AF_INET) {
nports = mgr->nv4ports;
ports = mgr->v4ports;
} else {
nports = mgr->nv6ports;
ports = mgr->v6ports;
}
if (nports == 0) {
return ISC_R_ADDRNOTAVAIL;
}
resp->local = disp->local;
resp->peer = *dest;
if (port == 0) {
port = ports[isc_random_uniform(nports)];
isc_sockaddr_setport(&resp->local, port);
*portp = port;
}
resp->port = port;
return ISC_R_SUCCESS;
}
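/*%
 * Hash a dispatch entry's <peer address, local port, query ID> tuple
 * for lookup in the QID table.
 */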
static uint32_t
qid_hash(const dns_dispentry_t *dispentry) {
isc_hash32_t hash;
isc_hash32_init(&hash);
isc_sockaddr_hash_ex(&hash, &dispentry->peer, true);
isc_hash32_hash(&hash, &dispentry->id, sizeof(dispentry->id), true);
isc_hash32_hash(&hash, &dispentry->port, sizeof(dispentry->port), true);
return isc_hash32_finalize(&hash);
}
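
/*
 * Equality callback for QID table lookups (a cds_lfht match
 * function): two entries refer to the same response slot only when
 * the message ID, the local port, and the peer address all match,
 * mirroring the tuple hashed by qid_hash() above.
 */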
static int
qid_match(struct cds_lfht_node *node, const void *key0) {
const dns_dispentry_t *dispentry =
caa_container_of(node, dns_dispentry_t, ht_node);
const dns_dispentry_t *key = key0;
return dispentry->id == key->id && dispentry->port == key->port &&
isc_sockaddr_equal(&dispentry->peer, &key->peer);
}
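
/*
 * Readers are expected to find entries with the usual urcu lookup
 * pattern; a minimal sketch, assuming a table pointer named
 * 'qid_table' (illustrative, not the actual variable in this file):
 *
 *	dns_dispentry_t key = { .id = id, .port = port, .peer = peer };
 *	struct cds_lfht_iter iter;
 *
 *	cds_lfht_lookup(qid_table, qid_hash(&key), qid_match, &key,
 *			&iter);
 *	struct cds_lfht_node *node = cds_lfht_iter_get_node(&iter);
 *	if (node != NULL) {
 *		resp = caa_container_of(node, dns_dispentry_t, ht_node);
 *	}
 */

/*
 * Deferred destructor, run via call_rcu() after a grace period so
 * that lockless readers of the QID table cannot still be holding a
 * pointer to the entry when its memory is freed.
 */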
static void
dispentry_destroy_rcu(struct rcu_head *rcu_head) {
dns_dispentry_t *resp = caa_container_of(rcu_head, dns_dispentry_t,
rcu_head);
isc_mem_putanddetach(&resp->mctx, resp, sizeof(*resp));
}
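
/*
 * Final teardown of a dispatch entry: cancel anything still pending,
 * drop the netmgr handle, TLS context cache, and transport
 * references, release the dispatch, and hand the memory back via
 * RCU.
 */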
static void
dispentry_destroy(dns_dispentry_t *resp) {
dns_dispatch_t *disp = resp->disp;
/*
* We need to call this from here in case there's an external event that
* shuts down our dispatch (like ISC_R_SHUTTINGDOWN).
*/
dispentry_cancel(resp, ISC_R_CANCELED);
INSIST(disp->requests > 0);
disp->requests--;
resp->magic = 0;
INSIST(!ISC_LINK_LINKED(resp, plink));
INSIST(!ISC_LINK_LINKED(resp, alink));
INSIST(!ISC_LINK_LINKED(resp, rlink));
dispentry_log(resp, ISC_LOG_DEBUG(90), "destroying");
if (resp->handle != NULL) {
dispentry_log(resp, ISC_LOG_DEBUG(90),
"detaching handle %p from %p", resp->handle,
&resp->handle);
isc_nmhandle_detach(&resp->handle);
}
if (resp->tlsctx_cache != NULL) {
isc_tlsctx_cache_detach(&resp->tlsctx_cache);
}
if (resp->transport != NULL) {
dns_transport_detach(&resp->transport);
}
dns_dispatch_detach(&disp); /* DISPATCH001 */
call_rcu(&resp->rcu_head, dispentry_destroy_rcu);
}
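
/*
 * Reference counting for dns_dispentry_t; dispentry_destroy() runs
 * once the last reference is released.
 */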
#if DNS_DISPATCH_TRACE
ISC_REFCOUNT_TRACE_IMPL(dns_dispentry, dispentry_destroy);
#else
ISC_REFCOUNT_IMPL(dns_dispentry, dispentry_destroy);
#endif
/*
* How long in milliseconds has it been since this dispentry
* started reading?
*/
static unsigned int
dispentry_runtime(dns_dispentry_t *resp, const isc_time_t *now) {
if (isc_time_isepoch(&resp->start)) {
return 0;
}
return isc_time_microdiff(now, &resp->start) / 1000;
}
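
/*
 * A hypothetical caller might use this to turn elapsed time into a
 * remaining-timeout value before rearming a read, e.g.:
 *
 *	int64_t left = resp->timeout - dispentry_runtime(resp, &now);
 *	if (left <= 0) {
 *		// treat the read as timed out
 *	}
 *
 * ('resp->timeout' is illustrative here; the field the real callers
 * use may differ.)
 */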
/*
* General flow:
*
* If I/O result == CANCELED or error, free the buffer.
*
* If query, free the buffer, restart.
*
* If response:
* Allocate event, fill in details.
* If cannot allocate, free buffer, restart.
* Find target. If not found, free buffer, restart.
* If event queue is not empty, queue; else, send.
* restart.
*/
static void
udp_recv(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region,
void *arg) {
dns_dispentry_t *resp = (dns_dispentry_t *)arg;
dns_dispatch_t *disp = NULL;
dns_messageid_t id;
isc_result_t dres;
isc_buffer_t source;
unsigned int flags;
isc_sockaddr_t peer;
isc_netaddr_t netaddr;
int match;
int64_t timeout = 0;
bool respond = true;
isc_time_t now;
REQUIRE(VALID_RESPONSE(resp));
REQUIRE(VALID_DISPATCH(resp->disp));
disp = resp->disp;
REQUIRE(disp->tid == isc_tid());
INSIST(resp->reading);
resp->reading = false;
if (resp->state == DNS_DISPATCHSTATE_CANCELED) {
/*
* Nobody is interested in the callback if the response
* has been canceled already. Detach from the response
* and the handle.
*/
respond = false;
eresult = ISC_R_CANCELED;
}
dispentry_log(resp, ISC_LOG_DEBUG(90),
"read callback:%s, requests %" PRIuFAST32,
isc_result_totext(eresult), disp->requests);
if (eresult != ISC_R_SUCCESS) {
/*
* This is most likely a network error on a connected
* socket, a timeout, or a canceled query.
* It makes no sense to check the address or parse the
* packet, but we can return the error to the caller.
*/
goto done;
}
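
/*
 * Find out where the packet actually came from before looking at
 * its contents.
 */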
peer = isc_nmhandle_peeraddr(handle);
isc_netaddr_fromsockaddr(&netaddr, &peer);
/*
* If this is from a blackholed address, drop it.
*/
if (disp->mgr->blackhole != NULL &&
dns_acl_match(&netaddr, NULL, disp->mgr->blackhole, NULL, &match,
NULL) == ISC_R_SUCCESS &&
match > 0)
{
if (isc_log_wouldlog(ISC_LOG_DEBUG(10))) {
char netaddrstr[ISC_NETADDR_FORMATSIZE];
isc_netaddr_format(&netaddr, netaddrstr,
sizeof(netaddrstr));
dispentry_log(resp, ISC_LOG_DEBUG(10),
"blackholed packet from %s", netaddrstr);
}
goto next;
}
/*
* Peek at the message header to extract the ID and flags
* without parsing the whole message.
*/
id = resp->id;
isc_buffer_init(&source, region->base, region->length);
isc_buffer_add(&source, region->length);
dres = dns_message_peekheader(&source, &id, &flags);
if (dres != ISC_R_SUCCESS) {
char netaddrstr[ISC_NETADDR_FORMATSIZE];
isc_netaddr_format(&netaddr, netaddrstr, sizeof(netaddrstr));
dispentry_log(resp, ISC_LOG_DEBUG(10),
"got garbage packet from %s", netaddrstr);
goto next;
}
dispentry_log(resp, ISC_LOG_DEBUG(92),
"got valid DNS message header, /QR %c, id %u",
((flags & DNS_MESSAGEFLAG_QR) != 0) ? '1' : '0', id);
/*
* Look at the message flags. If it's a query, ignore it.
*/
if ((flags & DNS_MESSAGEFLAG_QR) == 0) {
goto next;
}
/*
* The QID and the address must match the expected ones.
*/
if (resp->id != id || !isc_sockaddr_equal(&peer, &resp->peer)) {
dispentry_log(resp, ISC_LOG_DEBUG(90),
"response doesn't match");
inc_stats(disp->mgr, dns_resstatscounter_mismatch);
goto next;
}
/*
* We have the right resp, so call the caller back.
*/
goto done;
next:
/*
* This is the wrong response. Check whether there is still enough
* time to wait for the correct one to arrive before the timeout fires.
*/
now = isc_loop_now(resp->loop);
if (resp->timeout > 0) {
timeout = resp->timeout - dispentry_runtime(resp, &now);
if (timeout <= 0) {
/*
* The time window for receiving the correct response is
* already closed; libuv has just not processed the
* socket timer yet. Invoke the read callback,
* indicating a timeout.
*/
eresult = ISC_R_TIMEDOUT;
goto done;
}
}
/*
* Do not invoke the read callback just yet and instead wait for the
* proper response to arrive until the original timeout fires.
*/
respond = false;
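/* Resume reading for whatever remains of the original timeout. */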
udp_dispatch_getnext(resp, timeout);
done:
if (respond) {
dispentry_log(resp, ISC_LOG_DEBUG(90),
"UDP read callback on %p: %s", handle,
isc_result_totext(eresult));
resp->response(eresult, region, resp->arg);
}
dns_dispentry_detach(&resp); /* DISPENTRY003 */
}
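/*
 * A TCP read timed out: time out the oldest response on the active
 * queue (the one that has been waiting the longest) and record it in
 * disp->timedout so that a late answer for it can be accounted for
 * later.
 */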
static isc_result_t
tcp_recv_oldest(dns_dispatch_t *disp, dns_dispentry_t **respp) {
dns_dispentry_t *resp = NULL;
resp = ISC_LIST_HEAD(disp->active);
if (resp != NULL) {
disp->timedout++;
*respp = resp;
return ISC_R_TIMEDOUT;
}
return ISC_R_NOTFOUND;
}
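/*
 * A TCP read succeeded: peek at the DNS message header, ignore
 * anything that is not a response, and look up the matching dispentry
 * by query ID, peer address, and local port.
 */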
/*
* NOTE: Must be RCU read locked!
*/
static isc_result_t
tcp_recv_success(dns_dispatch_t *disp, isc_region_t *region,
isc_sockaddr_t *peer, dns_dispentry_t **respp) {
isc_buffer_t source;
dns_messageid_t id;
unsigned int flags;
isc_result_t result = ISC_R_SUCCESS;
dispatch_log(disp, ISC_LOG_DEBUG(90),
"TCP read success, length == %d, addr = %p",
region->length, region->base);
/*
* Peek into the buffer to see what we can see.
*/
isc_buffer_init(&source, region->base, region->length);
isc_buffer_add(&source, region->length);
result = dns_message_peekheader(&source, &id, &flags);
if (result != ISC_R_SUCCESS) {
dispatch_log(disp, ISC_LOG_DEBUG(10), "got garbage packet");
return ISC_R_UNEXPECTED;
}
dispatch_log(disp, ISC_LOG_DEBUG(92),
"got valid DNS message header, /QR %c, id %u",
((flags & DNS_MESSAGEFLAG_QR) != 0) ? '1' : '0', id);
/*
* Look at the message flags. If it's a query, ignore it and keep
* reading.
*/
if ((flags & DNS_MESSAGEFLAG_QR) == 0) {
dispatch_log(disp, ISC_LOG_DEBUG(10),
"got DNS query instead of answer");
return ISC_R_UNEXPECTED;
}
/*
* We have a valid response; find the associated dispentry object
* and call the caller back.
*/
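/*
 * The lookup key combines the query ID, the peer address, and our
 * local port, so a response can only match a query that was sent
 * from this socket to that server.
 */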
dns_dispentry_t key = {
.id = id,
.peer = *peer,
.port = isc_sockaddr_getport(&disp->local),
};
struct cds_lfht_iter iter;
cds_lfht_lookup(disp->mgr->qids, qid_hash(&key), qid_match, &key,
&iter);
dns_dispentry_t *resp = cds_lfht_entry(cds_lfht_iter_get_node(&iter),
dns_dispentry_t, ht_node);
/* Skip responses that are not ours */
if (resp != NULL && resp->disp == disp) {
if (!resp->reading) {
/*
* We already got a message for this QID and weren't
* expecting any more.
*/
result = ISC_R_UNEXPECTED;
} else {
*respp = resp;
}
} else {
result = ISC_R_NOTFOUND;
}
dispatch_log(disp, ISC_LOG_DEBUG(90),
"search for response in hashtable: %s",
isc_result_totext(result));
return result;
}
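/*
 * Move a response from the dispatch's active list onto the caller's
 * 'resps' list, taking a reference and recording the result so the
 * response callback can be run once the list walk is finished.
 */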
static void
tcp_recv_add(dns_displist_t *resps, dns_dispentry_t *resp,
isc_result_t result) {
dns_dispentry_ref(resp); /* DISPENTRY009 */
ISC_LIST_UNLINK(resp->disp->active, resp, alink);
ISC_LIST_APPEND(*resps, resp, rlink);
INSIST(resp->reading);
resp->reading = false;
resp->result = result;
}
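/*
 * The connection is going away: fail every response still on the
 * active list with 'result' and mark the dispatch as canceled.
 */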
static void
tcp_recv_shutdown(dns_dispatch_t *disp, dns_displist_t *resps,
isc_result_t result) {
/*
* If there are any active responses, shut them all down.
*/
ISC_LIST_FOREACH(disp->active, resp, alink) {
tcp_recv_add(resps, resp, result);
}
disp->state = DNS_DISPATCHSTATE_CANCELED;
}
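/*
 * Run the response callbacks for every entry collected on the 'resps'
 * list and drop the references taken in tcp_recv_add().
 */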
static void
tcp_recv_processall(dns_displist_t *resps, isc_region_t *region) {
ISC_LIST_FOREACH(*resps, resp, rlink) {
ISC_LIST_UNLINK(*resps, resp, rlink);
dispentry_log(resp, ISC_LOG_DEBUG(90), "read callback: %s",
isc_result_totext(resp->result));
resp->response(resp->result, region, resp->arg);
dns_dispentry_detach(&resp); /* DISPENTRY009 */
}
}
/*
 * General flow:
 *
 * If the I/O result is CANCELED, EOF, or an error, fail every pending
 * response as the various queues drain and shut the connection down.
 *
 * If it is a response:
 *	peek at the header to get the query ID and find the matching
 *	dispentry; if none is found, treat the read as unexpected.
 *	trigger any timeouts that are already due, run the response
 *	callbacks, and restart reading.
 */
static void
tcp_recv(isc_nmhandle_t *handle, isc_result_t result, isc_region_t *region,
void *arg) {
dns_dispatch_t *disp = (dns_dispatch_t *)arg;
dns_dispentry_t *resp = NULL;
char buf[ISC_SOCKADDR_FORMATSIZE];
isc_sockaddr_t peer;
dns_displist_t resps = ISC_LIST_INITIALIZER;
isc_time_t now;
int timeout = 0;
REQUIRE(VALID_DISPATCH(disp));
REQUIRE(disp->tid == isc_tid());
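/* A read must have been pending for this callback to fire. */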
INSIST(disp->reading);
disp->reading = false;
dispatch_log(disp, ISC_LOG_DEBUG(90),
"TCP read:%s:requests %" PRIuFAST32,
isc_result_totext(result), disp->requests);
peer = isc_nmhandle_peeraddr(handle);
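/*
 * The QID table is a lock-free hash table (liburcu cds_lfht); the
 * lookup in tcp_recv_success() must run inside an RCU read-side
 * critical section.
 */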
rcu_read_lock();
/*
* Phase 1: Process timeout and success.
*/
switch (result) {
case ISC_R_TIMEDOUT:
/*
* Time out the oldest response in the active queue.
*/
result = tcp_recv_oldest(disp, &resp);
break;
case ISC_R_SUCCESS:
/* We got an answer */
result = tcp_recv_success(disp, region, &peer, &resp);
break;
default:
break;
}
if (resp != NULL) {
tcp_recv_add(&resps, resp, result);
}
/*
 * Phase 2: Check whether this read belongs to a query that has
 * already timed out.
 */
if (result == ISC_R_NOTFOUND) {
if (disp->timedout > 0) {
/* There was an active query that timed out before */
disp->timedout--;
} else {
result = ISC_R_UNEXPECTED;
}
}
/*
 * Phase 3: Trigger timeouts. It's possible that the responses would
 * have timed out already, but non-matching TCP reads have prevented
 * this.
 */
resp = ISC_LIST_HEAD(disp->active);
if (resp != NULL) {
now = isc_loop_now(resp->loop);
}
while (resp != NULL) {
dns_dispentry_t *next = ISC_LIST_NEXT(resp, alink);
if (resp->timeout > 0) {
timeout = resp->timeout - dispentry_runtime(resp, &now);
if (timeout <= 0) {
tcp_recv_add(&resps, resp, ISC_R_TIMEDOUT);
}
}
resp = next;
}
/*
* Phase 4: log if we errored out.
*/
switch (result) {
case ISC_R_SUCCESS:
case ISC_R_TIMEDOUT:
case ISC_R_NOTFOUND:
break;
case ISC_R_SHUTTINGDOWN:
case ISC_R_CANCELED:
case ISC_R_EOF:
case ISC_R_CONNECTIONRESET:
isc_sockaddr_format(&peer, buf, sizeof(buf));
dispatch_log(disp, ISC_LOG_DEBUG(90),
"shutting down TCP: %s: %s", buf,
isc_result_totext(result));
tcp_recv_shutdown(disp, &resps, result);
break;
default:
isc_sockaddr_format(&peer, buf, sizeof(buf));
dispatch_log(disp, ISC_LOG_ERROR,
"shutting down due to TCP "
"receive error: %s: %s",
buf, isc_result_totext(result));
tcp_recv_shutdown(disp, &resps, result);
break;
}
/*
* Phase 5: Resume reading if there are still active responses
*/
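/*
 * The response at the head of the active list determines the next
 * read timeout applied to the shared TCP handle.
 */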
resp = ISC_LIST_HEAD(disp->active);
if (resp != NULL) {
if (resp->timeout > 0) {
timeout = resp->timeout - dispentry_runtime(resp, &now);
INSIST(timeout > 0);
}
tcp_startrecv(disp, resp);
if (timeout > 0) {
isc_nmhandle_settimeout(handle, timeout);
}
}
rcu_read_unlock();
/*
* Phase 6: Process all scheduled callbacks.
*/
tcp_recv_processall(&resps, region);
dns_dispatch_detach(&disp); /* DISPATCH002 */
}
/*%
* Create a temporary port list to set the initial default set of dispatch
 * ephemeral ports. This is of limited use, since the application will
 * normally set the ports explicitly; it is provided only to cover a
 * few minor corner cases.
*/
static void
create_default_portset(isc_mem_t *mctx, int family, isc_portset_t **portsetp) {
in_port_t low, high;
isc_net_getudpportrange(family, &low, &high);
isc_portset_create(mctx, portsetp);
isc_portset_addrange(*portsetp, low, high);
}
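/*
 * Replace the stored per-family port arrays with dense copies of the
 * supplied port sets, freeing any previously installed arrays.
 */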
static isc_result_t
setavailports(dns_dispatchmgr_t *mgr, isc_portset_t *v4portset,
isc_portset_t *v6portset) {
in_port_t *v4ports, *v6ports, p = 0;
unsigned int nv4ports, nv6ports, i4 = 0, i6 = 0;
nv4ports = isc_portset_nports(v4portset);
nv6ports = isc_portset_nports(v6portset);
v4ports = NULL;
if (nv4ports != 0) {
v4ports = isc_mem_cget(mgr->mctx, nv4ports, sizeof(in_port_t));
}
v6ports = NULL;
if (nv6ports != 0) {
v6ports = isc_mem_cget(mgr->mctx, nv6ports, sizeof(in_port_t));
}
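	/*
	 * Walk the entire 16-bit port range once, copying the members
	 * of each set into the dense arrays allocated above.
	 */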
do {
if (isc_portset_isset(v4portset, p)) {
INSIST(i4 < nv4ports);
v4ports[i4++] = p;
}
if (isc_portset_isset(v6portset, p)) {
INSIST(i6 < nv6ports);
v6ports[i6++] = p;
}
} while (p++ < 65535);
INSIST(i4 == nv4ports && i6 == nv6ports);
if (mgr->v4ports != NULL) {
isc_mem_cput(mgr->mctx, mgr->v4ports, mgr->nv4ports,
sizeof(in_port_t));
}
mgr->v4ports = v4ports;
mgr->nv4ports = nv4ports;
if (mgr->v6ports != NULL) {
isc_mem_cput(mgr->mctx, mgr->v6ports, mgr->nv6ports,
sizeof(in_port_t));
}
mgr->v6ports = v6ports;
mgr->nv6ports = nv6ports;
return ISC_R_SUCCESS;
}
/*
* Publics.
*/
isc_result_t
dns_dispatchmgr_create(isc_mem_t *mctx, dns_dispatchmgr_t **mgrp) {
dns_dispatchmgr_t *mgr = NULL;
isc_portset_t *v4portset = NULL;
isc_portset_t *v6portset = NULL;
REQUIRE(mctx != NULL);
REQUIRE(mgrp != NULL && *mgrp == NULL);
mgr = isc_mem_get(mctx, sizeof(dns_dispatchmgr_t));
*mgr = (dns_dispatchmgr_t){
.magic = 0,
.nloops = isc_loopmgr_nloops(),
};
#if DNS_DISPATCH_TRACE
fprintf(stderr, "dns_dispatchmgr__init:%s:%s:%d:%p->references = 1\n",
__func__, __FILE__, __LINE__, mgr);
#endif
isc_refcount_init(&mgr->references, 1);
isc_mem_attach(mctx, &mgr->mctx);
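	/*
	 * One lock-free hash table of TCP dispatches per event loop,
	 * so lookups and insertions stay local to their loop's thread.
	 */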
mgr->tcps = isc_mem_cget(mgr->mctx, mgr->nloops, sizeof(mgr->tcps[0]));
for (size_t i = 0; i < mgr->nloops; i++) {
mgr->tcps[i] = cds_lfht_new(
2, 2, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING,
NULL);
}
create_default_portset(mgr->mctx, AF_INET, &v4portset);
create_default_portset(mgr->mctx, AF_INET6, &v6portset);
setavailports(mgr, v4portset, v6portset);
isc_portset_destroy(mgr->mctx, &v4portset);
isc_portset_destroy(mgr->mctx, &v6portset);
mgr->qids = cds_lfht_new(QIDS_INIT_SIZE, QIDS_MIN_SIZE, 0,
CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING,
NULL);
mgr->magic = DNS_DISPATCHMGR_MAGIC;
*mgrp = mgr;
return ISC_R_SUCCESS;
}
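/*
 * A minimal usage sketch (hypothetical caller; 'mctx' is assumed to be
 * an existing memory context, error handling and shutdown ordering
 * elided):
 *
 *	dns_dispatchmgr_t *mgr = NULL;
 *	RUNTIME_CHECK(dns_dispatchmgr_create(mctx, &mgr) == ISC_R_SUCCESS);
 *	...
 *	dns_dispatchmgr_detach(&mgr);
 */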
#if DNS_DISPATCH_TRACE
ISC_REFCOUNT_TRACE_IMPL(dns_dispatchmgr, dispatchmgr_destroy);
#else
ISC_REFCOUNT_IMPL(dns_dispatchmgr, dispatchmgr_destroy);
#endif
void
dns_dispatchmgr_setblackhole(dns_dispatchmgr_t *mgr, dns_acl_t *blackhole) {
REQUIRE(VALID_DISPATCHMGR(mgr));
if (mgr->blackhole != NULL) {
dns_acl_detach(&mgr->blackhole);
}
dns_acl_attach(blackhole, &mgr->blackhole);
}
dns_acl_t *
dns_dispatchmgr_getblackhole(dns_dispatchmgr_t *mgr) {
REQUIRE(VALID_DISPATCHMGR(mgr));
return mgr->blackhole;
}
isc_result_t
dns_dispatchmgr_setavailports(dns_dispatchmgr_t *mgr, isc_portset_t *v4portset,
isc_portset_t *v6portset) {
REQUIRE(VALID_DISPATCHMGR(mgr));
return setavailports(mgr, v4portset, v6portset);
}
static void
dispatchmgr_destroy(dns_dispatchmgr_t *mgr) {
REQUIRE(VALID_DISPATCHMGR(mgr));
isc_refcount_destroy(&mgr->references);
mgr->magic = 0;
RUNTIME_CHECK(!cds_lfht_destroy(mgr->qids, NULL));
for (size_t i = 0; i < mgr->nloops; i++) {
RUNTIME_CHECK(!cds_lfht_destroy(mgr->tcps[i], NULL));
}
isc_mem_cput(mgr->mctx, mgr->tcps, mgr->nloops, sizeof(mgr->tcps[0]));
if (mgr->blackhole != NULL) {
dns_acl_detach(&mgr->blackhole);
}
if (mgr->stats != NULL) {
isc_stats_detach(&mgr->stats);
}
if (mgr->v4ports != NULL) {
isc_mem_cput(mgr->mctx, mgr->v4ports, mgr->nv4ports,
sizeof(in_port_t));
}
if (mgr->v6ports != NULL) {
isc_mem_cput(mgr->mctx, mgr->v6ports, mgr->nv6ports,
sizeof(in_port_t));
}
isc_mem_putanddetach(&mgr->mctx, mgr, sizeof(dns_dispatchmgr_t));
}
void
dns_dispatchmgr_setstats(dns_dispatchmgr_t *mgr, isc_stats_t *stats) {
REQUIRE(VALID_DISPATCHMGR(mgr));
REQUIRE(mgr->stats == NULL);
isc_stats_attach(stats, &mgr->stats);
}
/*
* Allocate and set important limits.
*/
static void
dispatch_allocate(dns_dispatchmgr_t *mgr, isc_socktype_t type, isc_tid_t tid,
dns_dispatch_t **dispp) {
dns_dispatch_t *disp = NULL;
REQUIRE(VALID_DISPATCHMGR(mgr));
REQUIRE(dispp != NULL && *dispp == NULL);
/*
 * Set up the common parts of the dispatcher; options that depend
 * on the socket type (TCP vs. UDP) are left for the caller to set.
*/
disp = isc_mem_get(mgr->mctx, sizeof(*disp));
*disp = (dns_dispatch_t){
.socktype = type,
.active = ISC_LIST_INITIALIZER,
.pending = ISC_LIST_INITIALIZER,
.tid = tid,
.magic = DISPATCH_MAGIC,
};
isc_mem_attach(mgr->mctx, &disp->mctx);
dns_dispatchmgr_attach(mgr, &disp->mgr);
#if DNS_DISPATCH_TRACE
fprintf(stderr, "dns_dispatch__init:%s:%s:%d:%p->references = 1\n",
__func__, __FILE__, __LINE__, disp);
#endif
isc_refcount_init(&disp->references, 1); /* DISPATCH000 */
*dispp = disp;
}
struct dispatch_key {
const isc_sockaddr_t *local;
const isc_sockaddr_t *peer;
const dns_transport_t *transport;
};
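/*
 * Hash the peer address and port, folding in the local address (but
 * not the local port) when one is present.
 */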
static uint32_t
dispatch_hash(struct dispatch_key *key) {
uint32_t hashval = isc_sockaddr_hash(key->peer, false);
if (key->local) {
hashval ^= isc_sockaddr_hash(key->local, true);
}
return hashval;
}
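/*
 * A candidate dispatch matches when its transport and peer address are
 * identical to the key's; a NULL local address in the key acts as a
 * wildcard and matches any local address.
 */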
static int
dispatch_match(struct cds_lfht_node *node, const void *key0) {
dns_dispatch_t *disp = caa_container_of(node, dns_dispatch_t, ht_node);
const struct dispatch_key *key = key0;
return disp->transport == key->transport &&
isc_sockaddr_equal(&disp->peer, key->peer) &&
(key->local == NULL ||
isc_sockaddr_equal(&disp->local, key->local));
}
isc_result_t
dns_dispatch_createtcp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
const isc_sockaddr_t *destaddr,
dns_transport_t *transport, dns_dispatchopt_t options,
dns_dispatch_t **dispp) {
dns_dispatch_t *disp = NULL;
isc_tid_t tid = isc_tid();
REQUIRE(VALID_DISPATCHMGR(mgr));
REQUIRE(destaddr != NULL);
dispatch_allocate(mgr, isc_socktype_tcp, tid, &disp);
disp->options = options;
disp->peer = *destaddr;
if (transport != NULL) {
dns_transport_attach(transport, &disp->transport);
}
if (localaddr != NULL) {
disp->local = *localaddr;
} else {
int pf = isc_sockaddr_pf(destaddr);
isc_sockaddr_anyofpf(&disp->local, pf);
isc_sockaddr_setport(&disp->local, 0);
}
/*
 * Add it to the dispatch manager's per-thread table of TCP
 * dispatches (unless the dispatch is unshared).
*/
struct dispatch_key key = {
.local = &disp->local,
.peer = &disp->peer,
.transport = transport,
};
if ((disp->options & DNS_DISPATCHOPT_UNSHARED) == 0) {
rcu_read_lock();
cds_lfht_add(mgr->tcps[tid], dispatch_hash(&key),
&disp->ht_node);
rcu_read_unlock();
}
if (isc_log_wouldlog(90)) {
char addrbuf[ISC_SOCKADDR_FORMATSIZE];
isc_sockaddr_format(&disp->local, addrbuf,
ISC_SOCKADDR_FORMATSIZE);
mgr_log(mgr, ISC_LOG_DEBUG(90),
"dns_dispatch_createtcp: created TCP dispatch %p for "
"%s",
disp, addrbuf);
}
*dispp = disp;
return ISC_R_SUCCESS;
}
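/*
 * A hypothetical usage sketch pairing dns_dispatch_gettcp() (below)
 * with dns_dispatch_createtcp(): reuse a shared connection when one
 * exists, otherwise create a new dispatch. Here 'peer' is assumed to
 * be a previously initialized isc_sockaddr_t, and error handling is
 * elided:
 *
 *	dns_dispatch_t *disp = NULL;
 *	if (dns_dispatch_gettcp(mgr, &peer, NULL, NULL, &disp) !=
 *	    ISC_R_SUCCESS) {
 *		(void)dns_dispatch_createtcp(mgr, NULL, &peer, NULL, 0,
 *					     &disp);
 *	}
 */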
isc_result_t
dns_dispatch_gettcp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *destaddr,
const isc_sockaddr_t *localaddr, dns_transport_t *transport,
dns_dispatch_t **dispp) {
dns_dispatch_t *disp_connected = NULL;
dns_dispatch_t *disp_fallback = NULL;
isc_result_t result = ISC_R_NOTFOUND;
isc_tid_t tid = isc_tid();
REQUIRE(VALID_DISPATCHMGR(mgr));
REQUIRE(destaddr != NULL);
REQUIRE(dispp != NULL && *dispp == NULL);
struct dispatch_key key = {
.local = localaddr,
.peer = destaddr,
.transport = transport,
};
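	/*
	 * Scan this thread's connection table for a match: prefer a
	 * connected dispatch that still has active responses; failing
	 * that, fall back to one that is still connecting.
	 */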
rcu_read_lock();
struct cds_lfht_iter iter;
dns_dispatch_t *disp = NULL;
cds_lfht_for_each_entry_duplicate(mgr->tcps[tid], dispatch_hash(&key),
dispatch_match, &key, &iter, disp,
ht_node) {
INSIST(disp->tid == isc_tid());
INSIST(disp->socktype == isc_socktype_tcp);
switch (disp->state) {
case DNS_DISPATCHSTATE_NONE:
/* A dispatch in an indeterminate state; skip it. */
break;
case DNS_DISPATCHSTATE_CONNECTED:
if (ISC_LIST_EMPTY(disp->active)) {
/* Ignore dispatches with no active responses. */
break;
}
/* We found a connected dispatch */
dns_dispatch_attach(disp, &disp_connected);
break;
case DNS_DISPATCHSTATE_CONNECTING:
if (ISC_LIST_EMPTY(disp->pending)) {
/* Ignore dispatches with no pending responses. */
break;
}
/* We found "a" dispatch, store it for later */
if (disp_fallback == NULL) {
dns_dispatch_attach(disp, &disp_fallback);
}
break;
case DNS_DISPATCHSTATE_CANCELED:
/* A canceled dispatch; skip it. */
break;
default:
UNREACHABLE();
}
if (disp_connected != NULL) {
break;
}
}
rcu_read_unlock();
if (disp_connected != NULL) {
/* We found a connected dispatch. */
INSIST(disp_connected->handle != NULL);
*dispp = disp_connected;
disp_connected = NULL;
result = ISC_R_SUCCESS;
if (disp_fallback != NULL) {
dns_dispatch_detach(&disp_fallback);
}
} else if (disp_fallback != NULL) {
*dispp = disp_fallback;
result = ISC_R_SUCCESS;
}
return result;
}
isc_result_t
dns_dispatch_createudp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
dns_dispatch_t **dispp) {
isc_result_t result;
dns_dispatch_t *disp = NULL;
REQUIRE(VALID_DISPATCHMGR(mgr));
REQUIRE(localaddr != NULL);
REQUIRE(dispp != NULL && *dispp == NULL);
result = dispatch_createudp(mgr, localaddr, isc_tid(), &disp);
if (result == ISC_R_SUCCESS) {
*dispp = disp;
}
return result;
}
static isc_result_t
dispatch_createudp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
isc_tid_t tid, dns_dispatch_t **dispp) {
isc_result_t result = ISC_R_SUCCESS;
dns_dispatch_t *disp = NULL;
isc_sockaddr_t sa_any;
/*
* Check whether this address/port is available locally.
*/
isc_sockaddr_anyofpf(&sa_any, isc_sockaddr_pf(localaddr));
if (!isc_sockaddr_eqaddr(&sa_any, localaddr)) {
result = isc_nm_checkaddr(localaddr, isc_socktype_udp);
if (result != ISC_R_SUCCESS) {
return result;
}
}
dispatch_allocate(mgr, isc_socktype_udp, tid, &disp);
if (isc_log_wouldlog(90)) {
char addrbuf[ISC_SOCKADDR_FORMATSIZE];
isc_sockaddr_format(localaddr, addrbuf,
ISC_SOCKADDR_FORMATSIZE);
mgr_log(mgr, ISC_LOG_DEBUG(90),
"dispatch_createudp: created UDP dispatch %p for %s",
disp, addrbuf);
}
disp->local = *localaddr;
/*
* Don't append it to the dispatch manager's list: we don't track
* UDP dispatches there because only TCP dispatches are searched.
*
* ISC_LIST_APPEND(mgr->list, disp, link);
*/
*dispp = disp;
return result;
}
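/*
 * The dispatch memory is freed via call_rcu() (see dispatch_destroy()
 * below), so concurrent RCU readers of the lookup tables can never see
 * a freed dispatch.
 */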
static void
dispatch_destroy_rcu(struct rcu_head *rcu_head) {
dns_dispatch_t *disp = caa_container_of(rcu_head, dns_dispatch_t,
rcu_head);
isc_mem_putanddetach(&disp->mctx, disp, sizeof(*disp));
}
static void
dispatch_destroy(dns_dispatch_t *disp) {
dns_dispatchmgr_t *mgr = disp->mgr;
isc_tid_t tid = isc_tid();
disp->magic = 0;
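/*
 * Shared TCP dispatches are registered in the tid-indexed lookup
 * table; remove this one so it can no longer be found and reused.
 */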
if (disp->socktype == isc_socktype_tcp &&
(disp->options & DNS_DISPATCHOPT_UNSHARED) == 0)
{
(void)cds_lfht_del(mgr->tcps[tid], &disp->ht_node);
}
INSIST(disp->requests == 0);
INSIST(ISC_LIST_EMPTY(disp->pending));
INSIST(ISC_LIST_EMPTY(disp->active));
dispatch_log(disp, ISC_LOG_DEBUG(90), "destroying dispatch %p", disp);
if (disp->handle) {
dispatch_log(disp, ISC_LOG_DEBUG(90),
"detaching TCP handle %p from %p", disp->handle,
&disp->handle);
isc_nmhandle_detach(&disp->handle);
}
if (disp->transport != NULL) {
dns_transport_detach(&disp->transport);
}
dns_dispatchmgr_detach(&disp->mgr);
call_rcu(&disp->rcu_head, dispatch_destroy_rcu);
}
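/*
 * Generate the dns_dispatch attach/detach functions; dispatch_destroy()
 * runs when the last reference is detached.
 */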
#if DNS_DISPATCH_TRACE
ISC_REFCOUNT_TRACE_IMPL(dns_dispatch, dispatch_destroy);
#else
ISC_REFCOUNT_IMPL(dns_dispatch, dispatch_destroy);
#endif
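/*
 * Allocate and register a response entry on 'disp': assign a random
 * (or fixed) query ID, record the caller's connected/sent/response
 * callbacks, and, for UDP, set up the per-entry socket.
 */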
isc_result_t
dns_dispatch_add(dns_dispatch_t *disp, isc_loop_t *loop,
dns_dispatchopt_t options, unsigned int connect_timeout,
unsigned int timeout, const isc_sockaddr_t *dest,
dns_transport_t *transport, isc_tlsctx_cache_t *tlsctx_cache,
dispatch_cb_t connected, dispatch_cb_t sent,
dispatch_cb_t response, void *arg, dns_messageid_t *idp,
dns_dispentry_t **respp) {
REQUIRE(VALID_DISPATCH(disp));
REQUIRE(dest != NULL);
REQUIRE(respp != NULL && *respp == NULL);
REQUIRE(idp != NULL);
REQUIRE(disp->socktype == isc_socktype_tcp ||
disp->socktype == isc_socktype_udp);
REQUIRE(connected != NULL);
REQUIRE(response != NULL);
REQUIRE(sent != NULL);
REQUIRE(loop != NULL);
REQUIRE(disp->tid == isc_tid());
REQUIRE(disp->transport == transport);
if (disp->state == DNS_DISPATCHSTATE_CANCELED) {
return ISC_R_CANCELED;
}
in_port_t localport = isc_sockaddr_getport(&disp->local);
dns_dispentry_t *resp = isc_mem_get(disp->mctx, sizeof(*resp));
*resp = (dns_dispentry_t){
.connect_timeout = connect_timeout,
.timeout = timeout,
.port = localport,
.peer = *dest,
.loop = loop,
.connected = connected,
.sent = sent,
.response = response,
.arg = arg,
.alink = ISC_LINK_INITIALIZER,
.plink = ISC_LINK_INITIALIZER,
.rlink = ISC_LINK_INITIALIZER,
.magic = RESPONSE_MAGIC,
};
#if DNS_DISPATCH_TRACE
fprintf(stderr, "dns_dispentry__init:%s:%s:%d:%p->references = 1\n",
__func__, __FILE__, __LINE__, resp);
#endif
isc_refcount_init(&resp->references, 1); /* DISPENTRY000 */
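/*
 * UDP entries get their own socket; set it up now so that a
 * local-port allocation failure is counted and reported before
 * the entry is registered in the QID table.
 */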
if (disp->socktype == isc_socktype_udp) {
isc_result_t result = setup_socket(disp, resp, dest,
&localport);
if (result != ISC_R_SUCCESS) {
isc_mem_put(disp->mctx, resp, sizeof(*resp));
inc_stats(disp->mgr, dns_resstatscounter_dispsockfail);
return result;
}
}
isc_result_t result = ISC_R_NOMORE;
size_t i = 0;
rcu_read_lock();
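/*
 * The QID table is a lock-free, RCU-protected hash table; the read
 * lock must be held across the insertion attempts below.
 */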
do {
/*
* Try somewhat hard to find a unique ID. Start with
* a random number unless DNS_DISPATCHOPT_FIXEDID is set,
* in which case we start with the ID passed in via *idp.
*/
resp->id = ((options & DNS_DISPATCHOPT_FIXEDID) != 0)
? *idp
: (dns_messageid_t)isc_random16();
struct cds_lfht_node *node =
cds_lfht_add_unique(disp->mgr->qids, qid_hash(resp),
qid_match, resp, &resp->ht_node);
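/*
 * cds_lfht_add_unique() returns our own node on success, or the
 * already-present matching node when the ID is in use.
 */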
if (node != &resp->ht_node) {
if ((options & DNS_DISPATCHOPT_FIXEDID) != 0) {
/*
* When using a fixed ID, we must either
* use it or fail.
*/
goto fail;
}
} else {
result = ISC_R_SUCCESS;
break;
}
} while (i++ < QID_MAX_TRIES);

fail:
	if (result != ISC_R_SUCCESS) {
		isc_mem_put(disp->mctx, resp, sizeof(*resp));
		rcu_read_unlock();
		return result;
	}

	isc_mem_attach(disp->mctx, &resp->mctx);

	if (transport != NULL) {
		dns_transport_attach(transport, &resp->transport);
	}

	if (tlsctx_cache != NULL) {
		isc_tlsctx_cache_attach(tlsctx_cache, &resp->tlsctx_cache);
	}

	dns_dispatch_attach(disp, &resp->disp); /* DISPATCH001 */

	disp->requests++;
	inc_stats(disp->mgr, (disp->socktype == isc_socktype_udp)
				     ? dns_resstatscounter_disprequdp
				     : dns_resstatscounter_dispreqtcp);

	rcu_read_unlock();

	*idp = resp->id;
	*respp = resp;

	return ISC_R_SUCCESS;
}
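/*
 * Rough sketch of the intended caller flow for the registration path
 * above: dns_dispatch_connect(), dns_dispatch_send() and
 * dns_dispatch_done() belong to this unit, but the exact signatures
 * and sequence here are illustrative, not a definitive API reference:
 *
 *	dns_dispentry_t *resp = NULL;
 *	// ...register the entry via the function above...
 *	dns_dispatch_connect(resp);	// connect and start reading
 *	dns_dispatch_send(resp, ...);	// send the query
 *	// ...response callback fires when a matching QID arrives...
 *	dns_dispatch_done(&resp);	// cancel reads, drop references
 */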
isc_result_t
dns_dispatch_getnext(dns_dispentry_t *resp) {
	REQUIRE(VALID_RESPONSE(resp));
	REQUIRE(VALID_DISPATCH(resp->disp));

	dns_dispatch_t *disp = resp->disp;
	isc_result_t result = ISC_R_SUCCESS;
	int64_t timeout = 0;

	dispentry_log(resp, ISC_LOG_DEBUG(90), "getnext for QID %d", resp->id);

	if (resp->timeout > 0) {
		isc_time_t now = isc_loop_now(resp->loop);
		timeout = resp->timeout - dispentry_runtime(resp, &now);
		if (timeout <= 0) {
			return ISC_R_TIMEDOUT;
		}
	}
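	/*
	 * Illustration (assuming millisecond units): with
	 * resp->timeout == 800 and 300 ms already spent on this entry,
	 * the read below is rearmed with a 500 ms budget; once the
	 * elapsed time reaches the full budget we fail with
	 * ISC_R_TIMEDOUT above rather than rearming.
	 */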

	INSIST(disp->tid == isc_tid());
	switch (disp->socktype) {
	case isc_socktype_udp:
		udp_dispatch_getnext(resp, timeout);
		break;
	case isc_socktype_tcp:
		tcp_dispatch_getnext(disp, resp, timeout);
		break;
	default:
		UNREACHABLE();
	}

	return result;
}
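/*
 * dns_dispatch_getnext() is typically used from a response callback to
 * keep listening on the same entry (e.g. after discarding a reply the
 * caller did not like), rearming the UDP or TCP read with whatever
 * remains of the original timeout budget.
 */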
/*
* NOTE: Must be RCU read locked!
*/
static void
udp_dispentry_cancel(dns_dispentry_t *resp, isc_result_t result) {
	REQUIRE(VALID_RESPONSE(resp));
	REQUIRE(VALID_DISPATCH(resp->disp));
	REQUIRE(VALID_DISPATCHMGR(resp->disp->mgr));

	dns_dispatch_t *disp = resp->disp;
	bool respond = false;

	REQUIRE(disp->tid == isc_tid());

	dispentry_log(resp, ISC_LOG_DEBUG(90),
"canceling response: %s, %s/%s (%s/%s), "
"requests %" PRIuFAST32,
		      isc_result_totext(result), state2str(resp->state),
		      resp->reading ? "reading" : "not reading",
		      state2str(disp->state),
		      disp->reading ? "reading" : "not reading",
		      disp->requests);

	if (ISC_LINK_LINKED(resp, alink)) {
		ISC_LIST_UNLINK(disp->active, resp, alink);
	}
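	/*
	 * Entry states progress NONE -> CONNECTING -> CONNECTED ->
	 * CANCELED.  In NONE and CONNECTING only the bookkeeping below
	 * is needed; in CONNECTED an in-flight read must also be
	 * canceled so the handle is released; canceling an already
	 * CANCELED entry is a no-op.
	 */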
	switch (resp->state) {
	case DNS_DISPATCHSTATE_NONE:
		break;

	case DNS_DISPATCHSTATE_CONNECTING:
		break;

	case DNS_DISPATCHSTATE_CONNECTED:
		if (resp->reading) {
			respond = true;
			dispentry_log(resp, ISC_LOG_DEBUG(90),
				      "canceling read on %p", resp->handle);
			isc_nm_cancelread(resp->handle);
		}
		break;

	case DNS_DISPATCHSTATE_CANCELED:
		goto unlock;

	default:
		UNREACHABLE();
	}

	dec_stats(disp->mgr, dns_resstatscounter_disprequdp);
	(void)cds_lfht_del(disp->mgr->qids, &resp->ht_node);

	resp->state = DNS_DISPATCHSTATE_CANCELED;

unlock:
	if (respond) {
		dispentry_log(resp, ISC_LOG_DEBUG(90), "read callback: %s",
			      isc_result_totext(result));
		resp->response(result, NULL, resp->arg);
}
}

/*
 * NOTE: Must be RCU read locked!
 */
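/*
 * For example, dispentry_cancel() below wraps the type-specific
 * cancel calls in the RCU read lock:
 *
 *	rcu_read_lock();
 *	tcp_dispentry_cancel(resp, result);
 *	rcu_read_unlock();
 */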
static void
tcp_dispentry_cancel(dns_dispentry_t *resp, isc_result_t result) {
REQUIRE(VALID_RESPONSE(resp));
REQUIRE(VALID_DISPATCH(resp->disp));
REQUIRE(VALID_DISPATCHMGR(resp->disp->mgr));

	dns_dispatch_t *disp = resp->disp;
dns_displist_t resps = ISC_LIST_INITIALIZER;

	REQUIRE(disp->tid == isc_tid());

dispentry_log(resp, ISC_LOG_DEBUG(90),
"canceling response: %s, %s/%s (%s/%s), "
"requests %" PRIuFAST32,
isc_result_totext(result), state2str(resp->state),
resp->reading ? "reading" : "not reading",
state2str(disp->state),
disp->reading ? "reading" : "not reading",
disp->requests);
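
	/*
	 * Tear down according to the response state:
	 *
	 * - NONE or CONNECTING: nothing is being read yet, so there is
	 *   nothing to stop; just unregister the entry and mark it
	 *   CANCELED below.
	 * - CONNECTED: queue this response's pending read for canceled
	 *   processing, and if it was the last active response on the
	 *   shared TCP connection, cancel the dispatch-level read too.
	 * - CANCELED: the teardown has already happened; skip it.
	 */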
switch (resp->state) {
case DNS_DISPATCHSTATE_NONE:
break;

	case DNS_DISPATCHSTATE_CONNECTING:
break;

	case DNS_DISPATCHSTATE_CONNECTED:
if (resp->reading) {
tcp_recv_add(&resps, resp, ISC_R_CANCELED);
}

		INSIST(!ISC_LINK_LINKED(resp, alink));

		if (ISC_LIST_EMPTY(disp->active)) {
INSIST(disp->handle != NULL);

#if DISPATCH_TCP_KEEPALIVE
			/*
			 * This is experimental code that keeps the TCP
			 * connection open for one second before it is
			 * finally closed.  Keeping the TCP connection
			 * open allows it to be reused by dns_request,
			 * which uses dns_dispatch_gettcp() to join
			 * existing TCP connections.
			 *
			 * It is disabled for now because it changes the
			 * behaviour, but the code is kept here for future
			 * reference, for when dns_dispatch is improved to
			 * reuse TCP connections in the resolver as well.
			 *
			 * The TCP connection reuse should be seamless and
			 * require no extra handling on the client side.
			 */
isc_nmhandle_cleartimeout(disp->handle);
isc_nmhandle_settimeout(disp->handle, 1000);
if (!disp->reading) {
dispentry_log(resp, ISC_LOG_DEBUG(90),
"final 1 second timeout on %p",
disp->handle);
tcp_startrecv(disp, NULL);
}
#else
if (disp->reading) {
dispentry_log(resp, ISC_LOG_DEBUG(90),
"canceling read on %p",
disp->handle);
isc_nm_cancelread(disp->handle);
}
#endif
}
break;

	case DNS_DISPATCHSTATE_CANCELED:
goto unlock;

	default:
UNREACHABLE();
}

	dec_stats(disp->mgr, dns_resstatscounter_dispreqtcp);

	(void)cds_lfht_del(disp->mgr->qids, &resp->ht_node);

	resp->state = DNS_DISPATCHSTATE_CANCELED;

unlock:
	/*
	 * NOTE: Ideally, the response callbacks would be invoked
	 * asynchronously rather than directly from here, because
	 * dns_dispatch_done() is usually called from within a response
	 * callback, so the call stack can grow a little deeper here.
	 * This is mitigated by the ".reading" flag, so we never go
	 * into a loop.
	 */
tcp_recv_processall(&resps, NULL);
}
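
/*
 * Thin wrapper that dispatches the cancellation to the UDP or TCP
 * variant based on the socket type of the owning dispatch, holding
 * the RCU read lock across the call as the type-specific cancel
 * functions require.
 */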
static void
dispentry_cancel(dns_dispentry_t *resp, isc_result_t result) {
REQUIRE(VALID_RESPONSE(resp));
REQUIRE(VALID_DISPATCH(resp->disp));

	dns_dispatch_t *disp = resp->disp;

rcu_read_lock();
switch (disp->socktype) {
case isc_socktype_udp:
udp_dispentry_cancel(resp, result);
break;
case isc_socktype_tcp:
tcp_dispentry_cancel(resp, result);
break;
default:
UNREACHABLE();
}
rcu_read_unlock();
}
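
/*
 * A minimal usage sketch for finishing a dispatch entry (hypothetical
 * caller code; the real users are the dns_resolver and dns_request
 * units):
 *
 *	dns_dispentry_t *resp = NULL;
 *	...attach a response entry, connect, and send the query...
 *	...the response (or an error) arrives via the callback...
 *	dns_dispatch_done(&resp);	// 'resp' is NULL on return
 */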
void
dns_dispatch_done(dns_dispentry_t **respp) {
REQUIRE(VALID_RESPONSE(*respp));

	dns_dispentry_t *resp = *respp;
*respp = NULL;
dispentry_cancel(resp, ISC_R_CANCELED);
dns_dispentry_detach(&resp); /* DISPENTRY000 */
}
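
/*
 * Attach a freshly connected UDP handle to the response entry and
 * start reading on it; the reference taken here (DISPENTRY003) keeps
 * the entry alive while the read is pending.
 */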
static void
udp_startrecv(isc_nmhandle_t *handle, dns_dispentry_t *resp) {
REQUIRE(VALID_RESPONSE(resp));
dispentry_log(resp, ISC_LOG_DEBUG(90), "attaching handle %p to %p",
handle, &resp->handle);
isc_nmhandle_attach(handle, &resp->handle);
dns_dispentry_ref(resp); /* DISPENTRY003 */
dispentry_log(resp, ISC_LOG_DEBUG(90), "reading");
isc_nm_read(resp->handle, udp_recv, resp);
resp->reading = true;
}
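
/*
 * Start reading on a connected TCP dispatch.  'resp' may be NULL:
 * TCP reads are demultiplexed by the dispatch itself (in tcp_recv()),
 * so a read can be pending without a specific response attached.
 */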
static void
tcp_startrecv(dns_dispatch_t *disp, dns_dispentry_t *resp) {
REQUIRE(VALID_DISPATCH(disp));
REQUIRE(disp->socktype == isc_socktype_tcp);
dns_dispatch_ref(disp); /* DISPATCH002 */
if (resp != NULL) {
dispentry_log(resp, ISC_LOG_DEBUG(90), "reading from %p",
disp->handle);
INSIST(!isc_time_isepoch(&resp->start));
} else {
dispatch_log(disp, ISC_LOG_DEBUG(90),
"TCP reading without response from %p",
disp->handle);
}
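	/*
	 * The dispatch reference taken above (DISPATCH002) keeps the
	 * dispatch alive until the read callback (tcp_recv()) has run.
	 */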
isc_nm_read(disp->handle, tcp_recv, disp);
disp->reading = true;
}
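
/*
 * Deliver the stored connect result to a response's 'connected'
 * callback and release the reference that was held for the callback
 * (DISPENTRY005).
 */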
static void
resp_connected(void *arg) {
dns_dispentry_t *resp = arg;
dispentry_log(resp, ISC_LOG_DEBUG(90), "connect callback: %s",
isc_result_totext(resp->result));
resp->connected(resp->result, NULL, resp->arg);
dns_dispentry_detach(&resp); /* DISPENTRY005 */
}
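
/*
 * The netmgr connect callback for TCP dispatches: every response that
 * was queued on the pending list while the connection was being set
 * up gets its result resolved here in a single pass.
 */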
static void
tcp_connected(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) {
dns_dispatch_t *disp = (dns_dispatch_t *)arg;
dns_displist_t resps = ISC_LIST_INITIALIZER;
	if (isc_log_wouldlog(ISC_LOG_DEBUG(90))) {
char localbuf[ISC_SOCKADDR_FORMATSIZE];
char peerbuf[ISC_SOCKADDR_FORMATSIZE];
if (handle != NULL) {
isc_sockaddr_t local = isc_nmhandle_localaddr(handle);
isc_sockaddr_t peer = isc_nmhandle_peeraddr(handle);
isc_sockaddr_format(&local, localbuf,
ISC_SOCKADDR_FORMATSIZE);
isc_sockaddr_format(&peer, peerbuf,
ISC_SOCKADDR_FORMATSIZE);
} else {
isc_sockaddr_format(&disp->local, localbuf,
ISC_SOCKADDR_FORMATSIZE);
isc_sockaddr_format(&disp->peer, peerbuf,
ISC_SOCKADDR_FORMATSIZE);
}
dispatch_log(disp, ISC_LOG_DEBUG(90),
"connected from %s to %s: %s", localbuf, peerbuf,
isc_result_totext(eresult));
}
REQUIRE(disp->tid == isc_tid());
INSIST(disp->state == DNS_DISPATCHSTATE_CONNECTING);
/*
* If there are pending responses, call the connect
* callbacks for all of them.
*/
ISC_LIST_FOREACH(disp->pending, resp, plink) {
ISC_LIST_UNLINK(disp->pending, resp, plink);
ISC_LIST_APPEND(resps, resp, rlink);
resp->result = eresult;
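		/*
		 * A response canceled while the connection was pending
		 * reports ISC_R_CANCELED; otherwise it either becomes
		 * active (and starts reading) on success, or reverts
		 * to the NONE state on failure.
		 */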
if (resp->state == DNS_DISPATCHSTATE_CANCELED) {
resp->result = ISC_R_CANCELED;
} else if (eresult == ISC_R_SUCCESS) {
resp->state = DNS_DISPATCHSTATE_CONNECTED;
ISC_LIST_APPEND(disp->active, resp, alink);
resp->reading = true;
dispentry_log(resp, ISC_LOG_DEBUG(90), "start reading");
} else {
resp->state = DNS_DISPATCHSTATE_NONE;
}
}
	/*
	 * Take the oldest active response; its timeout (if any) is
	 * applied to the newly attached handle below.
	 */
dns_dispentry_t *oldest = ISC_LIST_HEAD(disp->active);
if (oldest == NULL) {
		/* All responses have been canceled. */
disp->state = DNS_DISPATCHSTATE_CANCELED;
} else if (eresult == ISC_R_SUCCESS) {
disp->state = DNS_DISPATCHSTATE_CONNECTED;
isc_nmhandle_attach(handle, &disp->handle);
isc_nmhandle_cleartimeout(disp->handle);
if (oldest->timeout != 0) {
isc_nmhandle_settimeout(disp->handle, oldest->timeout);
}
tcp_startrecv(disp, oldest);
} else {
disp->state = DNS_DISPATCHSTATE_NONE;
}
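
	/*
	 * Now that the dispatch state is settled, hand the results
	 * back by running the connect callback of every waiting
	 * response.
	 */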
ISC_LIST_FOREACH(resps, resp, rlink) {
ISC_LIST_UNLINK(resps, resp, rlink);
resp_connected(resp);
}
dns_dispatch_detach(&disp); /* DISPATCH003 */
}
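
/*
 * Netmgr callback invoked when a UDP socket has been connected (or the
 * attempt has failed or been canceled) for 'resp'.  On success, start
 * receiving; on a local port collision (ISC_R_NOPERM/ISC_R_ADDRINUSE),
 * rebind to a different local port and retry.  Unless a retry was
 * started, the caller's connect callback is run with the result, and
 * the DISPENTRY004 reference taken in udp_dispatch_connect() is
 * dropped.
 */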
static void
udp_connected(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) {
        dns_dispentry_t *resp = (dns_dispentry_t *)arg;
        dns_dispatch_t *disp = resp->disp;

        dispentry_log(resp, ISC_LOG_DEBUG(90), "connected: %s",
                      isc_result_totext(eresult));

        REQUIRE(disp->tid == isc_tid());

        switch (resp->state) {
        case DNS_DISPATCHSTATE_CANCELED:
                eresult = ISC_R_CANCELED;
                ISC_LIST_UNLINK(disp->pending, resp, plink);
                goto unlock;
        case DNS_DISPATCHSTATE_CONNECTING:
                ISC_LIST_UNLINK(disp->pending, resp, plink);
                break;
        default:
                UNREACHABLE();
        }

        switch (eresult) {
        case ISC_R_CANCELED:
                break;
        case ISC_R_SUCCESS:
                resp->state = DNS_DISPATCHSTATE_CONNECTED;
                udp_startrecv(handle, resp);
                break;
        case ISC_R_NOPERM:
        case ISC_R_ADDRINUSE: {
                in_port_t localport = isc_sockaddr_getport(&disp->local);
                isc_result_t result;

                /* probably a port collision; try a different one */
                result = setup_socket(disp, resp, &resp->peer, &localport);
                if (result == ISC_R_SUCCESS) {
                        udp_dispatch_connect(disp, resp);
                        goto detach;
                }
                resp->state = DNS_DISPATCHSTATE_NONE;
                break;
        }
        default:
                resp->state = DNS_DISPATCHSTATE_NONE;
                break;
        }

unlock:
        dispentry_log(resp, ISC_LOG_DEBUG(90), "connect callback: %s",
                      isc_result_totext(eresult));
        resp->connected(eresult, NULL, resp->arg);

detach:
        dns_dispentry_detach(&resp); /* DISPENTRY004 */
}
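
/*
 * Start connecting the UDP socket for 'resp': mark the entry as
 * connecting, append it to the dispatch's pending list, and hand it
 * to the netmgr; the result is delivered to udp_connected().
 */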
static void
udp_dispatch_connect(dns_dispatch_t *disp, dns_dispentry_t *resp) {
        REQUIRE(disp->tid == isc_tid());

        resp->state = DNS_DISPATCHSTATE_CONNECTING;
        resp->start = isc_loop_now(resp->loop);

        dns_dispentry_ref(resp); /* DISPENTRY004 */
        ISC_LIST_APPEND(disp->pending, resp, plink);
        isc_nm_udpconnect(&resp->local, &resp->peer, udp_connected, resp,
                          resp->timeout);
}
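
/*
 * Return the remote hostname of the entry's transport if it is usable
 * as a TLS SNI value; return NULL if there is no transport, no remote
 * hostname is configured, or the hostname is not valid for SNI.
 */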
static inline const char *
get_tls_sni_hostname(dns_dispentry_t *resp) {
        char *hostname = NULL;

        if (resp->transport != NULL) {
                hostname = dns_transport_get_remote_hostname(resp->transport);
        }
        if (hostname == NULL) {
                return NULL;
        }
        if (isc_tls_valid_sni_hostname(hostname)) {
                return hostname;
        }
        return NULL;
}
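
/*
 * Connect a TCP (or, with a TLS client context, DoT) dispatch.  A TCP
 * dispatch may be shared by several queries: only the first entry
 * opens a netmgr connection; tcp_connected() later runs the connect
 * callbacks for all entries pending on the dispatch.
 */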
static isc_result_t
tcp_dispatch_connect(dns_dispatch_t *disp, dns_dispentry_t *resp) {
dns_transport_type_t transport_type = DNS_TRANSPORT_TCP;
isc_tlsctx_t *tlsctx = NULL;
isc_tlsctx_client_session_cache_t *sess_cache = NULL;
if (resp->transport != NULL) {
transport_type = dns_transport_get_type(resp->transport);
}
if (transport_type == DNS_TRANSPORT_TLS) {
isc_result_t result;
result = dns_transport_get_tlsctx(
resp->transport, &resp->peer, resp->tlsctx_cache,
resp->mctx, &tlsctx, &sess_cache);
if (result != ISC_R_SUCCESS) {
return result;
}
INSIST(tlsctx != NULL);
}
Fix the thread safety in the dns_dispatch unit The dispatches are not thread-bound, and used freely between various threads (see the dns_resolver and dns_request units for details). This refactoring make sure that all non-const dns_dispatch_t and dns_dispentry_t members are accessed under a lock, and both object now track their internal state (NONE, CONNECTING, CONNECTED, CANCELED) instead of guessing the state from the state of various struct members. During the refactoring, the artificial limit DNS_DISPATCH_SOCKSQUOTA on UDP sockets per dispatch was removed as the limiting needs to happen and happens on in dns_resolver and limiting the number of UDP sockets artificially in dispatch could lead to unpredictable behaviour in case one dispatch has the limit exhausted by others are idle. The TCP artificial limit of DNS_DISPATCH_MAXREQUESTS makes even less sense as the TCP connections are only reused in the dns_request API that's not a heavy user of the outgoing connections. As a side note, the fact that UDP and TCP dispatch pretends to be same thing, but in fact the connected UDP is handled from dns_dispentry_t and dns_dispatch_t acts as a broker, but connected TCP is handled from dns_dispatch_t and dns_dispatchmgr_t acts as a broker doesn't really help the clarity of this unit. This refactoring kept to API almost same - only dns_dispatch_cancel() and dns_dispatch_done() were merged into dns_dispatch_done() as we need to cancel active netmgr handles in any case to not leave dangling connections around. The functions handling UDP and TCP have been mostly split to their matching counterparts and the dns_dispatch_<function> functions are now thing wrappers that call <udp|tcp>_dispatch_<function> based on the socket type. More debugging-level logging was added to the unit to accomodate for this fact.
2022-11-30 17:58:35 +01:00
/* Check whether the dispatch is already connecting or connected. */
REQUIRE(disp->tid == isc_tid());
Fix the thread safety in the dns_dispatch unit The dispatches are not thread-bound, and used freely between various threads (see the dns_resolver and dns_request units for details). This refactoring make sure that all non-const dns_dispatch_t and dns_dispentry_t members are accessed under a lock, and both object now track their internal state (NONE, CONNECTING, CONNECTED, CANCELED) instead of guessing the state from the state of various struct members. During the refactoring, the artificial limit DNS_DISPATCH_SOCKSQUOTA on UDP sockets per dispatch was removed as the limiting needs to happen and happens on in dns_resolver and limiting the number of UDP sockets artificially in dispatch could lead to unpredictable behaviour in case one dispatch has the limit exhausted by others are idle. The TCP artificial limit of DNS_DISPATCH_MAXREQUESTS makes even less sense as the TCP connections are only reused in the dns_request API that's not a heavy user of the outgoing connections. As a side note, the fact that UDP and TCP dispatch pretends to be same thing, but in fact the connected UDP is handled from dns_dispentry_t and dns_dispatch_t acts as a broker, but connected TCP is handled from dns_dispatch_t and dns_dispatchmgr_t acts as a broker doesn't really help the clarity of this unit. This refactoring kept to API almost same - only dns_dispatch_cancel() and dns_dispatch_done() were merged into dns_dispatch_done() as we need to cancel active netmgr handles in any case to not leave dangling connections around. The functions handling UDP and TCP have been mostly split to their matching counterparts and the dns_dispatch_<function> functions are now thing wrappers that call <udp|tcp>_dispatch_<function> based on the socket type. More debugging-level logging was added to the unit to accomodate for this fact.
2022-11-30 17:58:35 +01:00
switch (disp->state) {
case DNS_DISPATCHSTATE_NONE:
/* First connection, continue with connecting */
disp->state = DNS_DISPATCHSTATE_CONNECTING;
resp->state = DNS_DISPATCHSTATE_CONNECTING;
resp->start = isc_loop_now(resp->loop);
Fix the thread safety in the dns_dispatch unit The dispatches are not thread-bound, and used freely between various threads (see the dns_resolver and dns_request units for details). This refactoring make sure that all non-const dns_dispatch_t and dns_dispentry_t members are accessed under a lock, and both object now track their internal state (NONE, CONNECTING, CONNECTED, CANCELED) instead of guessing the state from the state of various struct members. During the refactoring, the artificial limit DNS_DISPATCH_SOCKSQUOTA on UDP sockets per dispatch was removed as the limiting needs to happen and happens on in dns_resolver and limiting the number of UDP sockets artificially in dispatch could lead to unpredictable behaviour in case one dispatch has the limit exhausted by others are idle. The TCP artificial limit of DNS_DISPATCH_MAXREQUESTS makes even less sense as the TCP connections are only reused in the dns_request API that's not a heavy user of the outgoing connections. As a side note, the fact that UDP and TCP dispatch pretends to be same thing, but in fact the connected UDP is handled from dns_dispentry_t and dns_dispatch_t acts as a broker, but connected TCP is handled from dns_dispatch_t and dns_dispatchmgr_t acts as a broker doesn't really help the clarity of this unit. This refactoring kept to API almost same - only dns_dispatch_cancel() and dns_dispatch_done() were merged into dns_dispatch_done() as we need to cancel active netmgr handles in any case to not leave dangling connections around. The functions handling UDP and TCP have been mostly split to their matching counterparts and the dns_dispatch_<function> functions are now thing wrappers that call <udp|tcp>_dispatch_<function> based on the socket type. More debugging-level logging was added to the unit to accomodate for this fact.
2022-11-30 17:58:35 +01:00
dns_dispentry_ref(resp); /* DISPENTRY005 */
ISC_LIST_APPEND(disp->pending, resp, plink);
Fix the thread safety in the dns_dispatch unit The dispatches are not thread-bound, and used freely between various threads (see the dns_resolver and dns_request units for details). This refactoring make sure that all non-const dns_dispatch_t and dns_dispentry_t members are accessed under a lock, and both object now track their internal state (NONE, CONNECTING, CONNECTED, CANCELED) instead of guessing the state from the state of various struct members. During the refactoring, the artificial limit DNS_DISPATCH_SOCKSQUOTA on UDP sockets per dispatch was removed as the limiting needs to happen and happens on in dns_resolver and limiting the number of UDP sockets artificially in dispatch could lead to unpredictable behaviour in case one dispatch has the limit exhausted by others are idle. The TCP artificial limit of DNS_DISPATCH_MAXREQUESTS makes even less sense as the TCP connections are only reused in the dns_request API that's not a heavy user of the outgoing connections. As a side note, the fact that UDP and TCP dispatch pretends to be same thing, but in fact the connected UDP is handled from dns_dispentry_t and dns_dispatch_t acts as a broker, but connected TCP is handled from dns_dispatch_t and dns_dispatchmgr_t acts as a broker doesn't really help the clarity of this unit. This refactoring kept to API almost same - only dns_dispatch_cancel() and dns_dispatch_done() were merged into dns_dispatch_done() as we need to cancel active netmgr handles in any case to not leave dangling connections around. The functions handling UDP and TCP have been mostly split to their matching counterparts and the dns_dispatch_<function> functions are now thing wrappers that call <udp|tcp>_dispatch_<function> based on the socket type. More debugging-level logging was added to the unit to accomodate for this fact.
2022-11-30 17:58:35 +01:00
char localbuf[ISC_SOCKADDR_FORMATSIZE];
char peerbuf[ISC_SOCKADDR_FORMATSIZE];
dispatch: Clean up connect and recv callbacks - disp_connected() has been split into two functions, udp_connected() (which takes 'resp' as an argument) and tcp_connected() (which takes 'disp', and calls the connect callbacks for all pending resps). - In dns_dispatch_connect(), if a connection is already open, we need to detach the dispentry immediately because we won't be running tcp_connected(). - dns_disptach_cancel() also now calls the connect callbacks for pending TCP responses, and the response callbacks for open TCP connections waiting on read. - If udp_connected() runs after dns_dispatch_cancel() has been called, ensure that the caller's connect callback is run. - If a UDP connection fails with EADDRINUSE, we try again up to five times with a different local port number before giving up. - If a TCP connection is canceled while still pending connection, the connect timeout may still fire. we attach the dispatch before connecting to ensure that it won't be detached too soon in this case. - The dispentry is no longer removed from the pending list when deactivating, so that the connect callback can still be run if dns_dispatch_removeresponse() was run while the connecting was pending. - Rewrote dns_dispatch_gettcp() to avoid a data race. - startrecv() and dispatch_getnext() can be called with a NULL resp when using TCP. - Refactored udp_recv() and tcp_recv() and added result logging. - EOF is now treated the same as CANCELED in response callbacks. - ISC_R_SHUTTINGDOWN is sent to the reponse callbacks for all resps if tcp_recv() is triggered by a netmgr shutdown. (response callbacks are *not* sent by udp_recv() in this case.)
2021-08-04 13:14:11 -07:00
Fix the thread safety in the dns_dispatch unit The dispatches are not thread-bound, and used freely between various threads (see the dns_resolver and dns_request units for details). This refactoring make sure that all non-const dns_dispatch_t and dns_dispentry_t members are accessed under a lock, and both object now track their internal state (NONE, CONNECTING, CONNECTED, CANCELED) instead of guessing the state from the state of various struct members. During the refactoring, the artificial limit DNS_DISPATCH_SOCKSQUOTA on UDP sockets per dispatch was removed as the limiting needs to happen and happens on in dns_resolver and limiting the number of UDP sockets artificially in dispatch could lead to unpredictable behaviour in case one dispatch has the limit exhausted by others are idle. The TCP artificial limit of DNS_DISPATCH_MAXREQUESTS makes even less sense as the TCP connections are only reused in the dns_request API that's not a heavy user of the outgoing connections. As a side note, the fact that UDP and TCP dispatch pretends to be same thing, but in fact the connected UDP is handled from dns_dispentry_t and dns_dispatch_t acts as a broker, but connected TCP is handled from dns_dispatch_t and dns_dispatchmgr_t acts as a broker doesn't really help the clarity of this unit. This refactoring kept to API almost same - only dns_dispatch_cancel() and dns_dispatch_done() were merged into dns_dispatch_done() as we need to cancel active netmgr handles in any case to not leave dangling connections around. The functions handling UDP and TCP have been mostly split to their matching counterparts and the dns_dispatch_<function> functions are now thing wrappers that call <udp|tcp>_dispatch_<function> based on the socket type. More debugging-level logging was added to the unit to accomodate for this fact.
2022-11-30 17:58:35 +01:00
isc_sockaddr_format(&disp->local, localbuf,
ISC_SOCKADDR_FORMATSIZE);
isc_sockaddr_format(&disp->peer, peerbuf,
ISC_SOCKADDR_FORMATSIZE);
dispatch: Clean up connect and recv callbacks - disp_connected() has been split into two functions, udp_connected() (which takes 'resp' as an argument) and tcp_connected() (which takes 'disp', and calls the connect callbacks for all pending resps). - In dns_dispatch_connect(), if a connection is already open, we need to detach the dispentry immediately because we won't be running tcp_connected(). - dns_disptach_cancel() also now calls the connect callbacks for pending TCP responses, and the response callbacks for open TCP connections waiting on read. - If udp_connected() runs after dns_dispatch_cancel() has been called, ensure that the caller's connect callback is run. - If a UDP connection fails with EADDRINUSE, we try again up to five times with a different local port number before giving up. - If a TCP connection is canceled while still pending connection, the connect timeout may still fire. we attach the dispatch before connecting to ensure that it won't be detached too soon in this case. - The dispentry is no longer removed from the pending list when deactivating, so that the connect callback can still be run if dns_dispatch_removeresponse() was run while the connecting was pending. - Rewrote dns_dispatch_gettcp() to avoid a data race. - startrecv() and dispatch_getnext() can be called with a NULL resp when using TCP. - Refactored udp_recv() and tcp_recv() and added result logging. - EOF is now treated the same as CANCELED in response callbacks. - ISC_R_SHUTTINGDOWN is sent to the reponse callbacks for all resps if tcp_recv() is triggered by a netmgr shutdown. (response callbacks are *not* sent by udp_recv() in this case.)
2021-08-04 13:14:11 -07:00
Fix the thread safety in the dns_dispatch unit The dispatches are not thread-bound, and used freely between various threads (see the dns_resolver and dns_request units for details). This refactoring make sure that all non-const dns_dispatch_t and dns_dispentry_t members are accessed under a lock, and both object now track their internal state (NONE, CONNECTING, CONNECTED, CANCELED) instead of guessing the state from the state of various struct members. During the refactoring, the artificial limit DNS_DISPATCH_SOCKSQUOTA on UDP sockets per dispatch was removed as the limiting needs to happen and happens on in dns_resolver and limiting the number of UDP sockets artificially in dispatch could lead to unpredictable behaviour in case one dispatch has the limit exhausted by others are idle. The TCP artificial limit of DNS_DISPATCH_MAXREQUESTS makes even less sense as the TCP connections are only reused in the dns_request API that's not a heavy user of the outgoing connections. As a side note, the fact that UDP and TCP dispatch pretends to be same thing, but in fact the connected UDP is handled from dns_dispentry_t and dns_dispatch_t acts as a broker, but connected TCP is handled from dns_dispatch_t and dns_dispatchmgr_t acts as a broker doesn't really help the clarity of this unit. This refactoring kept to API almost same - only dns_dispatch_cancel() and dns_dispatch_done() were merged into dns_dispatch_done() as we need to cancel active netmgr handles in any case to not leave dangling connections around. The functions handling UDP and TCP have been mostly split to their matching counterparts and the dns_dispatch_<function> functions are now thing wrappers that call <udp|tcp>_dispatch_<function> based on the socket type. More debugging-level logging was added to the unit to accomodate for this fact.
2022-11-30 17:58:35 +01:00
dns_dispatch_ref(disp); /* DISPATCH003 */
dispentry_log(resp, ISC_LOG_DEBUG(90),
"connecting from %s to %s, timeout %u", localbuf,
peerbuf, resp->connect_timeout);
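
		/*
		 * Not yet connected: open a new stream-based (TCP/TLS)
		 * connection.  When it completes, tcp_connected() runs
		 * the connect callbacks for all entries pending on this
		 * dispatch.
		 */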
const char *hostname = get_tls_sni_hostname(resp);
isc_nm_streamdnsconnect(&disp->local, &disp->peer,
tcp_connected, disp,
resp->connect_timeout, tlsctx, hostname,
sess_cache, ISC_NM_PROXY_NONE, NULL);
break;
case DNS_DISPATCHSTATE_CONNECTING:
/* Connection pending; add resp to the list */
resp->state = DNS_DISPATCHSTATE_CONNECTING;
resp->start = isc_loop_now(resp->loop);
dns_dispentry_ref(resp); /* DISPENTRY005 */
ISC_LIST_APPEND(disp->pending, resp, plink);
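		/*
		 * tcp_connected() will run this entry's connect callback
		 * when the pending connection completes.
		 */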
break;
case DNS_DISPATCHSTATE_CONNECTED:
resp->state = DNS_DISPATCHSTATE_CONNECTED;
resp->start = isc_loop_now(resp->loop);
/* Add the resp to the reading list */
ISC_LIST_APPEND(disp->active, resp, alink);
dispentry_log(resp, ISC_LOG_DEBUG(90),
"already connected; attaching");
resp->reading = true;
if (!disp->reading) {
			/* No read pending on the shared handle; restart it */
isc_nmhandle_cleartimeout(disp->handle);
if (resp->timeout != 0) {
isc_nmhandle_settimeout(disp->handle,
resp->timeout);
}
tcp_startrecv(disp, resp);
}

		/* Already connected; run the connect callback asynchronously */
dns_dispentry_ref(resp); /* DISPENTRY005 */
isc_async_run(resp->loop, resp_connected, resp);
break;
default:
UNREACHABLE();
}
return ISC_R_SUCCESS;
}
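
/*
 * The public dns_dispatch_*() entry points below are thin wrappers
 * that select udp_dispatch_*() or tcp_dispatch_*() based on the
 * socket type of the dispatch.
 */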
isc_result_t
dns_dispatch_connect(dns_dispentry_t *resp) {
	REQUIRE(VALID_RESPONSE(resp));
	REQUIRE(VALID_DISPATCH(resp->disp));

	dns_dispatch_t *disp = resp->disp;

	switch (disp->socktype) {
case isc_socktype_tcp:
return tcp_dispatch_connect(disp, resp);
case isc_socktype_udp:
udp_dispatch_connect(disp, resp);
return ISC_R_SUCCESS;
default:
UNREACHABLE();
}
}
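
/*
 * Send completion callback: report the result to the caller's 'sent'
 * callback, and cancel the dispatch entry if the send failed.
 */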
static void
send_done(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
dns_dispentry_t *resp = (dns_dispentry_t *)cbarg;
REQUIRE(VALID_RESPONSE(resp));
dns_dispatch_t *disp = resp->disp;
REQUIRE(VALID_DISPATCH(disp));
dispentry_log(resp, ISC_LOG_DEBUG(90), "sent: %s",
isc_result_totext(result));
resp->sent(result, NULL, resp->arg);
if (result != ISC_R_SUCCESS) {
dispentry_cancel(resp, result);
}
dns_dispentry_detach(&resp); /* DISPENTRY007 */
isc_nmhandle_detach(&handle);
}
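
/*
 * Resume reading for an entry on a shared TCP connection: put the
 * entry back on the active list and restart the connection-wide read
 * unless one is already in progress.
 */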
static void
tcp_dispatch_getnext(dns_dispatch_t *disp, dns_dispentry_t *resp,
int64_t timeout) {
dispentry_log(resp, ISC_LOG_DEBUG(90), "continue reading");
if (!resp->reading) {
ISC_LIST_APPEND(disp->active, resp, alink);
resp->reading = true;
}
if (disp->reading) {
return;
}
if (timeout != 0) {
INSIST(timeout > 0 && timeout <= UINT32_MAX);
isc_nmhandle_settimeout(disp->handle, timeout);
}
dns_dispatch_ref(disp); /* DISPATCH002 */
isc_nm_read(disp->handle, tcp_recv, disp);
disp->reading = true;
}
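
/*
 * Resume reading on a connected UDP entry; each UDP entry reads from
 * its own handle, the dispatch acting only as a broker.
 */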
static void
udp_dispatch_getnext(dns_dispentry_t *resp, int64_t timeout) {
if (resp->reading) {
return;
}
if (timeout != 0) {
INSIST(timeout > 0 && timeout <= UINT32_MAX);
isc_nmhandle_settimeout(resp->handle, timeout);
}
dispentry_log(resp, ISC_LOG_DEBUG(90), "continue reading");
dns_dispentry_ref(resp); /* DISPENTRY003 */
isc_nm_read(resp->handle, udp_recv, resp);
resp->reading = true;
}
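
/*
 * Restart reading on a dispatch entry, optionally with a new timeout.
 * This must run on the loop that owns the dispatch (see the tid check
 * below).
 */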
void
dns_dispatch_resume(dns_dispentry_t *resp, unsigned int timeout) {
REQUIRE(VALID_RESPONSE(resp));
REQUIRE(VALID_DISPATCH(resp->disp));
dns_dispatch_t *disp = resp->disp;
dispentry_log(resp, ISC_LOG_DEBUG(90), "resume");
REQUIRE(disp->tid == isc_tid());
switch (disp->socktype) {
case isc_socktype_udp: {
udp_dispatch_getnext(resp, timeout);
break;
}
case isc_socktype_tcp:
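		/*
		 * A TCP read timed out earlier and is being resumed;
		 * the dispatch must have recorded that timeout first.
		 */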
INSIST(disp->timedout > 0);
disp->timedout--;
tcp_dispatch_getnext(disp, resp, timeout);
break;
default:
UNREACHABLE();
}
}
void
dns_dispatch_send(dns_dispentry_t *resp, isc_region_t *r) {
REQUIRE(VALID_RESPONSE(resp));
REQUIRE(VALID_DISPATCH(resp->disp));
dns_dispatch_t *disp = resp->disp;
isc_nmhandle_t *sendhandle = NULL;
dispentry_log(resp, ISC_LOG_DEBUG(90), "sending");
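
	/*
	 * A connected UDP socket is owned by the dispatch entry, so send
	 * on resp->handle; a TCP connection is shared by the whole
	 * dispatch, so send on disp->handle.
	 */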
switch (disp->socktype) {
case isc_socktype_udp:
isc_nmhandle_attach(resp->handle, &sendhandle);
break;
case isc_socktype_tcp:
isc_nmhandle_attach(disp->handle, &sendhandle);
break;
default:
UNREACHABLE();
}
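
	/* hold a reference for the in-flight send; released in send_done() */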
dns_dispentry_ref(resp); /* DISPENTRY007 */
isc_nm_send(sendhandle, r, send_done, resp);
}
isc_result_t
dns_dispatch_getlocaladdress(dns_dispatch_t *disp, isc_sockaddr_t *addrp) {
REQUIRE(VALID_DISPATCH(disp));
REQUIRE(addrp != NULL);
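
	/* only implemented for UDP; for TCP use dns_dispentry_getlocaladdress() */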
if (disp->socktype == isc_socktype_udp) {
*addrp = disp->local;
return ISC_R_SUCCESS;
}
return ISC_R_NOTIMPLEMENTED;
}

isc_result_t
dns_dispentry_getlocaladdress(dns_dispentry_t *resp, isc_sockaddr_t *addrp) {
REQUIRE(VALID_RESPONSE(resp));
REQUIRE(VALID_DISPATCH(resp->disp));
REQUIRE(addrp != NULL);
dns_dispatch_t *disp = resp->disp;
switch (disp->socktype) {
case isc_socktype_tcp:
*addrp = isc_nmhandle_localaddr(disp->handle);
return ISC_R_SUCCESS;
case isc_socktype_udp:
*addrp = isc_nmhandle_localaddr(resp->handle);
return ISC_R_SUCCESS;
default:
UNREACHABLE();
}
}

dns_dispatch_t *
dns_dispatchset_get(dns_dispatchset_t *dset) {
isc_tid_t tid = isc_tid();
/* check that dispatch set is configured */
if (dset == NULL || dset->ndisp == 0) {
return NULL;
}
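
	/* each loop thread gets its own dispatch from the set, indexed by tid */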
INSIST((uint32_t)tid < dset->ndisp);
return dset->dispatches[tid];
}

isc_result_t
dns_dispatchset_create(isc_mem_t *mctx, dns_dispatch_t *source,
dns_dispatchset_t **dsetp, uint32_t ndisp) {
isc_result_t result;
dns_dispatchset_t *dset = NULL;
dns_dispatchmgr_t *mgr = NULL;
size_t i;
REQUIRE(VALID_DISPATCH(source));
REQUIRE(source->socktype == isc_socktype_udp);
REQUIRE(dsetp != NULL && *dsetp == NULL);
mgr = source->mgr;
dset = isc_mem_get(mctx, sizeof(dns_dispatchset_t));
*dset = (dns_dispatchset_t){ .ndisp = ndisp };
isc_mem_attach(mctx, &dset->mctx);
dset->dispatches = isc_mem_cget(dset->mctx, ndisp,
sizeof(dns_dispatch_t *));
dset->dispatches[0] = NULL;
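
	/*
	 * Slot 0 reuses the caller's dispatch; the remaining slots are
	 * filled with new UDP dispatches bound to the same local address.
	 */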
dns_dispatch_attach(source, &dset->dispatches[0]); /* DISPATCH004 */
for (i = 1; i < dset->ndisp; i++) {
result = dispatch_createudp(mgr, &source->local, i,
&dset->dispatches[i]);
if (result != ISC_R_SUCCESS) {
goto fail;
}
}
*dsetp = dset;
	return ISC_R_SUCCESS;

fail:
for (size_t j = 0; j < i; j++) {
dns_dispatch_detach(&(dset->dispatches[j])); /* DISPATCH004 */
}
isc_mem_cput(dset->mctx, dset->dispatches, ndisp,
sizeof(dns_dispatch_t *));
isc_mem_putanddetach(&dset->mctx, dset, sizeof(dns_dispatchset_t));
return result;
}

void
dns_dispatchset_destroy(dns_dispatchset_t **dsetp) {
REQUIRE(dsetp != NULL && *dsetp != NULL);
dns_dispatchset_t *dset = *dsetp;
*dsetp = NULL;
for (size_t i = 0; i < dset->ndisp; i++) {
dns_dispatch_detach(&(dset->dispatches[i])); /* DISPATCH004 */
}
isc_mem_cput(dset->mctx, dset->dispatches, dset->ndisp,
sizeof(dns_dispatch_t *));
isc_mem_putanddetach(&dset->mctx, dset, sizeof(dns_dispatchset_t));
}

isc_result_t
dns_dispatch_checkperm(dns_dispatch_t *disp) {
REQUIRE(VALID_DISPATCH(disp));
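
	/* transfer permission checks only make sense on a TCP stream */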
if (disp->handle == NULL || disp->socktype == isc_socktype_udp) {
return ISC_R_NOPERM;
}
return isc_nm_xfr_checkperm(disp->handle);
}