/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * SPDX-License-Identifier: MPL-2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

/*! \file */

#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

#include <isc/atomic.h>
#include <isc/mem.h>
#include <isc/mutex.h>
#include <isc/net.h>
#include <isc/netmgr.h>
#include <isc/portset.h>
#include <isc/print.h>
#include <isc/random.h>
#include <isc/stats.h>
#include <isc/string.h>
#include <isc/time.h>
#include <isc/tls.h>
#include <isc/util.h>

#include <dns/acl.h>
#include <dns/dispatch.h>
#include <dns/log.h>
#include <dns/message.h>
#include <dns/stats.h>
#include <dns/transport.h>
#include <dns/types.h>

typedef ISC_LIST(dns_dispentry_t) dns_displist_t;

typedef struct dns_qid {
	unsigned int magic;
	isc_mutex_t lock;
	unsigned int qid_nbuckets;  /*%< hash table size */
	unsigned int qid_increment; /*%< id increment on collision */
	dns_displist_t *qid_table;  /*%< the table itself */
} dns_qid_t;

struct dns_dispatchmgr {
	/* Unlocked. */
	unsigned int magic;
	isc_refcount_t references;
	isc_mem_t *mctx;
	dns_acl_t *blackhole;
	isc_stats_t *stats;
	isc_nm_t *nm;

	/* Locked by "lock". */
	isc_mutex_t lock;
	ISC_LIST(dns_dispatch_t) list;

	dns_qid_t *qid;

	in_port_t *v4ports;    /*%< available ports for IPv4 */
	unsigned int nv4ports; /*%< # of available ports for IPv4 */
	in_port_t *v6ports;    /*%< available ports for IPv6 */
	unsigned int nv6ports; /*%< # of available ports for IPv6 */
};

typedef enum {
	DNS_DISPATCHSTATE_NONE = 0UL,
	DNS_DISPATCHSTATE_CONNECTING,
	DNS_DISPATCHSTATE_CONNECTED,
	DNS_DISPATCHSTATE_CANCELED,
} dns_dispatchstate_t;

struct dns_dispentry {
	unsigned int magic;
	isc_refcount_t references;
	dns_dispatch_t *disp;
	isc_nmhandle_t *handle; /*%< netmgr handle for UDP connection */
	dns_dispatchstate_t state;
	dns_transport_t *transport;
	isc_tlsctx_cache_t *tlsctx_cache;
	unsigned int bucket;	/*%< bucket in the QID hash table */
	unsigned int retries;
	unsigned int timeout;	/*%< read timeout (milliseconds) */
	isc_time_t start;	/*%< time the current read was started */
	isc_sockaddr_t local;
	isc_sockaddr_t peer;
	in_port_t port;
	dns_messageid_t id;	/*%< query ID we are waiting for */
	dispatch_cb_t connected;
	dispatch_cb_t sent;
	dispatch_cb_t response; /*%< read/response callback */
	void *arg;		/*%< argument passed to the callbacks */
	bool reading;
	isc_result_t result;
	ISC_LINK(dns_dispentry_t) link;	 /*%< on the QID bucket list */
	ISC_LINK(dns_dispentry_t) alink; /*%< on the dispatch "active" list */
	ISC_LINK(dns_dispentry_t) plink; /*%< on the dispatch "pending" list */
	ISC_LINK(dns_dispentry_t) rlink; /*%< on a local list in tcp_recv() */
};

struct dns_dispatch {
	/* Unlocked. */
	unsigned int magic;	/*%< magic */
	dns_dispatchmgr_t *mgr; /*%< dispatch manager */
	isc_nmhandle_t *handle; /*%< netmgr handle for TCP connection */
	isc_sockaddr_t local;	/*%< local address */
	in_port_t localport;	/*%< local UDP port */
	isc_sockaddr_t peer;	/*%< peer address (TCP) */

	/*% Locked by mgr->lock. */
	ISC_LINK(dns_dispatch_t) link;

	/* Locked by "lock". */
	isc_mutex_t lock; /*%< locks all below */
	isc_socktype_t socktype;
	dns_dispatchstate_t state;
	isc_refcount_t references;

	bool reading;

	dns_displist_t pending;
	dns_displist_t active;

	unsigned int requests; /*%< how many requests we have */

	unsigned int timedout;
};

#define QID_MAGIC    ISC_MAGIC('Q', 'i', 'd', ' ')
#define VALID_QID(e) ISC_MAGIC_VALID((e), QID_MAGIC)

#define RESPONSE_MAGIC	  ISC_MAGIC('D', 'r', 's', 'p')
#define VALID_RESPONSE(e) ISC_MAGIC_VALID((e), RESPONSE_MAGIC)

#define DISPSOCK_MAGIC	  ISC_MAGIC('D', 's', 'o', 'c')
#define VALID_DISPSOCK(e) ISC_MAGIC_VALID((e), DISPSOCK_MAGIC)

#define DISPATCH_MAGIC	  ISC_MAGIC('D', 'i', 's', 'p')
#define VALID_DISPATCH(e) ISC_MAGIC_VALID((e), DISPATCH_MAGIC)

#define DNS_DISPATCHMGR_MAGIC ISC_MAGIC('D', 'M', 'g', 'r')
#define VALID_DISPATCHMGR(e)  ISC_MAGIC_VALID((e), DNS_DISPATCHMGR_MAGIC)

/*%
 * Number of buckets in the QID hash table, and the value to
 * increment the QID by when attempting to avoid collisions.
 * The number of buckets should be prime, and the increment
 * should be the next higher prime number.
 */
#ifndef DNS_QID_BUCKETS
#define DNS_QID_BUCKETS 16411
#endif /* ifndef DNS_QID_BUCKETS */
#ifndef DNS_QID_INCREMENT
#define DNS_QID_INCREMENT 16433
#endif /* ifndef DNS_QID_INCREMENT */

#if DNS_DISPATCH_TRACE
#define dns_dispentry_ref(ptr) \
	dns_dispentry__ref(ptr, __func__, __FILE__, __LINE__)
#define dns_dispentry_unref(ptr) \
	dns_dispentry__unref(ptr, __func__, __FILE__, __LINE__)
#define dns_dispentry_attach(ptr, ptrp) \
	dns_dispentry__attach(ptr, ptrp, __func__, __FILE__, __LINE__)
#define dns_dispentry_detach(ptrp) \
	dns_dispentry__detach(ptrp, __func__, __FILE__, __LINE__)
ISC_REFCOUNT_TRACE_DECL(dns_dispentry);
#else
ISC_REFCOUNT_DECL(dns_dispentry);
#endif

/*
 * Statics.
 */
static void
dispatchmgr_destroy(dns_dispatchmgr_t *mgr);

static dns_dispentry_t *
entry_search(dns_qid_t *, const isc_sockaddr_t *, dns_messageid_t, in_port_t,
	     unsigned int);
static void
udp_recv(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region,
	 void *arg);
static void
tcp_recv(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region,
	 void *arg);
static void
tcp_recv_done(dns_dispentry_t *resp, isc_result_t eresult,
	      isc_region_t *region);
static uint32_t
dns_hash(dns_qid_t *, const isc_sockaddr_t *, dns_messageid_t, in_port_t);
static void
dispentry_cancel(dns_dispentry_t *resp, isc_result_t result);
static isc_result_t
dispatch_createudp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
		   dns_dispatch_t **dispp);
static void
qid_allocate(dns_dispatchmgr_t *mgr, dns_qid_t **qidp);
static void
qid_destroy(isc_mem_t *mctx, dns_qid_t **qidp);
static void
udp_startrecv(isc_nmhandle_t *handle, dns_dispentry_t *resp);
static void
tcp_startrecv(isc_nmhandle_t *handle, dns_dispatch_t *disp,
	      dns_dispentry_t *resp);
static void
tcp_dispatch_getnext(dns_dispatch_t *disp, dns_dispentry_t *resp,
		     int32_t timeout);
static void
udp_dispatch_getnext(dns_dispentry_t *resp, int32_t timeout);

#define LVL(x) ISC_LOG_DEBUG(x)
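
/*
 * Return a printable name for the transport a dispatch entry is using:
 * "UDP" for UDP dispatches, and "TCP", "TLS" or "HTTP" for TCP
 * dispatches, depending on the transport bound to the entry.
 */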
static const char *
socktype2str(dns_dispentry_t *resp) {
	dns_transport_type_t transport_type = DNS_TRANSPORT_UDP;
	dns_dispatch_t *disp = resp->disp;

	if (disp->socktype == isc_socktype_tcp) {
		if (resp->transport != NULL) {
			transport_type =
				dns_transport_get_type(resp->transport);
		} else {
			transport_type = DNS_TRANSPORT_TCP;
		}
	}

	switch (transport_type) {
	case DNS_TRANSPORT_UDP:
		return ("UDP");
	case DNS_TRANSPORT_TCP:
		return ("TCP");
	case DNS_TRANSPORT_TLS:
		return ("TLS");
	case DNS_TRANSPORT_HTTP:
		return ("HTTP");
	default:
		return ("<unexpected>");
	}
}

static const char *
state2str(dns_dispatchstate_t state) {
	switch (state) {
	case DNS_DISPATCHSTATE_NONE:
		return ("none");
	case DNS_DISPATCHSTATE_CONNECTING:
		return ("connecting");
	case DNS_DISPATCHSTATE_CONNECTED:
		return ("connected");
	case DNS_DISPATCHSTATE_CANCELED:
		return ("canceled");
	default:
		return ("<unexpected>");
	}
}

static void
mgr_log(dns_dispatchmgr_t *mgr, int level, const char *fmt, ...)
	ISC_FORMAT_PRINTF(3, 4);

static void
mgr_log(dns_dispatchmgr_t *mgr, int level, const char *fmt, ...) {
	char msgbuf[2048];
	va_list ap;

	if (!isc_log_wouldlog(dns_lctx, level)) {
		return;
	}

	va_start(ap, fmt);
	vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
	va_end(ap);

	isc_log_write(dns_lctx, DNS_LOGCATEGORY_DISPATCH,
		      DNS_LOGMODULE_DISPATCH, level, "dispatchmgr %p: %s", mgr,
		      msgbuf);
}

static void
inc_stats(dns_dispatchmgr_t *mgr, isc_statscounter_t counter) {
	if (mgr->stats != NULL) {
		isc_stats_increment(mgr->stats, counter);
	}
}

static void
dec_stats(dns_dispatchmgr_t *mgr, isc_statscounter_t counter) {
	if (mgr->stats != NULL) {
		isc_stats_decrement(mgr->stats, counter);
	}
}

static void
dispatch_log(dns_dispatch_t *disp, int level, const char *fmt, ...)
	ISC_FORMAT_PRINTF(3, 4);

static void
dispatch_log(dns_dispatch_t *disp, int level, const char *fmt, ...) {
	char msgbuf[2048];
	va_list ap;
	int r;

	if (!isc_log_wouldlog(dns_lctx, level)) {
		return;
	}

	va_start(ap, fmt);
	r = vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
	if (r < 0) {
		msgbuf[0] = '\0';
	} else if ((unsigned int)r >= sizeof(msgbuf)) {
		/* Truncated */
		msgbuf[sizeof(msgbuf) - 1] = '\0';
	}
	va_end(ap);

	isc_log_write(dns_lctx, DNS_LOGCATEGORY_DISPATCH,
		      DNS_LOGMODULE_DISPATCH, level, "dispatch %p: %s", disp,
		      msgbuf);
}

static void
dispentry_log(dns_dispentry_t *resp, int level, const char *fmt, ...)
	ISC_FORMAT_PRINTF(3, 4);

static void
dispentry_log(dns_dispentry_t *resp, int level, const char *fmt, ...) {
	char msgbuf[2048];
	va_list ap;
	int r;

	if (!isc_log_wouldlog(dns_lctx, level)) {
		return;
	}

	va_start(ap, fmt);
	r = vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
	if (r < 0) {
		msgbuf[0] = '\0';
	} else if ((unsigned int)r >= sizeof(msgbuf)) {
		/* Truncated */
		msgbuf[sizeof(msgbuf) - 1] = '\0';
	}
	va_end(ap);

	dispatch_log(resp->disp, level, "%s response %p: %s",
		     socktype2str(resp), resp, msgbuf);
}

/*
 * Return a hash of the destination and message id.
 */
static uint32_t
dns_hash(dns_qid_t *qid, const isc_sockaddr_t *dest, dns_messageid_t id,
	 in_port_t port) {
	uint32_t ret;

	ret = isc_sockaddr_hash(dest, true);
	ret ^= ((uint32_t)id << 16) | port;
	ret %= qid->qid_nbuckets;

	INSIST(ret < qid->qid_nbuckets);

	return (ret);
}

/*%
 * Choose a random port number for a dispatch entry.
 * The caller must hold the disp->lock.
 */
static isc_result_t
setup_socket(dns_dispatch_t *disp, dns_dispentry_t *resp,
	     const isc_sockaddr_t *dest, in_port_t *portp) {
	dns_dispatchmgr_t *mgr = disp->mgr;
	unsigned int nports;
	in_port_t *ports = NULL;
	in_port_t port;

	if (resp->retries++ > 5) {
		return (ISC_R_FAILURE);
	}

	if (isc_sockaddr_pf(&disp->local) == AF_INET) {
		nports = mgr->nv4ports;
		ports = mgr->v4ports;
	} else {
		nports = mgr->nv6ports;
		ports = mgr->v6ports;
	}
	if (nports == 0) {
		return (ISC_R_ADDRNOTAVAIL);
	}

	resp->local = disp->local;
	resp->peer = *dest;

	port = ports[isc_random_uniform(nports)];
	isc_sockaddr_setport(&resp->local, port);
	resp->port = port;

	*portp = port;

	return (ISC_R_SUCCESS);
}

/*
 * Find an entry for query ID 'id', socket address 'dest', and port number
 * 'port'.
 * Return NULL if no such entry exists.
 */
static dns_dispentry_t *
entry_search(dns_qid_t *qid, const isc_sockaddr_t *dest, dns_messageid_t id,
	     in_port_t port, unsigned int bucket) {
	dns_dispentry_t *res = NULL;

	REQUIRE(VALID_QID(qid));
	REQUIRE(bucket < qid->qid_nbuckets);

	res = ISC_LIST_HEAD(qid->qid_table[bucket]);

	while (res != NULL) {
		if (res->id == id && isc_sockaddr_equal(dest, &res->peer) &&
		    res->port == port)
		{
			return (res);
		}
		res = ISC_LIST_NEXT(res, link);
	}

	return (NULL);
}
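
/*
 * Final cleanup of a dispatch entry, run when its last reference is
 * dropped: cancel any outstanding read, release the netmgr handle,
 * transport and TLS context cache, free the entry, and detach from the
 * dispatch.
 */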
static void
dispentry_destroy(dns_dispentry_t *resp) {
	dns_dispatch_t *disp = resp->disp;

	/*
	 * We need to call this from here in case there's an external event
	 * that shuts down our dispatch (like ISC_R_SHUTTINGDOWN).
	 */
	dispentry_cancel(resp, ISC_R_CANCELED);

	LOCK(&disp->lock);
	INSIST(disp->requests > 0);
	disp->requests--;
	UNLOCK(&disp->lock);

	isc_refcount_destroy(&resp->references);

	resp->magic = 0;

	INSIST(!ISC_LINK_LINKED(resp, link));
	INSIST(!ISC_LINK_LINKED(resp, plink));
	INSIST(!ISC_LINK_LINKED(resp, alink));
	INSIST(!ISC_LINK_LINKED(resp, rlink));

	dispentry_log(resp, LVL(90), "destroying");

	if (resp->handle != NULL) {
		dispentry_log(resp, LVL(90), "detaching handle %p from %p",
			      resp->handle, &resp->handle);
		isc_nmhandle_detach(&resp->handle);
	}

	if (resp->tlsctx_cache != NULL) {
		isc_tlsctx_cache_detach(&resp->tlsctx_cache);
	}

	if (resp->transport != NULL) {
		dns_transport_detach(&resp->transport);
	}

	isc_mem_put(disp->mgr->mctx, resp, sizeof(*resp));

	dns_dispatch_detach(&disp); /* DISPATCH001 */
}

#if DNS_DISPATCH_TRACE
ISC_REFCOUNT_TRACE_IMPL(dns_dispentry, dispentry_destroy);
#else
ISC_REFCOUNT_IMPL(dns_dispentry, dispentry_destroy);
#endif

/*
 * How long in milliseconds has it been since this dispentry
 * started reading? (Only used for UDP, to adjust the timeout
 * downward when running getnext.)
 */
static unsigned int
dispentry_runtime(dns_dispentry_t *resp) {
	isc_time_t now;

	if (isc_time_isepoch(&resp->start)) {
		return (0);
	}

	TIME_NOW(&now);
	return (isc_time_microdiff(&now, &resp->start) / 1000);
}

/*
 * General flow:
 *
 * If I/O result == CANCELED or error, free the buffer.
 *
 * If query, free the buffer, restart.
 *
 * If response:
 *	Allocate event, fill in details.
 *	If cannot allocate, free buffer, restart.
 *	find target. If not found, free buffer, restart.
 *	if event queue is not empty, queue. else, send.
 *	restart.
 */
static void
udp_recv(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region,
	 void *arg) {
	dns_dispentry_t *resp = (dns_dispentry_t *)arg;
	dns_dispatch_t *disp = NULL;
	dns_messageid_t id;
	isc_result_t dres;
	isc_buffer_t source;
	unsigned int flags;
	isc_sockaddr_t peer;
	isc_netaddr_t netaddr;
	int match, timeout = 0;
	dispatch_cb_t response = NULL;

	REQUIRE(VALID_RESPONSE(resp));
	REQUIRE(VALID_DISPATCH(resp->disp));

	disp = resp->disp;

	LOCK(&disp->lock);
	INSIST(resp->reading);
	resp->reading = false;

	response = resp->response;

	if (resp->state == DNS_DISPATCHSTATE_CANCELED) {
		/*
		 * Nobody is interested in the callback if the response
		 * has been canceled already. Detach from the response
		 * and the handle.
		 */
		response = NULL;
		eresult = ISC_R_CANCELED;
	}

	dispentry_log(resp, LVL(90), "read callback:%s, requests %d",
		      isc_result_totext(eresult), disp->requests);

	if (eresult != ISC_R_SUCCESS) {
		/*
		 * This is most likely a network error on a connected
		 * socket, a timeout, or the query has been canceled.
		 * It makes no sense to check the address or parse the
		 * packet, but we can return the error to the caller.
		 */
		goto done;
	}

	peer = isc_nmhandle_peeraddr(handle);
	isc_netaddr_fromsockaddr(&netaddr, &peer);

	/*
	 * If this is from a blackholed address, drop it.
	 */
	if (disp->mgr->blackhole != NULL &&
	    dns_acl_match(&netaddr, NULL, disp->mgr->blackhole, NULL, &match,
			  NULL) == ISC_R_SUCCESS &&
	    match > 0)
	{
		if (isc_log_wouldlog(dns_lctx, LVL(10))) {
			char netaddrstr[ISC_NETADDR_FORMATSIZE];
			isc_netaddr_format(&netaddr, netaddrstr,
					   sizeof(netaddrstr));
			dispentry_log(resp, LVL(10),
				      "blackholed packet from %s", netaddrstr);
		}
		goto next;
	}

	/*
	 * Peek into the buffer to see what we can see.
	 */
	id = resp->id;
	isc_buffer_init(&source, region->base, region->length);
	isc_buffer_add(&source, region->length);
	dres = dns_message_peekheader(&source, &id, &flags);
	if (dres != ISC_R_SUCCESS) {
		char netaddrstr[ISC_NETADDR_FORMATSIZE];
		isc_netaddr_format(&netaddr, netaddrstr, sizeof(netaddrstr));
		dispentry_log(resp, LVL(10), "got garbage packet from %s",
			      netaddrstr);
		goto next;
	}

	dispentry_log(resp, LVL(92),
		      "got valid DNS message header, /QR %c, id %u",
		      (((flags & DNS_MESSAGEFLAG_QR) != 0) ? '1' : '0'), id);

	/*
	 * Look at the message flags. If it's a query, ignore it.
	 */
	if ((flags & DNS_MESSAGEFLAG_QR) == 0) {
		goto next;
	}

	/*
	 * The QID and the address must match the expected ones.
	 */
	if (resp->id != id || !isc_sockaddr_equal(&peer, &resp->peer)) {
		dispentry_log(resp, LVL(90), "response doesn't match");
		inc_stats(disp->mgr, dns_resstatscounter_mismatch);
		goto next;
	}

	/*
	 * We have the right resp, so call the caller back.
	 */
	goto done;

next:
	/*
	 * This is the wrong response. Check whether there is still enough
	 * time to wait for the correct one to arrive before the timeout
	 * fires.
	 */
	timeout = resp->timeout - dispentry_runtime(resp);
	if (timeout <= 0) {
		/*
		 * The time window for receiving the correct response is
		 * already closed, libuv has just not processed the socket
		 * timer yet. Invoke the read callback, indicating a timeout.
		 */
		eresult = ISC_R_TIMEDOUT;
		goto done;
	}

	/*
	 * Do not invoke the read callback just yet and instead wait for the
	 * proper response to arrive until the original timeout fires.
	 */
	response = NULL;
	udp_dispatch_getnext(resp, timeout);

done:
	UNLOCK(&disp->lock);

	if (response != NULL) {
		dispentry_log(resp, LVL(90), "UDP read callback on %p: %s",
			      handle, isc_result_totext(eresult));
		response(eresult, region, resp->arg);
	}

	dns_dispentry_detach(&resp); /* DISPENTRY003 */
}
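
/*
 * Time out the oldest active TCP response: take the head of the
 * dispatch's "active" list, bump the timed-out counter, and hand the
 * entry back to the caller with ISC_R_TIMEDOUT.
 */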
static isc_result_t
tcp_recv_oldest(dns_dispatch_t *disp, dns_dispentry_t **respp) {
	dns_dispentry_t *resp = NULL;

	resp = ISC_LIST_HEAD(disp->active);
	if (resp != NULL) {
		disp->timedout++;

		*respp = resp;
		return (ISC_R_TIMEDOUT);
	}

	return (ISC_R_NOTFOUND);
}
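
/*
 * A TCP read completed successfully: peek at the DNS message header and
 * look up the matching dispatch entry in the QID table. Returns
 * ISC_R_SUCCESS and sets '*respp' if the entry exists and is still
 * reading; otherwise returns a result explaining why the message is
 * being ignored.
 */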
static isc_result_t
tcp_recv_success(dns_dispatch_t *disp, isc_region_t *region, dns_qid_t *qid,
		 isc_sockaddr_t *peer, dns_dispentry_t **respp) {
	isc_buffer_t source;
	dns_messageid_t id;
	unsigned int flags;
	unsigned int bucket;
	isc_result_t result = ISC_R_SUCCESS;
	dns_dispentry_t *resp = NULL;

	dispatch_log(disp, LVL(90), "TCP read success, length == %d, addr = %p",
		     region->length, region->base);

	/*
	 * Peek into the buffer to see what we can see.
	 */
	isc_buffer_init(&source, region->base, region->length);
	isc_buffer_add(&source, region->length);
	result = dns_message_peekheader(&source, &id, &flags);
	if (result != ISC_R_SUCCESS) {
		dispatch_log(disp, LVL(10), "got garbage packet");
		return (ISC_R_UNEXPECTED);
	}

	dispatch_log(disp, LVL(92),
		     "got valid DNS message header, /QR %c, id %u",
		     (((flags & DNS_MESSAGEFLAG_QR) != 0) ? '1' : '0'), id);

	/*
	 * Look at the message flags. If it's a query, ignore it and keep
	 * reading.
	 */
	if ((flags & DNS_MESSAGEFLAG_QR) == 0) {
		dispatch_log(disp, LVL(10), "got DNS query instead of answer");
		return (ISC_R_UNEXPECTED);
	}

	/*
	 * We have a valid response; find the associated dispentry object
	 * and call the caller back.
	 */
	bucket = dns_hash(qid, peer, id, disp->localport);
	LOCK(&qid->lock);
	resp = entry_search(qid, peer, id, disp->localport, bucket);
	if (resp != NULL) {
		if (resp->reading) {
			*respp = resp;
		} else {
			/* We already got our DNS message. */
			result = ISC_R_UNEXPECTED;
		}
	} else {
		/* We are not expecting this DNS message */
		result = ISC_R_NOTFOUND;
	}
	dispatch_log(disp, LVL(90), "search for response in bucket %d: %s",
		     bucket, isc_result_totext(result));
	UNLOCK(&qid->lock);

	return (result);
}
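
/*
 * Move a response from the dispatch's "active" list onto the caller's
 * local list, mark it as no longer reading, and record the result it
 * will be completed with.
 */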
static void
tcp_recv_add(dns_displist_t *resps, dns_dispentry_t *resp,
	     isc_result_t result) {
	dns_dispentry_ref(resp); /* DISPENTRY009 */
	ISC_LIST_UNLINK(resp->disp->active, resp, alink);
	ISC_LIST_APPEND(*resps, resp, rlink);
	INSIST(resp->reading);
	resp->reading = false;
	resp->result = result;
}
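
/*
 * The TCP connection is shutting down: move every active response onto
 * the caller's list so it can be completed with 'result', and mark the
 * dispatch as canceled.
 */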
static void
tcp_recv_shutdown(dns_dispatch_t *disp, dns_displist_t *resps,
		  isc_result_t result) {
	dns_dispentry_t *resp = NULL, *next = NULL;

	/*
	 * If there are any active responses, shut them all down.
	 */
	for (resp = ISC_LIST_HEAD(disp->active); resp != NULL; resp = next) {
		next = ISC_LIST_NEXT(resp, alink);
		tcp_recv_add(resps, resp, result);
	}
	disp->state = DNS_DISPATCHSTATE_CANCELED;
}
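
/*
 * Invoke the read callback for a single completed response and drop the
 * list reference taken in tcp_recv_add().
 */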
static void
|
|
|
|
tcp_recv_done(dns_dispentry_t *resp, isc_result_t eresult,
|
|
|
|
isc_region_t *region) {
|
2022-11-30 17:58:35 +01:00
|
|
|
dispentry_log(resp, LVL(90), "read callback: %s",
|
|
|
|
isc_result_totext(eresult));
|
|
|
|
|
2021-11-26 09:14:58 +01:00
|
|
|
resp->response(eresult, region, resp->arg);
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispentry_detach(&resp); /* DISPENTRY009 */
|
2021-11-26 09:14:58 +01:00
|
|
|
}

static void
tcp_recv_processall(dns_displist_t *resps, isc_region_t *region) {
	dns_dispentry_t *resp = NULL, *next = NULL;

	for (resp = ISC_LIST_HEAD(*resps); resp != NULL; resp = next) {
		next = ISC_LIST_NEXT(resp, rlink);
		ISC_LIST_UNLINK(*resps, resp, rlink);
		tcp_recv_done(resp, resp->result, region);
	}
}
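
/*
 * tcp_recv_add(), tcp_recv_shutdown() and tcp_recv_processall() together
 * implement a two-phase pattern: while the dispatch lock is held, matched
 * or canceled entries are only moved from disp->active onto a local list
 * (taking a reference via dns_dispentry_ref()); the user callbacks are
 * then invoked by tcp_recv_processall() only after tcp_recv() has dropped
 * disp->lock, so response callbacks never run under the dispatch lock.
 */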

/*
 * General flow:
 *
 * If I/O result == CANCELED, EOF, or error, notify everyone as the
 * various queues drain.
 *
 * If response:
 *	Allocate event, fill in details.
 *		If cannot allocate, restart.
 *	Find the target.  If not found, restart.
 *	If the event queue is not empty, queue; else, send.
 *	Restart.
 */
static void
tcp_recv(isc_nmhandle_t *handle, isc_result_t result, isc_region_t *region,
	 void *arg) {
	dns_dispatch_t *disp = (dns_dispatch_t *)arg;
	dns_dispentry_t *resp = NULL;
	dns_qid_t *qid = NULL;
	char buf[ISC_SOCKADDR_FORMATSIZE];
	isc_sockaddr_t peer;
	dns_displist_t resps = ISC_LIST_INITIALIZER;

	REQUIRE(VALID_DISPATCH(disp));

	qid = disp->mgr->qid;

	LOCK(&disp->lock);
	INSIST(disp->reading);
	disp->reading = false;

	dispatch_log(disp, LVL(90), "TCP read:%s:requests %u",
		     isc_result_totext(result), disp->requests);

	peer = isc_nmhandle_peeraddr(handle);

	/*
	 * Phase 1: Process timeout and success.
	 */
	switch (result) {
	case ISC_R_TIMEDOUT:
		/*
		 * Time out the oldest response in the active queue.
		 */
		result = tcp_recv_oldest(disp, &resp);
		break;
	case ISC_R_SUCCESS:
		/* We got an answer. */
		result = tcp_recv_success(disp, region, qid, &peer, &resp);
		break;
	default:
		break;
	}

	if (resp != NULL) {
		tcp_recv_add(&resps, resp, result);
	}

	/*
	 * Phase 2: Check whether an earlier timeout accounts for an
	 * unmatched response.
	 */
	if (result == ISC_R_NOTFOUND) {
		if (disp->timedout > 0) {
			/* An active query timed out before this read. */
			disp->timedout--;
		} else {
			result = ISC_R_UNEXPECTED;
		}
	}

	/*
	 * Phase 3: Trigger timeouts.  It's possible that the responses
	 * would have timed out already, but non-matching TCP reads have
	 * prevented this.
	 */
	dns_dispentry_t *next = NULL;
	for (resp = ISC_LIST_HEAD(disp->active); resp != NULL; resp = next) {
		next = ISC_LIST_NEXT(resp, alink);

		/* FIXME: dispentry_runtime is always 0 for TCP */
		int timeout = resp->timeout - dispentry_runtime(resp);
		if (timeout <= 0) {
			tcp_recv_add(&resps, resp, ISC_R_TIMEDOUT);
		}
	}

	/*
	 * Phase 4: Log, and shut down the dispatch, if we errored out.
	 */
	switch (result) {
	case ISC_R_SUCCESS:
	case ISC_R_TIMEDOUT:
	case ISC_R_NOTFOUND:
		break;

	case ISC_R_SHUTTINGDOWN:
	case ISC_R_CANCELED:
	case ISC_R_EOF:
	case ISC_R_CONNECTIONRESET:
		isc_sockaddr_format(&peer, buf, sizeof(buf));
		dispatch_log(disp, LVL(90), "shutting down TCP: %s: %s", buf,
			     isc_result_totext(result));
		tcp_recv_shutdown(disp, &resps, result);
		break;

	default:
		isc_sockaddr_format(&peer, buf, sizeof(buf));
		dispatch_log(disp, ISC_LOG_ERROR,
			     "shutting down due to TCP "
			     "receive error: %s: %s",
			     buf, isc_result_totext(result));
		tcp_recv_shutdown(disp, &resps, result);
		break;
	}

	/*
	 * Phase 5: Resume reading if there are still active responses.
	 */
	if (!ISC_LIST_EMPTY(disp->active)) {
		tcp_startrecv(NULL, disp, ISC_LIST_HEAD(disp->active));
	}

	UNLOCK(&disp->lock);

	/*
	 * Phase 6: Process all scheduled callbacks.
	 */
	tcp_recv_processall(&resps, region);

	dns_dispatch_detach(&disp); /* DISPATCH002 */
}

/*%
 * Create a temporary port list to set the initial default set of dispatch
 * ephemeral ports.  This is almost meaningless as the application will
 * normally set the ports explicitly, but is provided to fill some minor
 * corner cases.
 */
static void
create_default_portset(isc_mem_t *mctx, int family, isc_portset_t **portsetp) {
	in_port_t low, high;

	isc_net_getudpportrange(family, &low, &high);

	isc_portset_create(mctx, portsetp);
	isc_portset_addrange(*portsetp, low, high);
}
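
/*
 * The default range above comes from the operating system via
 * isc_net_getudpportrange(); callers that want a specific ephemeral port
 * range are expected to replace it with dns_dispatchmgr_setavailports()
 * (below), which converts the port sets into the flat arrays stored in
 * the manager.
 */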

static isc_result_t
setavailports(dns_dispatchmgr_t *mgr, isc_portset_t *v4portset,
	      isc_portset_t *v6portset) {
	in_port_t *v4ports, *v6ports, p = 0;
	unsigned int nv4ports, nv6ports, i4 = 0, i6 = 0;

	nv4ports = isc_portset_nports(v4portset);
	nv6ports = isc_portset_nports(v6portset);

	v4ports = NULL;
	if (nv4ports != 0) {
		v4ports = isc_mem_get(mgr->mctx, sizeof(in_port_t) * nv4ports);
	}
	v6ports = NULL;
	if (nv6ports != 0) {
		v6ports = isc_mem_get(mgr->mctx, sizeof(in_port_t) * nv6ports);
	}

	do {
		if (isc_portset_isset(v4portset, p)) {
			INSIST(i4 < nv4ports);
			v4ports[i4++] = p;
		}
		if (isc_portset_isset(v6portset, p)) {
			INSIST(i6 < nv6ports);
			v6ports[i6++] = p;
		}
	} while (p++ < 65535);
	INSIST(i4 == nv4ports && i6 == nv6ports);

	if (mgr->v4ports != NULL) {
		isc_mem_put(mgr->mctx, mgr->v4ports,
			    mgr->nv4ports * sizeof(in_port_t));
	}
	mgr->v4ports = v4ports;
	mgr->nv4ports = nv4ports;

	if (mgr->v6ports != NULL) {
		isc_mem_put(mgr->mctx, mgr->v6ports,
			    mgr->nv6ports * sizeof(in_port_t));
	}
	mgr->v6ports = v6ports;
	mgr->nv6ports = nv6ports;

	return (ISC_R_SUCCESS);
}
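
/*
 * Keeping the available ports as flat arrays makes picking a random
 * source port an O(1) operation; elsewhere in this file (not shown in
 * this excerpt) a port is presumably chosen along the lines of:
 *
 *	in_port_t port = mgr->v4ports[isc_random_uniform(mgr->nv4ports)];
 */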

/*
 * Publics.
 */

isc_result_t
dns_dispatchmgr_create(isc_mem_t *mctx, isc_nm_t *nm,
		       dns_dispatchmgr_t **mgrp) {
	dns_dispatchmgr_t *mgr = NULL;
	isc_portset_t *v4portset = NULL;
	isc_portset_t *v6portset = NULL;

	REQUIRE(mctx != NULL);
	REQUIRE(mgrp != NULL && *mgrp == NULL);

	mgr = isc_mem_get(mctx, sizeof(dns_dispatchmgr_t));
	*mgr = (dns_dispatchmgr_t){ .magic = 0 };

#if DNS_DISPATCH_TRACE
	fprintf(stderr, "dns_dispatchmgr__init:%s:%s:%d:%p->references = 1\n",
		__func__, __FILE__, __LINE__, mgr);
#endif
	isc_refcount_init(&mgr->references, 1);

	isc_mem_attach(mctx, &mgr->mctx);
	isc_nm_attach(nm, &mgr->nm);

	isc_mutex_init(&mgr->lock);

	ISC_LIST_INIT(mgr->list);

	create_default_portset(mctx, AF_INET, &v4portset);
	create_default_portset(mctx, AF_INET6, &v6portset);

	setavailports(mgr, v4portset, v6portset);

	isc_portset_destroy(mctx, &v4portset);
	isc_portset_destroy(mctx, &v6portset);

	qid_allocate(mgr, &mgr->qid);
	mgr->magic = DNS_DISPATCHMGR_MAGIC;

	*mgrp = mgr;
	return (ISC_R_SUCCESS);
}
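
/*
 * Typical caller-side usage is a sketch along these lines (the variable
 * names are illustrative, not part of this API):
 *
 *	dns_dispatchmgr_t *dispatchmgr = NULL;
 *	isc_result_t result = dns_dispatchmgr_create(mctx, netmgr,
 *						     &dispatchmgr);
 *	RUNTIME_CHECK(result == ISC_R_SUCCESS);
 *	...
 *	dns_dispatchmgr_detach(&dispatchmgr);
 */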

#if DNS_DISPATCH_TRACE
ISC_REFCOUNT_TRACE_IMPL(dns_dispatchmgr, dispatchmgr_destroy);
#else
ISC_REFCOUNT_IMPL(dns_dispatchmgr, dispatchmgr_destroy);
#endif
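
/*
 * The ISC_REFCOUNT_IMPL() macro above provides the
 * dns_dispatchmgr_attach()/dns_dispatchmgr_detach() (and ref/unref)
 * functions used throughout this file; dispatchmgr_destroy() runs when
 * the last reference is released.
 */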

void
dns_dispatchmgr_setblackhole(dns_dispatchmgr_t *mgr, dns_acl_t *blackhole) {
	REQUIRE(VALID_DISPATCHMGR(mgr));
	if (mgr->blackhole != NULL) {
		dns_acl_detach(&mgr->blackhole);
	}
	dns_acl_attach(blackhole, &mgr->blackhole);
}

dns_acl_t *
dns_dispatchmgr_getblackhole(dns_dispatchmgr_t *mgr) {
	REQUIRE(VALID_DISPATCHMGR(mgr));
	return (mgr->blackhole);
}

isc_result_t
dns_dispatchmgr_setavailports(dns_dispatchmgr_t *mgr, isc_portset_t *v4portset,
			      isc_portset_t *v6portset) {
	REQUIRE(VALID_DISPATCHMGR(mgr));
	return (setavailports(mgr, v4portset, v6portset));
}

static void
dispatchmgr_destroy(dns_dispatchmgr_t *mgr) {
	REQUIRE(VALID_DISPATCHMGR(mgr));

	isc_refcount_destroy(&mgr->references);

	mgr->magic = 0;
	isc_mutex_destroy(&mgr->lock);

	qid_destroy(mgr->mctx, &mgr->qid);

	if (mgr->blackhole != NULL) {
		dns_acl_detach(&mgr->blackhole);
	}

	if (mgr->stats != NULL) {
		isc_stats_detach(&mgr->stats);
	}

	if (mgr->v4ports != NULL) {
		isc_mem_put(mgr->mctx, mgr->v4ports,
			    mgr->nv4ports * sizeof(in_port_t));
	}
	if (mgr->v6ports != NULL) {
		isc_mem_put(mgr->mctx, mgr->v6ports,
			    mgr->nv6ports * sizeof(in_port_t));
	}

	isc_nm_detach(&mgr->nm);

	isc_mem_putanddetach(&mgr->mctx, mgr, sizeof(dns_dispatchmgr_t));
}

void
dns_dispatchmgr_setstats(dns_dispatchmgr_t *mgr, isc_stats_t *stats) {
	REQUIRE(VALID_DISPATCHMGR(mgr));
	REQUIRE(ISC_LIST_EMPTY(mgr->list));
	REQUIRE(mgr->stats == NULL);

	isc_stats_attach(stats, &mgr->stats);
}

static void
qid_allocate(dns_dispatchmgr_t *mgr, dns_qid_t **qidp) {
	dns_qid_t *qid = NULL;
	unsigned int i;

	REQUIRE(qidp != NULL && *qidp == NULL);

	qid = isc_mem_get(mgr->mctx, sizeof(*qid));
	*qid = (dns_qid_t){ .qid_nbuckets = DNS_QID_BUCKETS,
			    .qid_increment = DNS_QID_INCREMENT };

	qid->qid_table = isc_mem_get(mgr->mctx,
				     DNS_QID_BUCKETS * sizeof(dns_displist_t));
	for (i = 0; i < qid->qid_nbuckets; i++) {
		ISC_LIST_INIT(qid->qid_table[i]);
	}

	isc_mutex_init(&qid->lock);
	qid->magic = QID_MAGIC;
	*qidp = qid;
}
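
/*
 * The QID table allocated above is shared by all dispatches owned by the
 * manager; the TCP receive path earlier in this file hashes the peer
 * address, message ID and local port with dns_hash() and then walks the
 * resulting bucket with entry_search() to find the matching dispentry.
 */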

static void
qid_destroy(isc_mem_t *mctx, dns_qid_t **qidp) {
	dns_qid_t *qid = NULL;

	REQUIRE(qidp != NULL);
	qid = *qidp;
	*qidp = NULL;

	REQUIRE(VALID_QID(qid));

	qid->magic = 0;
	isc_mem_put(mctx, qid->qid_table,
		    qid->qid_nbuckets * sizeof(dns_displist_t));
	isc_mutex_destroy(&qid->lock);
	isc_mem_put(mctx, qid, sizeof(*qid));
}

/*
 * Allocate and set important limits.
 */
static void
dispatch_allocate(dns_dispatchmgr_t *mgr, isc_socktype_t type,
		  dns_dispatch_t **dispp) {
	dns_dispatch_t *disp = NULL;

	REQUIRE(VALID_DISPATCHMGR(mgr));
	REQUIRE(dispp != NULL && *dispp == NULL);

	/*
	 * Set up the dispatcher, mostly.  Don't bother setting some of
	 * the options that are controlled by tcp vs. udp, etc.
	 */
	disp = isc_mem_get(mgr->mctx, sizeof(*disp));
	*disp = (dns_dispatch_t){
		.socktype = type,
		.link = ISC_LINK_INITIALIZER,
		.active = ISC_LIST_INITIALIZER,
		.pending = ISC_LIST_INITIALIZER,
		.magic = DISPATCH_MAGIC,
	};

	dns_dispatchmgr_attach(mgr, &disp->mgr);
#if DNS_DISPATCH_TRACE
	fprintf(stderr, "dns_dispatch__init:%s:%s:%d:%p->references = 1\n",
		__func__, __FILE__, __LINE__, disp);
#endif
	isc_refcount_init(&disp->references, 1); /* DISPATCH000 */
	isc_mutex_init(&disp->lock);

	*dispp = disp;
}

isc_result_t
dns_dispatch_createtcp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
		       const isc_sockaddr_t *destaddr, isc_dscp_t dscp,
		       dns_dispatch_t **dispp) {
	dns_dispatch_t *disp = NULL;

	REQUIRE(VALID_DISPATCHMGR(mgr));
	REQUIRE(destaddr != NULL);

	UNUSED(dscp);

	LOCK(&mgr->lock);

	dispatch_allocate(mgr, isc_socktype_tcp, &disp);

	disp->peer = *destaddr;

	if (localaddr != NULL) {
		disp->local = *localaddr;
	} else {
		int pf;

		pf = isc_sockaddr_pf(destaddr);
		isc_sockaddr_anyofpf(&disp->local, pf);
		isc_sockaddr_setport(&disp->local, 0);
	}

	/*
	 * Append it to the dispatcher list.
	 */

	/* FIXME: There should be a lookup hashtable here */
	ISC_LIST_APPEND(mgr->list, disp, link);
	UNLOCK(&mgr->lock);

	if (isc_log_wouldlog(dns_lctx, 90)) {
		char addrbuf[ISC_SOCKADDR_FORMATSIZE];

		/* Format disp->local, which is valid even when the
		 * caller passed a NULL localaddr. */
		isc_sockaddr_format(&disp->local, addrbuf,
				    ISC_SOCKADDR_FORMATSIZE);

		mgr_log(mgr, LVL(90),
			"dns_dispatch_createtcp: created TCP dispatch %p for "
			"%s",
			disp, addrbuf);
	}
	*dispp = disp;

	return (ISC_R_SUCCESS);
}
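
/*
 * Note that dns_dispatch_createtcp() only records the local and peer
 * addresses and registers the dispatch in mgr->list; the TCP connection
 * itself is established later, presumably through the dispatch connect
 * path, which is not part of this excerpt.
 */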

isc_result_t
dns_dispatch_gettcp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *destaddr,
		    const isc_sockaddr_t *localaddr, dns_dispatch_t **dispp) {
	dns_dispatch_t *disp_connected = NULL;
	dns_dispatch_t *disp_fallback = NULL;
	isc_result_t result = ISC_R_NOTFOUND;

	REQUIRE(VALID_DISPATCHMGR(mgr));
	REQUIRE(destaddr != NULL);
	REQUIRE(dispp != NULL && *dispp == NULL);

	LOCK(&mgr->lock);

	for (dns_dispatch_t *disp = ISC_LIST_HEAD(mgr->list); disp != NULL;
	     disp = ISC_LIST_NEXT(disp, link))
	{
		isc_sockaddr_t sockname;
		isc_sockaddr_t peeraddr;

		LOCK(&disp->lock);

		if (disp->handle != NULL) {
			sockname = isc_nmhandle_localaddr(disp->handle);
			peeraddr = isc_nmhandle_peeraddr(disp->handle);
		} else {
			sockname = disp->local;
			peeraddr = disp->peer;
		}

		/*
		 * The conditions must all match:
		 * 1. socktype is TCP
		 * 2. destination address is the same
		 * 3. local address is either NULL or the same
		 */
		if (disp->socktype != isc_socktype_tcp ||
		    !isc_sockaddr_equal(destaddr, &peeraddr) ||
		    (localaddr != NULL &&
		     !isc_sockaddr_eqaddr(localaddr, &sockname)))
		{
			UNLOCK(&disp->lock);
			continue;
		}

		switch (disp->state) {
		case DNS_DISPATCHSTATE_NONE:
			/* A dispatch in an indeterminate state, skip it */
			break;
		case DNS_DISPATCHSTATE_CONNECTED:
			if (ISC_LIST_EMPTY(disp->active)) {
				/* Ignore a dispatch with no responses */
				break;
			}
			/* We found a connected dispatch */
			dns_dispatch_attach(disp, &disp_connected);
			break;
		case DNS_DISPATCHSTATE_CONNECTING:
			if (ISC_LIST_EMPTY(disp->pending)) {
				/* Ignore a dispatch with no responses */
				break;
			}
			/* We found "a" dispatch, store it for later */
			if (disp_fallback == NULL) {
				dns_dispatch_attach(disp, &disp_fallback);
			}
			break;
		case DNS_DISPATCHSTATE_CANCELED:
			/* A canceled dispatch, skip it. */
			break;
		default:
			UNREACHABLE();
		}

		UNLOCK(&disp->lock);

		if (disp_connected != NULL) {
			break;
		}
	}

	if (disp_connected != NULL) {
		/* We found a connected dispatch */
		INSIST(disp_connected->handle != NULL);

		*dispp = disp_connected;
		disp_connected = NULL;

		result = ISC_R_SUCCESS;

		if (disp_fallback != NULL) {
			dns_dispatch_detach(&disp_fallback);
		}
	} else if (disp_fallback != NULL) {
		*dispp = disp_fallback;

		result = ISC_R_SUCCESS;
	}

	UNLOCK(&mgr->lock);

	return (result);
}
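
/*
 * A hypothetical caller would typically try to share an existing TCP
 * dispatch before creating a new one (variable names here are
 * illustrative only, and -1 stands for "no DSCP value"):
 *
 *	dns_dispatch_t *disp = NULL;
 *	if (dns_dispatch_gettcp(mgr, &peeraddr, &localaddr, &disp) !=
 *	    ISC_R_SUCCESS)
 *	{
 *		RUNTIME_CHECK(dns_dispatch_createtcp(mgr, &localaddr,
 *						     &peeraddr, -1, &disp) ==
 *			      ISC_R_SUCCESS);
 *	}
 *
 * A connected dispatch with active responses is preferred; a dispatch
 * that is still connecting is only used as a fallback.
 */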

isc_result_t
dns_dispatch_createudp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
		       dns_dispatch_t **dispp) {
	isc_result_t result;
	dns_dispatch_t *disp = NULL;

	REQUIRE(VALID_DISPATCHMGR(mgr));
	REQUIRE(localaddr != NULL);
	REQUIRE(dispp != NULL && *dispp == NULL);

	LOCK(&mgr->lock);
	result = dispatch_createudp(mgr, localaddr, &disp);
	if (result == ISC_R_SUCCESS) {
		*dispp = disp;
	}
	UNLOCK(&mgr->lock);

	return (result);
}

static isc_result_t
dispatch_createudp(dns_dispatchmgr_t *mgr, const isc_sockaddr_t *localaddr,
		   dns_dispatch_t **dispp) {
	isc_result_t result = ISC_R_SUCCESS;
	dns_dispatch_t *disp = NULL;
	isc_sockaddr_t sa_any;

	/*
	 * Check whether this address/port is available locally.
	 */
	isc_sockaddr_anyofpf(&sa_any, isc_sockaddr_pf(localaddr));
	if (!isc_sockaddr_eqaddr(&sa_any, localaddr)) {
		result = isc_nm_checkaddr(localaddr, isc_socktype_udp);
		if (result != ISC_R_SUCCESS) {
			return (result);
		}
	}

	dispatch_allocate(mgr, isc_socktype_udp, &disp);

	if (isc_log_wouldlog(dns_lctx, 90)) {
		char addrbuf[ISC_SOCKADDR_FORMATSIZE];

		isc_sockaddr_format(localaddr, addrbuf,
				    ISC_SOCKADDR_FORMATSIZE);
		mgr_log(mgr, LVL(90),
			"dispatch_createudp: created UDP dispatch %p for %s",
			disp, addrbuf);
	}

	disp->local = *localaddr;

	/*
	 * Don't append this dispatch to the dispatcher list: only TCP
	 * dispatches need to be searchable (see dns_dispatch_gettcp()),
	 * so UDP dispatches are left unlisted.
	 *
	 * ISC_LIST_APPEND(mgr->list, disp, link);
	 */

	*dispp = disp;

	return (result);
}
|
|
|
|
|
2021-05-25 22:54:17 -07:00
|
|
|
static void
|
2021-08-03 18:24:27 -07:00
|
|
|
dispatch_destroy(dns_dispatch_t *disp) {
|
2021-05-25 22:54:17 -07:00
|
|
|
dns_dispatchmgr_t *mgr = disp->mgr;
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
isc_refcount_destroy(&disp->references);
|
|
|
|
disp->magic = 0;
|
|
|
|
|
2021-05-25 22:54:17 -07:00
|
|
|
LOCK(&mgr->lock);
|
2022-11-30 17:58:35 +01:00
|
|
|
if (ISC_LINK_LINKED(disp, link)) {
|
|
|
|
ISC_LIST_UNLINK(disp->mgr->list, disp, link);
|
|
|
|
}
|
2021-05-25 22:54:17 -07:00
|
|
|
UNLOCK(&mgr->lock);
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
INSIST(disp->requests == 0);
|
|
|
|
INSIST(ISC_LIST_EMPTY(disp->pending));
|
|
|
|
INSIST(ISC_LIST_EMPTY(disp->active));
|
2021-05-25 22:54:17 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
INSIST(!ISC_LINK_LINKED(disp, link));
|
|
|
|
|
|
|
|
dispatch_log(disp, LVL(90), "destroying dispatch %p", disp);
|
|
|
|
|
|
|
|
if (disp->handle) {
|
|
|
|
dispatch_log(disp, LVL(90), "detaching TCP handle %p from %p",
|
|
|
|
disp->handle, &disp->handle);
|
2021-05-25 22:54:17 -07:00
|
|
|
isc_nmhandle_detach(&disp->handle);
|
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
isc_mutex_destroy(&disp->lock);
|
|
|
|
|
|
|
|
isc_mem_put(mgr->mctx, disp, sizeof(*disp));
|
2021-05-25 22:54:17 -07:00
|
|
|
|
2021-08-03 18:24:27 -07:00
|
|
|
/*
|
|
|
|
* Because dispatch uses mgr->mctx, we must detach after freeing
|
|
|
|
* dispatch, not before.
|
2021-05-25 22:54:17 -07:00
|
|
|
*/
|
|
|
|
dns_dispatchmgr_detach(&mgr);
|
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
#if DNS_DISPATCH_TRACE
|
|
|
|
ISC_REFCOUNT_TRACE_IMPL(dns_dispatch, dispatch_destroy);
|
|
|
|
#else
|
|
|
|
ISC_REFCOUNT_IMPL(dns_dispatch, dispatch_destroy);
|
|
|
|
#endif
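
/*
 * Note (descriptive, based on usage in this file): ISC_REFCOUNT_IMPL()
 * above generates the dns_dispatch_ref(), dns_dispatch_attach() and
 * dns_dispatch_detach() helpers used throughout this file, with
 * dispatch_destroy() running once the last reference is dropped.
 */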
|
1999-06-16 01:32:31 +00:00
|
|
|
|
1999-07-22 01:34:31 +00:00
|
|
|
isc_result_t
|
2021-10-03 15:15:50 -07:00
|
|
|
dns_dispatch_add(dns_dispatch_t *disp, unsigned int options,
|
|
|
|
unsigned int timeout, const isc_sockaddr_t *dest,
|
2022-09-19 11:04:22 +00:00
|
|
|
dns_transport_t *transport, isc_tlsctx_cache_t *tlsctx_cache,
|
2021-10-03 15:15:50 -07:00
|
|
|
dispatch_cb_t connected, dispatch_cb_t sent,
|
|
|
|
dispatch_cb_t response, void *arg, dns_messageid_t *idp,
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispentry_t **respp) {
|
|
|
|
dns_dispentry_t *resp = NULL;
|
2021-01-14 13:02:57 -08:00
|
|
|
dns_qid_t *qid = NULL;
|
2008-06-23 19:41:20 +00:00
|
|
|
in_port_t localport = 0;
|
1999-06-18 02:01:42 +00:00
|
|
|
dns_messageid_t id;
|
2021-01-14 13:02:57 -08:00
|
|
|
unsigned int bucket;
|
2020-12-09 19:44:41 -08:00
|
|
|
bool ok = false;
|
2021-01-14 13:02:57 -08:00
|
|
|
int i = 0;
|
1999-06-18 02:01:42 +00:00
|
|
|
|
|
|
|
REQUIRE(VALID_DISPATCH(disp));
|
|
|
|
REQUIRE(dest != NULL);
|
2022-11-30 17:58:35 +01:00
|
|
|
REQUIRE(respp != NULL && *respp == NULL);
|
1999-06-18 02:01:42 +00:00
|
|
|
REQUIRE(idp != NULL);
|
2021-01-14 13:02:57 -08:00
|
|
|
REQUIRE(disp->socktype == isc_socktype_tcp ||
|
|
|
|
disp->socktype == isc_socktype_udp);
|
2022-11-30 17:58:35 +01:00
|
|
|
REQUIRE(connected != NULL);
|
|
|
|
REQUIRE(response != NULL);
|
|
|
|
REQUIRE(sent != NULL);
|
1999-06-18 02:01:42 +00:00
|
|
|
|
|
|
|
LOCK(&disp->lock);
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
if (disp->state == DNS_DISPATCHSTATE_CANCELED) {
|
1999-07-09 02:47:55 +00:00
|
|
|
UNLOCK(&disp->lock);
|
2022-11-30 17:58:35 +01:00
|
|
|
return (ISC_R_CANCELED);
|
1999-07-09 02:47:55 +00:00
|
|
|
}
|
|
|
|
|
Dispatch API simplification
- Many dispatch attributes can be set implicitly instead of being passed
in. we can infer whether to set DNS_DISPATCHATTR_TCP or _UDP from
whether we're calling dns_dispatch_createtcp() or _createudp(). we
can also infer DNS_DISPATCHATTR_IPV4 or _IPV6 from the addresses or
the socket that were passed in.
- We no longer use dup'd sockets in UDP dispatches, so the 'dup_socket'
parameter has been removed from dns_dispatch_createudp(), along with
the code implementing it. also removed isc_socket_dup() since it no
longer has any callers.
- The 'buffersize' parameter was ignored and has now been removed;
buffersize is now fixed at 4096.
- Maxbuffers and maxrequests don't need to be passed in on every call to
dns_dispatch_createtcp() and _createudp().
In all current uses, the value for mgr->maxbuffers will either be
raised once from its default of 20000 to 32768, or else left
alone. (passing in a value lower than 20000 does not lower it.) there
isn't enough difference between these values for there to be any need
to configure this.
The value for disp->maxrequests controls both the quota of concurrent
requests for a dispatch and also the size of the dispatch socket
memory pool. it's not clear that this quota is necessary at all. the
memory pool size currently starts at 32768, but is sometimes lowered
to 4096, which is definitely unnecessary.
This commit sets both values permanently to 32768.
- Previously TCP dispatches allocated their own separate QID table,
which didn't incorporate a port table. this commit removes
per-dispatch QID tables and shares the same table between all
dispatches. since dispatches are created for each TCP socket, this may
speed up the dispatch allocation process. there may be a slight
increase in lock contention since all dispatches are sharing a single
QID table, but since TCP sockets are used less often than UDP
sockets (which were already sharing a QID table), it should not be a
substantial change.
- The dispatch port table was being used to determine whether a port was
already in use; if so, then a UDP socket would be bound with
REUSEADDR. this commit removes the port table, and always binds UDP
sockets that way.
2020-12-17 00:43:00 -08:00
|
|
|
qid = disp->mgr->qid;
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
resp = isc_mem_get(disp->mgr->mctx, sizeof(*resp));
|
|
|
|
|
|
|
|
*resp = (dns_dispentry_t){
|
|
|
|
.port = localport,
|
|
|
|
.timeout = timeout,
|
|
|
|
.peer = *dest,
|
|
|
|
.connected = connected,
|
|
|
|
.sent = sent,
|
|
|
|
.response = response,
|
|
|
|
.arg = arg,
|
|
|
|
.link = ISC_LINK_INITIALIZER,
|
|
|
|
.alink = ISC_LINK_INITIALIZER,
|
|
|
|
.plink = ISC_LINK_INITIALIZER,
|
|
|
|
.rlink = ISC_LINK_INITIALIZER,
|
|
|
|
.magic = RESPONSE_MAGIC,
|
|
|
|
};
|
|
|
|
|
|
|
|
#if DNS_DISPATCH_TRACE
|
|
|
|
fprintf(stderr, "dns_dispentry__init:%s:%s:%d:%p->references = 1\n",
|
|
|
|
__func__, __FILE__, __LINE__, resp);
|
|
|
|
#endif
|
|
|
|
isc_refcount_init(&resp->references, 1); /* DISPENTRY000 */
|
2021-05-25 22:54:17 -07:00
|
|
|
|
2021-01-14 13:02:57 -08:00
|
|
|
if (disp->socktype == isc_socktype_udp) {
|
2022-11-30 17:58:35 +01:00
|
|
|
isc_result_t result = setup_socket(disp, resp, dest,
|
|
|
|
&localport);
|
2008-06-23 19:41:20 +00:00
|
|
|
if (result != ISC_R_SUCCESS) {
|
2022-11-30 17:58:35 +01:00
|
|
|
isc_mem_put(disp->mgr->mctx, resp, sizeof(*resp));
|
2008-06-23 19:41:20 +00:00
|
|
|
UNLOCK(&disp->lock);
|
2009-01-31 00:10:24 +00:00
|
|
|
inc_stats(disp->mgr, dns_resstatscounter_dispsockfail);
|
2008-06-23 19:41:20 +00:00
|
|
|
return (result);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1999-06-18 02:01:42 +00:00
|
|
|
/*
|
2020-12-09 19:44:41 -08:00
|
|
|
* Try somewhat hard to find a unique ID. Start with
|
|
|
|
* a random number unless DNS_DISPATCHOPT_FIXEDID is set,
|
|
|
|
* in which case we start with the ID passed in via *idp.
|
1999-06-18 02:01:42 +00:00
|
|
|
*/
|
2018-04-22 14:56:28 +02:00
|
|
|
if ((options & DNS_DISPATCHOPT_FIXEDID) != 0) {
|
2014-10-01 07:24:16 +10:00
|
|
|
id = *idp;
|
2018-04-22 14:56:28 +02:00
|
|
|
} else {
|
2018-05-28 15:22:23 +02:00
|
|
|
id = (dns_messageid_t)isc_random16();
|
2018-04-22 14:56:28 +02:00
|
|
|
}
|
2020-12-09 19:44:41 -08:00
|
|
|
|
|
|
|
LOCK(&qid->lock);
|
2014-01-09 15:57:59 +11:00
|
|
|
do {
|
2020-12-09 19:44:41 -08:00
|
|
|
dns_dispentry_t *entry = NULL;
|
2013-12-23 09:50:18 -08:00
|
|
|
bucket = dns_hash(qid, dest, id, localport);
|
2020-12-09 19:44:41 -08:00
|
|
|
entry = entry_search(qid, dest, id, localport, bucket);
|
|
|
|
if (entry == NULL) {
|
2018-04-17 08:29:14 -07:00
|
|
|
ok = true;
|
1999-06-18 02:01:42 +00:00
|
|
|
break;
|
|
|
|
}
|
2022-11-30 17:58:35 +01:00
|
|
|
if ((options & DNS_DISPATCHOPT_FIXEDID) != 0) {
|
|
|
|
/* When using a fixed ID, we must either use it or fail */
|
|
|
|
break;
|
|
|
|
}
|
2000-09-18 04:50:05 +00:00
|
|
|
id += qid->qid_increment;
|
1999-12-15 17:14:52 +00:00
|
|
|
id &= 0x0000ffff;
|
2014-01-09 15:57:59 +11:00
|
|
|
} while (i++ < 64);
|
2022-11-30 17:58:35 +01:00
|
|
|
|
|
|
|
if (ok) {
|
|
|
|
resp->id = id;
|
|
|
|
resp->bucket = bucket;
|
|
|
|
ISC_LIST_APPEND(qid->qid_table[bucket], resp, link);
|
|
|
|
}
|
2012-04-28 14:52:28 -07:00
|
|
|
UNLOCK(&qid->lock);
|
1999-06-18 02:01:42 +00:00
|
|
|
|
|
|
|
if (!ok) {
|
2022-11-30 17:58:35 +01:00
|
|
|
isc_mem_put(disp->mgr->mctx, resp, sizeof(*resp));
|
1999-06-18 02:01:42 +00:00
|
|
|
UNLOCK(&disp->lock);
|
2000-04-06 22:03:35 +00:00
|
|
|
return (ISC_R_NOMORE);
|
1999-06-18 02:01:42 +00:00
|
|
|
}
|
|
|
|
|
2022-09-19 11:04:22 +00:00
|
|
|
if (transport != NULL) {
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_transport_attach(transport, &resp->transport);
|
2022-09-19 11:04:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (tlsctx_cache != NULL) {
|
2022-11-30 17:58:35 +01:00
|
|
|
isc_tlsctx_cache_attach(tlsctx_cache, &resp->tlsctx_cache);
|
2022-09-19 11:04:22 +00:00
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_attach(disp, &resp->disp); /* DISPATCH001 */
|
1999-06-18 02:01:42 +00:00
|
|
|
|
1999-07-09 00:51:08 +00:00
|
|
|
disp->requests++;
|
2020-12-09 19:44:41 -08:00
|
|
|
|
2021-01-14 13:02:57 -08:00
|
|
|
inc_stats(disp->mgr, (disp->socktype == isc_socktype_udp)
|
|
|
|
? dns_resstatscounter_disprequdp
|
|
|
|
: dns_resstatscounter_dispreqtcp);
|
2008-06-23 19:41:20 +00:00
|
|
|
|
1999-06-18 02:01:42 +00:00
|
|
|
UNLOCK(&disp->lock);
|
|
|
|
|
|
|
|
*idp = id;
|
2022-11-30 17:58:35 +01:00
|
|
|
*respp = resp;
|
1999-06-18 02:01:42 +00:00
|
|
|
|
2000-04-06 22:03:35 +00:00
|
|
|
return (ISC_R_SUCCESS);
|
1999-06-16 01:32:31 +00:00
|
|
|
}
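
/*
 * A minimal caller sketch (not part of this file; 'my_connected',
 * 'my_sent', 'my_response', 'cbarg', 'peer' and 'req_timeout' are
 * hypothetical names), showing the intended life cycle of a dispatch
 * entry as implemented above:
 *
 *	isc_result_t result;
 *	dns_messageid_t id;
 *	dns_dispentry_t *resp = NULL;
 *
 *	result = dns_dispatch_add(disp, 0, req_timeout, &peer, NULL, NULL,
 *				  my_connected, my_sent, my_response, cbarg,
 *				  &id, &resp);
 *	if (result == ISC_R_SUCCESS) {
 *		result = dns_dispatch_connect(resp);
 *	}
 *
 *	// Once my_connected() reports success, the caller sends the
 *	// rendered query with dns_dispatch_send() and, when finished,
 *	// releases the entry:
 *	dns_dispatch_done(&resp);
 */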
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
isc_result_t
|
|
|
|
dns_dispatch_getnext(dns_dispentry_t *resp) {
|
|
|
|
REQUIRE(VALID_RESPONSE(resp));
|
|
|
|
REQUIRE(VALID_DISPATCH(resp->disp));
|
2021-08-03 18:24:27 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_t *disp = resp->disp;
|
|
|
|
isc_result_t result = ISC_R_SUCCESS;
|
|
|
|
int32_t timeout = -1;
|
2021-11-30 09:57:27 +01:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
LOCK(&disp->lock);
|
|
|
|
switch (disp->socktype) {
|
|
|
|
case isc_socktype_udp: {
|
|
|
|
timeout = resp->timeout - dispentry_runtime(resp);
|
|
|
|
if (timeout <= 0) {
|
|
|
|
result = ISC_R_TIMEDOUT;
|
|
|
|
break;
|
2021-08-03 18:24:27 -07:00
|
|
|
}
|
2022-11-30 17:58:35 +01:00
|
|
|
udp_dispatch_getnext(resp, timeout);
|
2021-08-03 18:24:27 -07:00
|
|
|
break;
|
2022-11-30 17:58:35 +01:00
|
|
|
}
|
2021-08-03 18:24:27 -07:00
|
|
|
case isc_socktype_tcp:
|
2022-11-30 17:58:35 +01:00
|
|
|
tcp_dispatch_getnext(disp, resp, timeout);
|
2021-08-03 18:24:27 -07:00
|
|
|
break;
|
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2021-08-03 18:24:27 -07:00
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
UNLOCK(&disp->lock);
|
2016-07-11 13:36:16 +10:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
return (result);
|
|
|
|
}
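
/*
 * Note on dns_dispatch_getnext() above: for UDP the remaining time is
 * recomputed as the entry's original timeout minus the time it has
 * already been running, so a caller that resumes reading after the
 * deadline gets ISC_R_TIMEDOUT immediately rather than a fresh
 * full-length read.
 */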
|
|
|
|
|
|
|
|
static void
|
|
|
|
udp_dispentry_cancel(dns_dispentry_t *resp, isc_result_t result) {
|
2016-07-11 13:36:16 +10:00
|
|
|
REQUIRE(VALID_RESPONSE(resp));
|
2022-11-30 17:58:35 +01:00
|
|
|
REQUIRE(VALID_DISPATCH(resp->disp));
|
|
|
|
REQUIRE(VALID_DISPATCHMGR(resp->disp->mgr));
|
2016-07-11 13:36:16 +10:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_t *disp = resp->disp;
|
|
|
|
dns_dispatchmgr_t *mgr = disp->mgr;
|
|
|
|
dns_qid_t *qid = mgr->qid;
|
|
|
|
dispatch_cb_t response = NULL;
|
2021-08-04 13:14:11 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
LOCK(&disp->lock);
|
|
|
|
dispentry_log(resp, LVL(90),
|
|
|
|
"canceling response: %s, %s/%s (%s/%s), "
|
|
|
|
"requests %u",
|
|
|
|
isc_result_totext(result), state2str(resp->state),
|
|
|
|
resp->reading ? "reading" : "not reading",
|
|
|
|
state2str(disp->state),
|
|
|
|
disp->reading ? "reading" : "not reading",
|
|
|
|
disp->requests);
|
2016-07-11 13:36:16 +10:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
if (ISC_LINK_LINKED(resp, alink)) {
|
|
|
|
ISC_LIST_UNLINK(disp->active, resp, alink);
|
2021-10-01 12:53:31 -07:00
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
switch (resp->state) {
|
|
|
|
case DNS_DISPATCHSTATE_NONE:
|
|
|
|
break;
|
2021-10-03 15:15:50 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
case DNS_DISPATCHSTATE_CONNECTING:
|
|
|
|
break;
|
2021-10-03 15:15:50 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
case DNS_DISPATCHSTATE_CONNECTED:
|
|
|
|
if (resp->reading) {
|
|
|
|
dns_dispentry_ref(resp); /* DISPENTRY003 */
|
|
|
|
response = resp->response;
|
2021-10-03 15:15:50 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dispentry_log(resp, LVL(90), "canceling read on %p",
|
|
|
|
resp->handle);
|
|
|
|
isc_nm_cancelread(resp->handle);
|
|
|
|
}
|
|
|
|
break;
|
2021-10-03 15:15:50 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
case DNS_DISPATCHSTATE_CANCELED:
|
|
|
|
goto unlock;
|
2021-10-03 15:15:50 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
2021-10-03 15:15:50 -07:00
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dec_stats(disp->mgr, dns_resstatscounter_disprequdp);
|
2021-10-03 15:15:50 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
LOCK(&qid->lock);
|
|
|
|
ISC_LIST_UNLINK(qid->qid_table[resp->bucket], resp, link);
|
|
|
|
UNLOCK(&qid->lock);
|
|
|
|
resp->state = DNS_DISPATCHSTATE_CANCELED;
|
2021-10-03 15:15:50 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
unlock:
|
2021-10-18 11:49:56 +02:00
|
|
|
UNLOCK(&disp->lock);
|
2021-10-03 15:15:50 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
if (response) {
|
|
|
|
dispentry_log(resp, LVL(90), "read callback: %s",
|
|
|
|
isc_result_totext(result));
|
|
|
|
response(result, NULL, resp->arg);
|
|
|
|
dns_dispentry_detach(&resp); /* DISPENTRY003 */
|
|
|
|
}
|
2021-10-03 15:15:50 -07:00
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
static void
|
|
|
|
tcp_dispentry_cancel(dns_dispentry_t *resp, isc_result_t result) {
|
|
|
|
REQUIRE(VALID_RESPONSE(resp));
|
|
|
|
REQUIRE(VALID_DISPATCH(resp->disp));
|
|
|
|
REQUIRE(VALID_DISPATCHMGR(resp->disp->mgr));
|
1999-06-18 02:01:42 +00:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_t *disp = resp->disp;
|
|
|
|
dns_dispatchmgr_t *mgr = disp->mgr;
|
|
|
|
dns_qid_t *qid = mgr->qid;
|
|
|
|
dns_displist_t resps = ISC_LIST_INITIALIZER;
|
2021-05-25 22:54:17 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
LOCK(&disp->lock);
|
|
|
|
dispentry_log(resp, LVL(90),
|
|
|
|
"canceling response: %s, %s/%s (%s/%s), "
|
|
|
|
"requests %u",
|
|
|
|
isc_result_totext(result), state2str(resp->state),
|
|
|
|
resp->reading ? "reading" : "not reading",
|
|
|
|
state2str(disp->state),
|
|
|
|
disp->reading ? "reading" : "not reading",
|
|
|
|
disp->requests);
|
|
|
|
|
|
|
|
switch (resp->state) {
|
|
|
|
case DNS_DISPATCHSTATE_NONE:
|
|
|
|
break;
|
2021-05-25 22:54:17 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
case DNS_DISPATCHSTATE_CONNECTING:
|
|
|
|
break;
|
1999-06-18 02:01:42 +00:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
case DNS_DISPATCHSTATE_CONNECTED:
|
|
|
|
if (resp->reading) {
|
|
|
|
tcp_recv_add(&resps, resp, ISC_R_CANCELED);
|
|
|
|
}
|
2021-01-14 13:02:57 -08:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
INSIST(!ISC_LINK_LINKED(resp, alink));
|
2021-05-25 22:54:17 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
if (ISC_LIST_EMPTY(disp->active)) {
|
|
|
|
INSIST(disp->handle != NULL);
|
2021-05-25 22:54:17 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
#if DISPATCH_TCP_KEEPALIVE
|
|
|
|
/*
|
|
|
|
* This is experimental code that keeps the TCP
|
|
|
|
* connection open for 1 second before it is finally
|
|
|
|
* closed. By keeping the TCP connection open, it can
|
|
|
|
* be reused by dns_request that uses
|
|
|
|
* dns_dispatch_gettcp() to join existing TCP
|
|
|
|
* connections.
|
|
|
|
*
|
|
|
|
* It is disabled for now, because it changes the
|
|
|
|
* behaviour, but I am keeping the code here for future
|
|
|
|
* reference for when we improve dns_dispatch to reuse
|
|
|
|
* TCP connections in the resolver as well.
|
|
|
|
*
|
|
|
|
* The TCP connection reuse should be seamless and not
|
|
|
|
* require any extra handling on the client side though.
|
|
|
|
*/
|
|
|
|
isc_nmhandle_cleartimeout(disp->handle);
|
|
|
|
isc_nmhandle_settimeout(disp->handle, 1000);
|
|
|
|
|
|
|
|
if (!disp->reading) {
|
|
|
|
dispentry_log(resp, LVL(90),
|
|
|
|
"final 1 second timeout on %p",
|
|
|
|
disp->handle);
|
|
|
|
tcp_startrecv(NULL, disp, NULL);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
if (disp->reading) {
|
|
|
|
dispentry_log(resp, LVL(90),
|
|
|
|
"canceling read on %p",
|
|
|
|
disp->handle);
|
|
|
|
isc_nm_cancelread(disp->handle);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
break;
|
2000-09-18 04:50:05 +00:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
case DNS_DISPATCHSTATE_CANCELED:
|
|
|
|
goto unlock;
|
2021-05-25 22:54:17 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
2020-12-09 19:44:41 -08:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dec_stats(disp->mgr, dns_resstatscounter_dispreqtcp);
|
1999-06-18 02:01:42 +00:00
|
|
|
|
2000-09-18 04:50:05 +00:00
|
|
|
LOCK(&qid->lock);
|
2021-05-25 22:54:17 -07:00
|
|
|
ISC_LIST_UNLINK(qid->qid_table[resp->bucket], resp, link);
|
2000-09-18 04:50:05 +00:00
|
|
|
UNLOCK(&qid->lock);
|
2022-11-30 17:58:35 +01:00
|
|
|
resp->state = DNS_DISPATCHSTATE_CANCELED;
|
|
|
|
|
|
|
|
unlock:
|
2021-05-25 22:54:17 -07:00
|
|
|
UNLOCK(&disp->lock);
|
1999-07-09 00:51:08 +00:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
/*
|
2022-12-20 08:39:36 +01:00
|
|
|
* NOTE: Ideally the response callbacks would be invoked asynchronously
|
|
|
|
* here. Since dns_dispatch_done() is usually called directly from the
|
|
|
|
* response callback, invoking them synchronously can let the call
|
|
|
|
* stack grow slightly deeper, but the ".reading" flag guarantees
|
|
|
|
* that this never turns into a loop.
|
2022-11-30 17:58:35 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
tcp_recv_processall(&resps, NULL);
|
2021-01-14 13:02:57 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2022-11-30 17:58:35 +01:00
|
|
|
dispentry_cancel(dns_dispentry_t *resp, isc_result_t result) {
|
|
|
|
REQUIRE(VALID_RESPONSE(resp));
|
|
|
|
REQUIRE(VALID_DISPATCH(resp->disp));
|
|
|
|
|
|
|
|
dns_dispatch_t *disp = resp->disp;
|
|
|
|
|
2021-01-14 13:02:57 -08:00
|
|
|
switch (disp->socktype) {
|
|
|
|
case isc_socktype_udp:
|
2022-11-30 17:58:35 +01:00
|
|
|
udp_dispentry_cancel(resp, result);
|
2021-05-25 22:54:17 -07:00
|
|
|
break;
|
2021-01-14 13:02:57 -08:00
|
|
|
case isc_socktype_tcp:
|
2022-11-30 17:58:35 +01:00
|
|
|
tcp_dispentry_cancel(resp, result);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
}
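
/*
 * dns_dispatch_done() below is the caller-facing teardown for a dispatch
 * entry: it cancels any outstanding work (which may invoke the response
 * callback one final time with ISC_R_CANCELED) and then releases the
 * reference created in dns_dispatch_add() (DISPENTRY000).
 */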
|
2021-10-21 02:28:48 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
void
|
|
|
|
dns_dispatch_done(dns_dispentry_t **respp) {
|
|
|
|
REQUIRE(VALID_RESPONSE(*respp));
|
2021-05-25 22:54:17 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispentry_t *resp = *respp;
|
|
|
|
*respp = NULL;
|
2021-08-04 13:14:11 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dispentry_cancel(resp, ISC_R_CANCELED);
|
|
|
|
dns_dispentry_detach(&resp); /* DISPENTRY000 */
|
|
|
|
}
|
2021-07-26 20:23:18 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
static void
|
|
|
|
udp_startrecv(isc_nmhandle_t *handle, dns_dispentry_t *resp) {
|
|
|
|
REQUIRE(VALID_RESPONSE(resp));
|
|
|
|
|
|
|
|
TIME_NOW(&resp->start);
|
|
|
|
dispentry_log(resp, LVL(90), "attaching handle %p to %p", handle,
|
|
|
|
&resp->handle);
|
|
|
|
isc_nmhandle_attach(handle, &resp->handle);
|
|
|
|
dns_dispentry_ref(resp); /* DISPENTRY003 */
|
|
|
|
dispentry_log(resp, LVL(90), "reading");
|
|
|
|
isc_nm_read(resp->handle, udp_recv, resp);
|
|
|
|
resp->reading = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tcp_startrecv(isc_nmhandle_t *handle, dns_dispatch_t *disp,
|
|
|
|
dns_dispentry_t *resp) {
|
|
|
|
REQUIRE(VALID_DISPATCH(disp));
|
|
|
|
REQUIRE(disp->socktype == isc_socktype_tcp);
|
|
|
|
|
|
|
|
if (handle != NULL) {
|
|
|
|
isc_nmhandle_attach(handle, &disp->handle);
|
|
|
|
}
|
|
|
|
dns_dispatch_ref(disp); /* DISPATCH002 */
|
|
|
|
if (resp != NULL) {
|
|
|
|
dispentry_log(resp, LVL(90), "reading from %p", disp->handle);
|
|
|
|
} else {
|
|
|
|
dispatch_log(disp, LVL(90),
|
|
|
|
"TCP reading without response from %p",
|
|
|
|
disp->handle);
|
2020-12-19 01:34:41 -08:00
|
|
|
}
|
2022-11-30 17:58:35 +01:00
|
|
|
isc_nm_read(disp->handle, tcp_recv, disp);
|
|
|
|
disp->reading = true;
|
2021-01-14 13:02:57 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2021-08-04 13:14:11 -07:00
|
|
|
tcp_connected(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) {
|
|
|
|
dns_dispatch_t *disp = (dns_dispatch_t *)arg;
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispentry_t *resp = NULL;
|
|
|
|
dns_dispentry_t *next = NULL;
|
|
|
|
dns_displist_t resps = ISC_LIST_INITIALIZER;
|
2021-08-04 13:14:11 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
if (isc_log_wouldlog(dns_lctx, 90)) {
|
|
|
|
char localbuf[ISC_SOCKADDR_FORMATSIZE];
|
|
|
|
char peerbuf[ISC_SOCKADDR_FORMATSIZE];
|
|
|
|
if (handle != NULL) {
|
|
|
|
isc_sockaddr_t local = isc_nmhandle_localaddr(handle);
|
|
|
|
isc_sockaddr_t peer = isc_nmhandle_peeraddr(handle);
|
|
|
|
|
|
|
|
isc_sockaddr_format(&local, localbuf,
|
|
|
|
ISC_SOCKADDR_FORMATSIZE);
|
|
|
|
isc_sockaddr_format(&peer, peerbuf,
|
|
|
|
ISC_SOCKADDR_FORMATSIZE);
|
|
|
|
} else {
|
|
|
|
isc_sockaddr_format(&disp->local, localbuf,
|
|
|
|
ISC_SOCKADDR_FORMATSIZE);
|
|
|
|
isc_sockaddr_format(&disp->peer, peerbuf,
|
|
|
|
ISC_SOCKADDR_FORMATSIZE);
|
|
|
|
}
|
2021-08-04 13:14:11 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dispatch_log(disp, LVL(90), "connected from %s to %s: %s",
|
|
|
|
localbuf, peerbuf, isc_result_totext(eresult));
|
|
|
|
}
|
|
|
|
|
|
|
|
LOCK(&disp->lock);
|
2022-12-19 18:17:46 +01:00
|
|
|
INSIST(disp->state == DNS_DISPATCHSTATE_CONNECTING);
|
2021-08-04 13:14:11 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If there are pending responses, call the connect
|
|
|
|
* callbacks for all of them.
|
|
|
|
*/
|
|
|
|
for (resp = ISC_LIST_HEAD(disp->pending); resp != NULL; resp = next) {
|
|
|
|
next = ISC_LIST_NEXT(resp, plink);
|
|
|
|
ISC_LIST_UNLINK(disp->pending, resp, plink);
|
2022-11-30 17:58:35 +01:00
|
|
|
ISC_LIST_APPEND(resps, resp, rlink);
|
2022-12-20 08:39:36 +01:00
|
|
|
resp->result = eresult;
|
2022-11-30 17:58:35 +01:00
|
|
|
|
2022-12-20 08:39:36 +01:00
|
|
|
if (resp->state == DNS_DISPATCHSTATE_CANCELED) {
|
|
|
|
resp->result = ISC_R_CANCELED;
|
|
|
|
} else if (eresult == ISC_R_SUCCESS) {
|
2022-11-30 17:58:35 +01:00
|
|
|
resp->state = DNS_DISPATCHSTATE_CONNECTED;
|
|
|
|
ISC_LIST_APPEND(disp->active, resp, alink);
|
|
|
|
resp->reading = true;
|
|
|
|
dispentry_log(resp, LVL(90), "start reading");
|
|
|
|
} else {
|
|
|
|
resp->state = DNS_DISPATCHSTATE_NONE;
|
|
|
|
}
|
2021-08-04 13:14:11 -07:00
|
|
|
}
|
2022-12-20 08:39:36 +01:00
|
|
|
|
|
|
|
if (ISC_LIST_EMPTY(disp->active)) {
|
|
|
|
/* All responses have been canceled */
|
|
|
|
disp->state = DNS_DISPATCHSTATE_CANCELED;
|
|
|
|
} else if (eresult == ISC_R_SUCCESS) {
|
|
|
|
disp->state = DNS_DISPATCHSTATE_CONNECTED;
|
|
|
|
tcp_startrecv(handle, disp, resp);
|
|
|
|
} else {
|
|
|
|
disp->state = DNS_DISPATCHSTATE_NONE;
|
|
|
|
}
|
|
|
|
|
2021-08-04 13:14:11 -07:00
|
|
|
UNLOCK(&disp->lock);
|
2021-08-03 18:24:27 -07:00
|
|
|
|
2021-08-04 13:14:11 -07:00
|
|
|
for (resp = ISC_LIST_HEAD(resps); resp != NULL; resp = next) {
|
2022-11-30 17:58:35 +01:00
|
|
|
next = ISC_LIST_NEXT(resp, rlink);
|
|
|
|
ISC_LIST_UNLINK(resps, resp, rlink);
|
2021-08-04 13:14:11 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dispentry_log(resp, LVL(90), "connect callback: %s",
|
2022-12-20 08:39:36 +01:00
|
|
|
isc_result_totext(resp->result));
|
|
|
|
resp->connected(resp->result, NULL, resp->arg);
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispentry_detach(&resp); /* DISPENTRY005 */
|
2021-01-14 13:02:57 -08:00
|
|
|
}
|
2020-12-19 01:34:41 -08:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_detach(&disp); /* DISPATCH003 */
|
2021-08-04 13:14:11 -07:00
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
static void
|
|
|
|
udp_dispatch_connect(dns_dispatch_t *disp, dns_dispentry_t *resp);
|
|
|
|
|
2021-08-04 13:14:11 -07:00
|
|
|
static void
|
|
|
|
udp_connected(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) {
|
|
|
|
dns_dispentry_t *resp = (dns_dispentry_t *)arg;
|
|
|
|
dns_dispatch_t *disp = resp->disp;
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dispentry_log(resp, LVL(90), "connected: %s",
|
|
|
|
isc_result_totext(eresult));
|
2021-08-04 13:14:11 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
LOCK(&disp->lock);
|
|
|
|
|
|
|
|
switch (resp->state) {
|
|
|
|
case DNS_DISPATCHSTATE_CANCELED:
|
2022-12-20 08:39:36 +01:00
|
|
|
eresult = ISC_R_CANCELED;
|
|
|
|
ISC_LIST_UNLINK(disp->pending, resp, plink);
|
|
|
|
goto unlock;
|
2022-11-30 17:58:35 +01:00
|
|
|
case DNS_DISPATCHSTATE_CONNECTING:
|
|
|
|
ISC_LIST_UNLINK(disp->pending, resp, plink);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (eresult) {
|
2022-12-20 08:39:36 +01:00
|
|
|
case ISC_R_CANCELED:
|
|
|
|
break;
|
2022-11-30 17:58:35 +01:00
|
|
|
case ISC_R_SUCCESS:
|
|
|
|
resp->state = DNS_DISPATCHSTATE_CONNECTED;
|
|
|
|
udp_startrecv(handle, resp);
|
|
|
|
break;
|
|
|
|
case ISC_R_ADDRINUSE: {
|
2021-08-04 13:14:11 -07:00
|
|
|
in_port_t localport = 0;
|
|
|
|
isc_result_t result;
|
|
|
|
|
|
|
|
/* probably a port collision; try a different one */
|
|
|
|
result = setup_socket(disp, resp, &resp->peer, &localport);
|
|
|
|
if (result == ISC_R_SUCCESS) {
|
2022-11-30 17:58:35 +01:00
|
|
|
UNLOCK(&disp->lock);
|
|
|
|
udp_dispatch_connect(disp, resp);
|
2021-08-04 13:14:11 -07:00
|
|
|
goto detach;
|
|
|
|
}
|
2022-11-30 17:58:35 +01:00
|
|
|
resp->state = DNS_DISPATCHSTATE_NONE;
|
|
|
|
break;
|
2021-01-14 13:02:57 -08:00
|
|
|
}
|
2022-11-30 17:58:35 +01:00
|
|
|
default:
|
|
|
|
resp->state = DNS_DISPATCHSTATE_NONE;
|
|
|
|
break;
|
2021-08-03 18:24:27 -07:00
|
|
|
}
|
2022-12-20 08:39:36 +01:00
|
|
|
unlock:
|
2022-11-30 17:58:35 +01:00
|
|
|
UNLOCK(&disp->lock);
|
|
|
|
|
|
|
|
dispentry_log(resp, LVL(90), "connect callback: %s",
|
|
|
|
isc_result_totext(eresult));
|
|
|
|
resp->connected(eresult, NULL, resp->arg);
|
|
|
|
|
2021-08-04 13:14:11 -07:00
|
|
|
detach:
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispentry_detach(&resp); /* DISPENTRY004 */
|
2020-12-19 01:34:41 -08:00
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
static void
|
|
|
|
udp_dispatch_connect(dns_dispatch_t *disp, dns_dispentry_t *resp) {
|
|
|
|
LOCK(&disp->lock);
|
|
|
|
resp->state = DNS_DISPATCHSTATE_CONNECTING;
|
|
|
|
dns_dispentry_ref(resp); /* DISPENTRY004 */
|
|
|
|
ISC_LIST_APPEND(disp->pending, resp, plink);
|
|
|
|
UNLOCK(&disp->lock);
|
|
|
|
isc_nm_udpconnect(disp->mgr->nm, &resp->local, &resp->peer,
|
|
|
|
udp_connected, resp, resp->timeout);
|
|
|
|
}
|
|
|
|
|
|
|
|
static isc_result_t
|
|
|
|
tcp_dispatch_connect(dns_dispatch_t *disp, dns_dispentry_t *resp) {
|
|
|
|
dns_transport_type_t transport_type = DNS_TRANSPORT_TCP;
|
2022-09-19 11:04:22 +00:00
|
|
|
isc_tlsctx_t *tlsctx = NULL;
|
|
|
|
isc_tlsctx_client_session_cache_t *sess_cache = NULL;
|
2021-01-04 23:03:50 -08:00
|
|
|
|
2022-09-19 11:04:22 +00:00
|
|
|
if (resp->transport != NULL) {
|
|
|
|
transport_type = dns_transport_get_type(resp->transport);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (transport_type == DNS_TRANSPORT_TLS) {
|
|
|
|
isc_result_t result;
|
|
|
|
|
|
|
|
result = dns_transport_get_tlsctx(
|
|
|
|
resp->transport, &resp->peer, resp->tlsctx_cache,
|
|
|
|
resp->disp->mgr->mctx, &tlsctx, &sess_cache);
|
|
|
|
|
|
|
|
if (result != ISC_R_SUCCESS) {
|
|
|
|
return (result);
|
|
|
|
}
|
|
|
|
INSIST(tlsctx != NULL);
|
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
/* Check whether the dispatch is already connecting or connected. */
|
|
|
|
LOCK(&disp->lock);
|
|
|
|
switch (disp->state) {
|
|
|
|
case DNS_DISPATCHSTATE_NONE:
|
|
|
|
/* First connection; continue with connecting */
|
|
|
|
disp->state = DNS_DISPATCHSTATE_CONNECTING;
|
|
|
|
resp->state = DNS_DISPATCHSTATE_CONNECTING;
|
|
|
|
dns_dispentry_ref(resp); /* DISPENTRY005 */
|
|
|
|
ISC_LIST_APPEND(disp->pending, resp, plink);
|
|
|
|
UNLOCK(&disp->lock);
|
2021-05-25 22:54:17 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
char localbuf[ISC_SOCKADDR_FORMATSIZE];
|
|
|
|
char peerbuf[ISC_SOCKADDR_FORMATSIZE];
|
2021-08-04 13:14:11 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
isc_sockaddr_format(&disp->local, localbuf,
|
|
|
|
ISC_SOCKADDR_FORMATSIZE);
|
|
|
|
isc_sockaddr_format(&disp->peer, peerbuf,
|
|
|
|
ISC_SOCKADDR_FORMATSIZE);
|
2021-08-04 13:14:11 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_ref(disp); /* DISPATCH003 */
|
|
|
|
dispentry_log(resp, LVL(90),
|
|
|
|
"connecting from %s to %s, timeout %u", localbuf,
|
|
|
|
peerbuf, resp->timeout);
|
2021-08-04 13:14:11 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
if (transport_type == DNS_TRANSPORT_TLS) {
|
2022-08-05 19:43:34 +03:00
|
|
|
isc_nm_streamdnsconnect(disp->mgr->nm, &disp->local,
|
|
|
|
&disp->peer, tcp_connected,
|
|
|
|
disp, resp->timeout, tlsctx,
|
|
|
|
sess_cache);
|
2022-11-30 17:58:35 +01:00
|
|
|
} else {
|
2022-08-05 19:43:34 +03:00
|
|
|
isc_nm_streamdnsconnect(
|
|
|
|
disp->mgr->nm, &disp->local, &disp->peer,
|
|
|
|
tcp_connected, disp, resp->timeout, NULL, NULL);
|
2021-08-03 18:24:27 -07:00
|
|
|
}
|
2022-11-30 17:58:35 +01:00
|
|
|
break;
|
2021-08-03 18:24:27 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
case DNS_DISPATCHSTATE_CONNECTING:
|
|
|
|
/* Connection pending; add resp to the list */
|
|
|
|
resp->state = DNS_DISPATCHSTATE_CONNECTING;
|
|
|
|
dns_dispentry_ref(resp); /* DISPENTRY005 */
|
|
|
|
ISC_LIST_APPEND(disp->pending, resp, plink);
|
|
|
|
UNLOCK(&disp->lock);
|
2021-01-14 13:02:57 -08:00
|
|
|
break;
|
2021-08-04 13:14:11 -07:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
case DNS_DISPATCHSTATE_CONNECTED:
|
|
|
|
resp->state = DNS_DISPATCHSTATE_CONNECTED;
|
|
|
|
|
|
|
|
/* Add the resp to the reading list */
|
|
|
|
ISC_LIST_APPEND(disp->active, resp, alink);
|
|
|
|
dispentry_log(resp, LVL(90), "already connected; attaching");
|
|
|
|
resp->reading = true;
|
|
|
|
|
|
|
|
if (!disp->reading) {
|
|
|
|
/* Restart the reading */
|
|
|
|
tcp_startrecv(NULL, disp, resp);
|
|
|
|
}
|
|
|
|
|
|
|
|
UNLOCK(&disp->lock);
|
|
|
|
/* We are already connected; call the connected cb */
|
|
|
|
dispentry_log(resp, LVL(90), "connect callback: %s",
|
|
|
|
isc_result_totext(ISC_R_SUCCESS));
|
|
|
|
resp->connected(ISC_R_SUCCESS, NULL, resp->arg);
|
2021-01-14 13:02:57 -08:00
|
|
|
break;
|
2021-08-04 13:14:11 -07:00
|
|
|
|
2021-01-14 13:02:57 -08:00
|
|
|
default:
|
2022-11-30 17:58:35 +01:00
|
|
|
UNREACHABLE();
|
2021-01-14 13:02:57 -08:00
|
|
|
}
|
2021-01-04 23:03:50 -08:00
|
|
|
|
2021-01-14 13:02:57 -08:00
|
|
|
return (ISC_R_SUCCESS);
|
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
isc_result_t
|
|
|
|
dns_dispatch_connect(dns_dispentry_t *resp) {
|
|
|
|
REQUIRE(VALID_RESPONSE(resp));
|
|
|
|
REQUIRE(VALID_DISPATCH(resp->disp));
|
|
|
|
|
|
|
|
dns_dispatch_t *disp = resp->disp;
|
|
|
|
|
|
|
|
switch (disp->socktype) {
|
|
|
|
case isc_socktype_tcp:
|
|
|
|
return (tcp_dispatch_connect(disp, resp));
|
|
|
|
|
|
|
|
case isc_socktype_udp:
|
|
|
|
udp_dispatch_connect(disp, resp);
|
|
|
|
return (ISC_R_SUCCESS);
|
|
|
|
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
}
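
/*
 * A minimal sketch of a 'connected' callback as invoked from the connect
 * paths above (the function name and the exact dispatch_cb_t typedef are
 * assumptions; the argument values shown, a result code, a NULL region,
 * and the caller's 'arg', match the calls made above):
 *
 *	static void
 *	my_connected(isc_result_t result, isc_region_t *region, void *arg) {
 *		my_query_t *query = (my_query_t *)arg;	// hypothetical type
 *
 *		UNUSED(region);	// always NULL for connect callbacks
 *
 *		if (result != ISC_R_SUCCESS) {
 *			// connect failed or was canceled; clean up here
 *			return;
 *		}
 *		// connected: render the query and call dns_dispatch_send()
 *	}
 */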
|
|
|
|
|
2021-05-25 22:54:17 -07:00
|
|
|
static void
|
|
|
|
send_done(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
|
|
|
|
dns_dispentry_t *resp = (dns_dispentry_t *)cbarg;
|
|
|
|
|
|
|
|
REQUIRE(VALID_RESPONSE(resp));
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_t *disp = resp->disp;
|
|
|
|
|
|
|
|
REQUIRE(VALID_DISPATCH(disp));
|
|
|
|
|
|
|
|
dispentry_log(resp, LVL(90), "sent: %s", isc_result_totext(result));
|
|
|
|
|
2021-08-03 15:27:06 +02:00
|
|
|
resp->sent(result, NULL, resp->arg);
|
2021-05-25 22:54:17 -07:00
|
|
|
|
|
|
|
if (result != ISC_R_SUCCESS) {
|
2022-11-30 17:58:35 +01:00
|
|
|
dispentry_cancel(resp, result);
|
2021-05-25 22:54:17 -07:00
|
|
|
}
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispentry_detach(&resp); /* DISPENTRY007 */
|
|
|
|
isc_nmhandle_detach(&handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tcp_dispatch_getnext(dns_dispatch_t *disp, dns_dispentry_t *resp,
|
|
|
|
int32_t timeout) {
|
|
|
|
REQUIRE(timeout <= INT16_MAX);
|
|
|
|
|
|
|
|
if (disp->reading) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (timeout > 0) {
|
|
|
|
isc_nmhandle_settimeout(disp->handle, timeout);
|
|
|
|
}
|
|
|
|
|
|
|
|
dispentry_log(resp, LVL(90), "continue reading");
|
|
|
|
|
|
|
|
dns_dispatch_ref(disp); /* DISPATCH002 */
|
|
|
|
isc_nm_read(disp->handle, tcp_recv, disp);
|
|
|
|
disp->reading = true;
|
|
|
|
|
|
|
|
ISC_LIST_APPEND(disp->active, resp, alink);
|
|
|
|
resp->reading = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
udp_dispatch_getnext(dns_dispentry_t *resp, int32_t timeout) {
|
|
|
|
REQUIRE(timeout <= INT16_MAX);
|
|
|
|
|
|
|
|
if (resp->reading) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (timeout > 0) {
|
|
|
|
isc_nmhandle_settimeout(resp->handle, timeout);
|
|
|
|
}
|
|
|
|
|
|
|
|
dispentry_log(resp, LVL(90), "continue reading");
|
|
|
|
|
|
|
|
dns_dispentry_ref(resp); /* DISPENTRY003 */
|
|
|
|
isc_nm_read(resp->handle, udp_recv, resp);
|
|
|
|
resp->reading = true;
|
2021-05-25 22:54:17 -07:00
|
|
|
}
|
|
|
|
|
2021-08-03 15:27:06 +02:00
|
|
|
void
|
2021-08-03 18:24:27 -07:00
|
|
|
dns_dispatch_resume(dns_dispentry_t *resp, uint16_t timeout) {
|
|
|
|
REQUIRE(VALID_RESPONSE(resp));
|
2022-11-30 17:58:35 +01:00
|
|
|
REQUIRE(VALID_DISPATCH(resp->disp));
|
2021-08-03 15:27:06 +02:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_t *disp = resp->disp;
|
2021-08-03 15:27:06 +02:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
LOCK(&disp->lock);
|
|
|
|
switch (disp->socktype) {
|
|
|
|
case isc_socktype_udp: {
|
|
|
|
udp_dispatch_getnext(resp, timeout);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case isc_socktype_tcp:
|
|
|
|
INSIST(disp->timedout > 0);
|
|
|
|
disp->timedout--;
|
|
|
|
tcp_dispatch_getnext(disp, resp, timeout);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
2021-08-03 15:27:06 +02:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
UNLOCK(&disp->lock);
|
2021-08-03 15:27:06 +02:00
|
|
|
}
|
|
|
|
|
2021-01-14 13:02:57 -08:00
|
|
|
void
|
|
|
|
dns_dispatch_send(dns_dispentry_t *resp, isc_region_t *r, isc_dscp_t dscp) {
|
|
|
|
REQUIRE(VALID_RESPONSE(resp));
|
2022-11-30 17:58:35 +01:00
|
|
|
REQUIRE(VALID_DISPATCH(resp->disp));
|
2021-01-14 13:02:57 -08:00
|
|
|
UNUSED(dscp);
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_t *disp = resp->disp;
|
|
|
|
isc_nmhandle_t *sendhandle = NULL;
|
|
|
|
|
2021-01-14 13:02:57 -08:00
|
|
|
#if 0
|
|
|
|
/* XXX: no DSCP support */
|
2021-01-04 23:03:50 -08:00
|
|
|
if (dscp == -1) {
|
|
|
|
sendevent->attributes &= ~ISC_SOCKEVENTATTR_DSCP;
|
|
|
|
sendevent->dscp = 0;
|
|
|
|
} else {
|
|
|
|
sendevent->attributes |= ISC_SOCKEVENTATTR_DSCP;
|
|
|
|
sendevent->dscp = dscp;
|
|
|
|
if (tcp) {
|
|
|
|
isc_socket_dscp(sock, dscp);
|
|
|
|
}
|
|
|
|
}
|
2021-01-14 13:02:57 -08:00
|
|
|
#endif
|
2021-01-04 23:03:50 -08:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dispentry_log(resp, LVL(90), "sending");
|
|
|
|
switch (disp->socktype) {
|
|
|
|
case isc_socktype_udp:
|
|
|
|
isc_nmhandle_attach(resp->handle, &sendhandle);
|
|
|
|
break;
|
|
|
|
case isc_socktype_tcp:
|
|
|
|
isc_nmhandle_attach(disp->handle, &sendhandle);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
2021-08-03 18:24:27 -07:00
|
|
|
}
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispentry_ref(resp); /* DISPENTRY007 */
|
|
|
|
isc_nm_send(sendhandle, r, send_done, resp);
|
2021-01-04 23:03:50 -08:00
|
|
|
}
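
/*
 * Note on the send path above: for UDP the per-entry handle is used, for
 * TCP the shared per-dispatch handle; in both cases an extra handle
 * reference and a dispentry reference (DISPENTRY007) are taken here and
 * released again in send_done().
 */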
|
|
|
|
|
2001-03-13 05:48:41 +00:00
|
|
|
isc_result_t
|
|
|
|
dns_dispatch_getlocaladdress(dns_dispatch_t *disp, isc_sockaddr_t *addrp) {
|
|
|
|
REQUIRE(VALID_DISPATCH(disp));
|
|
|
|
REQUIRE(addrp != NULL);
|
|
|
|
|
2021-01-14 13:02:57 -08:00
|
|
|
if (disp->socktype == isc_socktype_udp) {
|
2001-03-13 05:48:41 +00:00
|
|
|
*addrp = disp->local;
|
|
|
|
return (ISC_R_SUCCESS);
|
|
|
|
}
|
|
|
|
return (ISC_R_NOTIMPLEMENTED);
|
|
|
|
}
|
|
|
|
|
2021-01-04 14:38:35 -08:00
|
|
|
isc_result_t
|
|
|
|
dns_dispentry_getlocaladdress(dns_dispentry_t *resp, isc_sockaddr_t *addrp) {
|
|
|
|
REQUIRE(VALID_RESPONSE(resp));
|
2022-11-30 17:58:35 +01:00
|
|
|
REQUIRE(VALID_DISPATCH(resp->disp));
|
2021-01-04 14:38:35 -08:00
|
|
|
REQUIRE(addrp != NULL);
|
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_t *disp = resp->disp;
|
2021-01-04 14:38:35 -08:00
|
|
|
|
2022-11-30 17:58:35 +01:00
|
|
|
switch (disp->socktype) {
|
|
|
|
case isc_socktype_tcp:
|
|
|
|
*addrp = disp->local;
|
|
|
|
return (ISC_R_SUCCESS);
|
|
|
|
case isc_socktype_udp:
|
2021-05-25 22:54:17 -07:00
|
|
|
*addrp = isc_nmhandle_localaddr(resp->handle);
|
2021-01-14 13:02:57 -08:00
|
|
|
return (ISC_R_SUCCESS);
|
2022-11-30 17:58:35 +01:00
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
2021-01-04 14:38:35 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-04-27 16:07:24 -07:00
|
|
|
dns_dispatch_t *
|
|
|
|
dns_dispatchset_get(dns_dispatchset_t *dset) {
|
2020-12-09 19:44:41 -08:00
|
|
|
dns_dispatch_t *disp = NULL;
|
2012-04-27 16:07:24 -07:00
|
|
|
|
|
|
|
/* check that dispatch set is configured */
|
|
|
|
if (dset == NULL || dset->ndisp == 0) {
|
|
|
|
return (NULL);
|
2020-02-13 21:48:23 +01:00
|
|
|
}
|
2012-04-27 16:07:24 -07:00
|
|
|
|
|
|
|
LOCK(&dset->lock);
|
|
|
|
disp = dset->dispatches[dset->cur];
|
|
|
|
dset->cur++;
|
|
|
|
if (dset->cur == dset->ndisp) {
|
|
|
|
dset->cur = 0;
|
2020-02-13 21:48:23 +01:00
|
|
|
}
|
2012-04-27 16:07:24 -07:00
|
|
|
UNLOCK(&dset->lock);
|
|
|
|
|
|
|
|
return (disp);
|
|
|
|
}
|
|
|
|
|
|
|
|
isc_result_t
|
2021-07-26 20:23:18 -07:00
|
|
|
dns_dispatchset_create(isc_mem_t *mctx, dns_dispatch_t *source,
|
|
|
|
dns_dispatchset_t **dsetp, int n) {
|
2012-04-27 16:07:24 -07:00
|
|
|
isc_result_t result;
|
2020-12-09 19:44:41 -08:00
|
|
|
dns_dispatchset_t *dset = NULL;
|
|
|
|
dns_dispatchmgr_t *mgr = NULL;
|
2012-04-27 16:07:24 -07:00
|
|
|
int i, j;
|
|
|
|
|
|
|
|
REQUIRE(VALID_DISPATCH(source));
|
2021-08-03 18:24:27 -07:00
|
|
|
REQUIRE(source->socktype == isc_socktype_udp);
|
2012-04-27 16:07:24 -07:00
|
|
|
REQUIRE(dsetp != NULL && *dsetp == NULL);
|
|
|
|
|
|
|
|
mgr = source->mgr;
|
|
|
|
|
|
|
|
dset = isc_mem_get(mctx, sizeof(dns_dispatchset_t));
|
2020-12-09 19:44:41 -08:00
|
|
|
*dset = (dns_dispatchset_t){ .ndisp = n };
|
2012-04-27 16:07:24 -07:00
|
|
|
|
2018-11-16 15:33:22 +01:00
|
|
|
isc_mutex_init(&dset->lock);
|
2012-04-27 16:07:24 -07:00
|
|
|
|
|
|
|
dset->dispatches = isc_mem_get(mctx, sizeof(dns_dispatch_t *) * n);
|
|
|
|
|
|
|
|
isc_mem_attach(mctx, &dset->mctx);
|
|
|
|
|
|
|
|
dset->dispatches[0] = NULL;
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_attach(source, &dset->dispatches[0]); /* DISPATCH004 */
|
2012-04-27 16:07:24 -07:00
|
|
|
|
|
|
|
LOCK(&mgr->lock);
|
|
|
|
for (i = 1; i < n; i++) {
|
|
|
|
dset->dispatches[i] = NULL;
|
2021-07-26 20:23:18 -07:00
|
|
|
result = dispatch_createudp(mgr, &source->local,
|
2020-12-16 01:32:06 -08:00
|
|
|
&dset->dispatches[i]);
|
2012-04-27 16:07:24 -07:00
|
|
|
if (result != ISC_R_SUCCESS) {
|
|
|
|
goto fail;
|
2020-02-13 21:48:23 +01:00
|
|
|
}
|
2012-04-27 16:07:24 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
UNLOCK(&mgr->lock);
|
|
|
|
*dsetp = dset;
|
|
|
|
|
|
|
|
return (ISC_R_SUCCESS);
|
|
|
|
|
|
|
|
fail:
|
|
|
|
UNLOCK(&mgr->lock);
|
2012-04-28 23:45:42 +00:00
|
|
|
|
2012-04-27 16:07:24 -07:00
|
|
|
for (j = 0; j < i; j++) {
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_detach(&(dset->dispatches[j])); /* DISPATCH004 */
|
2020-02-13 21:48:23 +01:00
|
|
|
}
|
2012-04-27 16:07:24 -07:00
|
|
|
isc_mem_put(mctx, dset->dispatches, sizeof(dns_dispatch_t *) * n);
|
|
|
|
if (dset->mctx == mctx) {
|
|
|
|
isc_mem_detach(&dset->mctx);
|
2020-02-13 21:48:23 +01:00
|
|
|
}
|
2012-04-27 16:07:24 -07:00
|
|
|
|
2018-11-19 10:31:09 +00:00
|
|
|
isc_mutex_destroy(&dset->lock);
|
2012-04-27 16:07:24 -07:00
|
|
|
isc_mem_put(mctx, dset, sizeof(dns_dispatchset_t));
|
|
|
|
return (result);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
dns_dispatchset_destroy(dns_dispatchset_t **dsetp) {
|
2020-12-09 19:44:41 -08:00
|
|
|
dns_dispatchset_t *dset = NULL;
|
2012-04-27 16:07:24 -07:00
|
|
|
int i;
|
|
|
|
|
|
|
|
REQUIRE(dsetp != NULL && *dsetp != NULL);
|
|
|
|
|
|
|
|
dset = *dsetp;
|
2020-02-08 04:37:54 -08:00
|
|
|
*dsetp = NULL;
|
2012-04-27 16:07:24 -07:00
|
|
|
for (i = 0; i < dset->ndisp; i++) {
|
2022-11-30 17:58:35 +01:00
|
|
|
dns_dispatch_detach(&(dset->dispatches[i])); /* DISPATCH004 */
|
2020-02-13 21:48:23 +01:00
|
|
|
}
|
2012-04-27 16:07:24 -07:00
|
|
|
isc_mem_put(dset->mctx, dset->dispatches,
|
|
|
|
sizeof(dns_dispatch_t *) * dset->ndisp);
|
2018-11-19 10:31:09 +00:00
|
|
|
isc_mutex_destroy(&dset->lock);
|
2012-04-27 16:07:24 -07:00
|
|
|
isc_mem_putanddetach(&dset->mctx, dset, sizeof(dns_dispatchset_t));
|
|
|
|
}
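
/*
 * A minimal usage sketch for the dispatch set API above (hypothetical
 * caller; 'mctx', 'source_disp' and error handling are assumed; the
 * source dispatch must be UDP):
 *
 *	isc_result_t result;
 *	dns_dispatchset_t *dset = NULL;
 *
 *	result = dns_dispatchset_create(mctx, source_disp, &dset, 4);
 *	if (result == ISC_R_SUCCESS) {
 *		dns_dispatch_t *disp = dns_dispatchset_get(dset);
 *		// 'disp' rotates round-robin over the 4 UDP dispatches
 *		dns_dispatchset_destroy(&dset);
 *	}
 */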
|