/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * SPDX-License-Identifier: MPL-2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

#pragma once

#include <unistd.h>

#include <openssl/err.h>
#include <openssl/ssl.h>

#include <isc/atomic.h>
#include <isc/barrier.h>
#include <isc/buffer.h>
#include <isc/condition.h>
#include <isc/dnsstream.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
#include <isc/proxy2.h>
#include <isc/quota.h>
#include <isc/random.h>
#include <isc/refcount.h>
#include <isc/region.h>
#include <isc/result.h>
#include <isc/sockaddr.h>
#include <isc/stats.h>
#include <isc/thread.h>
#include <isc/tid.h>
#include <isc/time.h>
#include <isc/tls.h>
#include <isc/util.h>
#include <isc/uv.h>

#include "../loop_p.h"

#define ISC_NETMGR_TID_UNKNOWN -1
/*
 * Receive buffers
 */
#if HAVE_DECL_UV_UDP_MMSG_CHUNK
/*
 * The value 20 here is UV__MMSG_MAXWIDTH taken from the current libuv source;
 * libuv will not receive more than 20 datagrams in a single recvmmsg call.
 */
#define ISC_NETMGR_UDP_RECVBUF_SIZE (20 * UINT16_MAX)
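/*
 * That is room for up to 20 * 65535 = 1,310,700 bytes (roughly 1.25 MiB) of
 * datagram data in one receive buffer.
 */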
#else
/*
 * A single DNS message size
 */
#define ISC_NETMGR_UDP_RECVBUF_SIZE UINT16_MAX
#endif

/*
 * The TCP send and receive buffers can fit one maximum-sized DNS message plus
 * its two-byte length prefix; the receive buffer size here affects TCP, DoT,
 * and DoH.
 */
#define ISC_NETMGR_TCP_SENDBUF_SIZE (sizeof(uint16_t) + UINT16_MAX)
#define ISC_NETMGR_TCP_RECVBUF_SIZE (sizeof(uint16_t) + UINT16_MAX)

/* Pick the larger buffer */
#define ISC_NETMGR_RECVBUF_SIZE                                     \
        (ISC_NETMGR_UDP_RECVBUF_SIZE >= ISC_NETMGR_TCP_RECVBUF_SIZE \
                 ? ISC_NETMGR_UDP_RECVBUF_SIZE                      \
                 : ISC_NETMGR_TCP_RECVBUF_SIZE)

/*
 * Make sure our RECVBUF size is large enough
 */
STATIC_ASSERT(ISC_NETMGR_UDP_RECVBUF_SIZE <= ISC_NETMGR_RECVBUF_SIZE,
              "UDP receive buffer size must be smaller than or equal to the "
              "worker receive buffer size");

STATIC_ASSERT(ISC_NETMGR_TCP_RECVBUF_SIZE <= ISC_NETMGR_RECVBUF_SIZE,
              "TCP receive buffer size must be smaller than or equal to the "
              "worker receive buffer size");

/*%
 * Maximum number of outstanding DNS messages that we process in a single
 * TCP read.
 */
#define ISC_NETMGR_MAX_STREAM_CLIENTS_PER_CONN 23

/*%
 * Regular TCP buffer size.
 */
#define NM_REG_BUF 4096

/*%
 * A larger buffer for when the regular one isn't enough; this will hold two
 * full DNS packets with lengths. The netmgr receives 64k at most in TCPDNS
 * or TLSDNS connections, so there's no risk of overrun when using a buffer
 * this size.
 */
#define NM_BIG_BUF (ISC_NETMGR_TCP_RECVBUF_SIZE * 2)

/*%
 * Maximum segment size (MSS) of a TCP socket on which the server responds to
 * queries. A value lower than the common MSS on Ethernet (1220, that is
 * 1280 (IPv6 minimum link MTU) - 40 (IPv6 fixed header) - 20 (TCP fixed
 * header)) avoids path MTU problems.
 */
#define NM_MAXSEG (1280 - 20 - 40)
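
/*
 * Illustrative sketch (an assumption, not code from this header): a cap like
 * NM_MAXSEG is typically applied to a TCP socket with the standard TCP_MAXSEG
 * socket option, e.g.:
 *
 *         int mss = NM_MAXSEG;
 *         (void)setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss));
 *
 * The actual call site in the network manager is not shown in this header.
 */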

/*%
 * How many isc_nmhandle and isc_nm_uvreq objects we cache for reuse in a
 * socket.
 */
#define ISC_NM_NMSOCKET_MAX 64
#define ISC_NM_NMHANDLES_MAX 64
#define ISC_NM_UVREQS_MAX 64

/*% ISC_PROXY2_MIN_AF_UNIX_SIZE is the largest PROXYv2 header size when TLVs are not used */
#define ISC_NM_PROXY2_DEFAULT_BUFFER_SIZE (ISC_PROXY2_MIN_AF_UNIX_SIZE)

/*
 * Define ISC_NETMGR_TRACE to activate tracing of handles and sockets.
 * This will impair performance but enables us to quickly determine,
 * if netmgr resources haven't been cleaned up on shutdown, which ones
 * are still in use.
 */
#if ISC_NETMGR_TRACE
#define TRACE_SIZE 8

#if defined(__linux__)
#include <syscall.h>
#define gettid() (uint64_t) syscall(SYS_gettid)
#elif defined(__FreeBSD__)
#include <pthread_np.h>
#define gettid() (uint64_t)(pthread_getthreadid_np())
#elif defined(__OpenBSD__)
#include <unistd.h>
#define gettid() (uint64_t)(getthrid())
#elif defined(__NetBSD__)
#include <lwp.h>
#define gettid() (uint64_t)(_lwp_self())
#elif defined(__DragonFly__)
#include <unistd.h>
#define gettid() (uint64_t)(lwp_gettid())
#else
#define gettid() (uint64_t)(pthread_self())
#endif

#define NETMGR_TRACE_LOG(format, ...)                                \
        fprintf(stderr, "%" PRIu64 ":%d:%s:%u:%s:" format, gettid(), \
                isc_tid(), file, line, func, __VA_ARGS__)

#define FLARG                                                                  \
        , const char *func ISC_ATTR_UNUSED, const char *file ISC_ATTR_UNUSED, \
                unsigned int line ISC_ATTR_UNUSED

#define FLARG_PASS , func, file, line
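
/*
 * Illustrative sketch of how the tracing helpers above compose (an assumed
 * usage pattern, not a definition from this header): an internal function
 * takes FLARG so that the caller's __func__/__FILE__/__LINE__ are available
 * to NETMGR_TRACE_LOG, and forwards them onward with FLARG_PASS:
 *
 *         static void
 *         example_op(isc_nmsocket_t *sock FLARG) {
 *                 NETMGR_TRACE_LOG("op on socket %p\n", sock);
 *                 example_helper(sock FLARG_PASS);
 *         }
 *
 * example_op() and example_helper() are hypothetical names used only for
 * illustration.
 */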

#define isc__nm_uvreq_get(sock) \
        isc___nm_uvreq_get(sock, __func__, __FILE__, __LINE__)
#define isc__nm_uvreq_put(req) \
        isc___nm_uvreq_put(req, __func__, __FILE__, __LINE__)
#define isc__nmsocket_init(sock, mgr, type, iface, parent)            \
        isc___nmsocket_init(sock, mgr, type, iface, parent, __func__, \
                            __FILE__, __LINE__)
#define isc__nmsocket_put(sockp) \
        isc___nmsocket_put(sockp, __func__, __FILE__, __LINE__)
|
Refactor netmgr and add more unit tests
This is part of the work intended to make the netmgr stable, testable,
maintainable and tested. It contains numerous changes to the netmgr
code and, unfortunately, it was not possible to split them into smaller
chunks, as the work needs to be committed as a single complete unit.
NOTE: There is quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c; it should be a subject of refactoring in the future.
The changes included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and the newly added tcp_quota_test).
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
by including the netmgr/{udp,tcp}.c source files directly.
* The netievents that we put on the nm queue have a variable number of
members; of these, the isc_nmsocket_t and isc_nmhandle_t always need
to be attached before enqueueing the netievent_<foo> and detached
after we have called the isc_nm_async_<foo>, to ensure that the
socket (handle) doesn't disappear between scheduling the event and
actually executing it.
* Cancelling an in-flight TCP connection using libuv requires calling
uv_close() on the original uv_tcp_t handle, which breaks too many
assumptions made in the netmgr code. Instead of using a uv_timer for
TCP connection timeouts, we use a platform-specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}.
When isc_nm_listentcp() or isc_nm_tcpconnect() was called, it waited,
using a condition variable and a mutex, for the socket either to end
up in an error state (that path was fine) or to become listening or
connected. Several things could happen:
0. everything is ok
1. the waiting thread could miss the SIGNAL() because the enqueued
event was processed before we could even start WAIT()ing. If the
operation ended with an error this was still fine, as the error
variable would be unchanged.
2. the waiting thread could miss sock->{connected,listening} being set
to `true`, because it was reset to `false` in
tcp_{listen,connect}close_cb() when the connection was so short-lived
that the socket was closed before we could start WAIT()ing.
(A sketch of the predicate-based wait that avoids these races follows
this commit message.)
* The tcpdns protocol has been converted to use libuv directly.
Previously, tcpdns was layered on top of the netmgr tcp protocol,
which proved very complicated to understand, fix and change. The new
tcpdns protocol is modeled on the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns protocols no longer use isc_uv_import/isc_uv_export
to pass accepted TCP sockets between netthreads; instead (similarly to
UDP) they use a per-netthread uv_loop listener. This greatly reduces
complexity, as the socket always runs in its associated nm and uv
loops, and we no longer touch the libuv internals.
There is an unfortunate side effect, though: the new code requires
operating-system support for load-balanced sockets for both UDP and
TCP (see #2137). If the operating system doesn't support load-balanced
sockets (SO_REUSEPORT on Linux or SO_REUSEPORT_LB on FreeBSD 12+),
the number of netthreads is limited to 1.
* The netmgr now has two debugging #ifdefs:
1. The already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering an assertion failure. This option reduces
performance when enabled, but in theory it could be enabled even on
low-performance systems.
2. A new NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging, allowing the software engineer to precisely
track any attach/detach operations on the nmsockets and nmhandles.
This is not suitable for any kind of production machine, only for
debugging.
* The tlsdns netmgr protocol has been split from tcpdns; it still uses
the old method of stacking the netmgr boxes on top of each other. We
will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit, but do not assert, the tcp buffer size in tcp_alloc_cb.
Closes: #2061
2020-11-12 10:32:18 +01:00
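The synchronization fix described above boils down to waiting on a
predicate (the published socket state) rather than on the bare
condition variable. A minimal sketch of that pattern, using plain
pthreads and hypothetical example_* names rather than the actual
netmgr code:

#include <pthread.h>
#include <stdbool.h>

typedef struct example_waiter {
	pthread_mutex_t lock;
	pthread_cond_t	cond;
	bool		listening; /* set by the worker on success */
	int		result;	   /* nonzero error code on failure */
} example_waiter_t;

/* Worker side: publish the outcome under the lock, then signal. */
static void
example_signal(example_waiter_t *w, bool ok, int result) {
	pthread_mutex_lock(&w->lock);
	w->listening = ok;
	w->result = result;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

/* Caller side: re-checking the predicate under the lock means a
 * SIGNAL() delivered before we even start waiting is not lost. */
static int
example_wait(example_waiter_t *w) {
	int result;

	pthread_mutex_lock(&w->lock);
	while (!w->listening && w->result == 0) {
		pthread_cond_wait(&w->cond, &w->lock);
	}
	result = w->result;
	pthread_mutex_unlock(&w->lock);
	return result;
}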
|
|
|
#define isc__nmsocket_attach(sock, target) \
|
Fix the streaming read callback shutdown logic
When shutting down TCP sockets, the read callback calling logic was
flawed: it would call either one callback too few or one too many. Fix
the logic as follows:
1. When isc_nm_read() has been called but isc_nm_read_stop() hasn't
been called on the handle, the read callback is called with
ISC_R_CANCELED to cancel active reading from the socket/handle.
2. When both isc_nm_read() and isc_nm_read_stop() have been called on
the handle, the read callback is called with ISC_R_SHUTTINGDOWN to
signal that the dormant (not-reading) socket is being shut down.
3. The .reading and .recv_read flags are a little bit tricky. The
.reading flag indicates whether the outer layer is reading the data
(that would be uv_tcp_t for TCP and isc_nmsocket_t (TCP) for
TLSStream); the .recv_read flag indicates whether somebody is
interested in the data read from the socket.
Usually, you would expect .reading to be false when .recv_read is
false, but it gets even trickier with TLSStream, as the TLS protocol
might need to read from the socket even when sending data.
Fix the usage of the .recv_read and .reading flags in the TLSStream
to match their true meaning - which mostly consists of using
.recv_read everywhere and then wrapping isc_nm_read() and
isc_nm_read_stop() with the .reading flag.
4. The TLS failed-read helper has been modified to resemble the TCP
code as much as possible, clearing and re-setting the .recv_read flag
in the TCP timeout code has been fixed, and .recv_read is now cleared
when isc_nm_read_stop() has been called on the streaming socket.
5. The use of the Network Manager in the named_controlconf, isccc_ccmsg,
and isc_httpd units has been greatly simplified thanks to the improved
design.
6. More unit tests for TCP and TLS covering the shutdown conditions
have been added.
(A minimal sketch of the dispatch described in points 1 and 2 follows
this commit message.)
Co-authored-by: Ondřej Surý <ondrej@isc.org>
Co-authored-by: Artem Boldariev <artem@isc.org>
2023-04-13 17:27:50 +02:00
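To make points 1 and 2 concrete, here is a minimal sketch of the final
read-callback dispatch, using hypothetical example_* types rather than
the real netmgr internals (only the ISC_R_* result codes are real):

#include <stdbool.h>
#include <stddef.h>

#include <isc/result.h>

typedef void (*example_read_cb_t)(void *cbarg, isc_result_t result);

typedef struct example_stream {
	bool		  recv_read;  /* a caller is actively reading */
	example_read_cb_t recv_cb;    /* pending read callback, if any */
	void		 *recv_cbarg;
} example_stream_t;

static void
example_shutdown_read(example_stream_t *s) {
	example_read_cb_t cb = s->recv_cb;

	if (cb == NULL) {
		return; /* no pending reader, nothing to report */
	}
	s->recv_cb = NULL; /* guarantee exactly one final callback */

	if (s->recv_read) {
		/* isc_nm_read() called, isc_nm_read_stop() not called */
		cb(s->recv_cbarg, ISC_R_CANCELED);
	} else {
		/* reading already stopped; the socket is dormant */
		cb(s->recv_cbarg, ISC_R_SHUTTINGDOWN);
	}
}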
|
|
|
isc___nmsocket_attach(sock, target, __func__, __FILE__, __LINE__)
|
2020-11-12 10:32:18 +01:00
|
|
|
#define isc__nmsocket_detach(socketp) \
|
2023-04-13 17:27:50 +02:00
|
|
|
isc___nmsocket_detach(socketp, __func__, __FILE__, __LINE__)
|
2020-11-12 10:32:18 +01:00
|
|
|
#define isc__nmsocket_close(socketp) \
|
2023-04-13 17:27:50 +02:00
|
|
|
isc___nmsocket_close(socketp, __func__, __FILE__, __LINE__)
|
2020-11-12 10:32:18 +01:00
|
|
|
#define isc__nmhandle_get(sock, peer, local) \
|
2023-04-13 17:27:50 +02:00
|
|
|
isc___nmhandle_get(sock, peer, local, __func__, __FILE__, __LINE__)
|
2020-11-12 10:32:18 +01:00
|
|
|
#define isc__nmsocket_prep_destroy(sock) \
|
2023-04-13 17:27:50 +02:00
|
|
|
isc___nmsocket_prep_destroy(sock, __func__, __FILE__, __LINE__)
|
|
|
|
#define isc__nm_get_read_req(sock, sockaddr) \
|
|
|
|
isc___nm_get_read_req(sock, sockaddr, __func__, __FILE__, __LINE__)
|
2020-11-12 10:32:18 +01:00
|
|
|
#else
|
|
|
|
#define NETMGR_TRACE_LOG(format, ...)
|
|
|
|
|
|
|
|
#define FLARG
|
2023-01-17 13:58:10 -08:00
|
|
|
#define FLARG_PASS
|
2023-03-24 12:11:44 +01:00
|
|
|
#define isc__nm_uvreq_get(sock) isc___nm_uvreq_get(sock)
|
|
|
|
#define isc__nm_uvreq_put(req) isc___nm_uvreq_put(req)
|
2023-01-03 08:27:54 +01:00
|
|
|
#define isc__nmsocket_init(sock, mgr, type, iface, parent) \
|
|
|
|
isc___nmsocket_init(sock, mgr, type, iface, parent)
|
2020-11-12 10:32:18 +01:00
|
|
|
#define isc__nmsocket_put(sockp) isc___nmsocket_put(sockp)
|
|
|
|
#define isc__nmsocket_attach(sock, target) isc___nmsocket_attach(sock, target)
|
|
|
|
#define isc__nmsocket_detach(socketp) isc___nmsocket_detach(socketp)
|
|
|
|
#define isc__nmsocket_close(socketp) isc___nmsocket_close(socketp)
|
|
|
|
#define isc__nmhandle_get(sock, peer, local) \
|
|
|
|
isc___nmhandle_get(sock, peer, local)
|
|
|
|
#define isc__nmsocket_prep_destroy(sock) isc___nmsocket_prep_destroy(sock)
|
2023-04-13 17:27:50 +02:00
|
|
|
#define isc__nm_get_read_req(sock, sockaddr) \
|
|
|
|
isc___nm_get_read_req(sock, sockaddr)
|
2020-09-02 17:57:44 +02:00
|
|
|
#endif
|
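The macro pairs above implement call-site tracing: with
ISC_NETMGR_TRACE defined, the public double-underscore names forward
__func__, __FILE__ and __LINE__ to the triple-underscore
implementations; without it, the extra arguments disappear entirely. A
self-contained sketch of the same pattern, with hypothetical EXAMPLE_*
names unrelated to the real netmgr:

#include <stdio.h>

#ifdef EXAMPLE_TRACE
#define EXAMPLE_FLARG \
	, const char *func, const char *file, unsigned int line
#define EXAMPLE_FLARG_PASS , __func__, __FILE__, __LINE__
#else
#define EXAMPLE_FLARG
#define EXAMPLE_FLARG_PASS
#endif

/* Callers always use the short form; the trace arguments are added
 * (or not) by the macro, so non-tracing builds pay no cost. */
#define example_attach(obj) example__attach(obj EXAMPLE_FLARG_PASS)

static void
example__attach(void *obj EXAMPLE_FLARG) {
#ifdef EXAMPLE_TRACE
	fprintf(stderr, "attach %p called from %s() at %s:%u\n", obj, func,
		file, line);
#else
	(void)obj;
#endif
	/* ... the real attach work would go here ... */
}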
|
|
|
|
2022-02-22 23:40:39 +01:00
|
|
|
typedef struct isc__nm_uvreq isc__nm_uvreq_t;
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
/*
|
|
|
|
* Single network event loop worker.
|
|
|
|
*/
|
|
|
|
typedef struct isc__networker {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_mem_t *mctx;
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_refcount_t references;
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_loop_t *loop;
|
|
|
|
isc_nm_t *netmgr;
|
|
|
|
bool shuttingdown;
|
|
|
|
|
2020-01-29 13:16:04 +01:00
|
|
|
char *recvbuf;
|
2019-11-15 13:22:13 -08:00
|
|
|
bool recvbuf_inuse;
|
2023-01-03 08:27:54 +01:00
|
|
|
|
|
|
|
ISC_LIST(isc_nmsocket_t) active_sockets;
|
|
|
|
|
2023-09-12 19:13:45 +02:00
|
|
|
isc_mempool_t *nmsocket_pool;
|
2023-01-04 15:57:00 +01:00
|
|
|
isc_mempool_t *uvreq_pool;
|
2019-11-05 13:55:54 -08:00
|
|
|
} isc__networker_t;
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
ISC_REFCOUNT_DECL(isc__networker);
|
|
|
|
|
2023-09-26 15:37:48 +03:00
|
|
|
#ifdef ISC_NETMGR_TRACE
|
2023-01-03 08:27:54 +01:00
|
|
|
void
|
|
|
|
isc__nm_dump_active(isc__networker_t *worker);
|
|
|
|
|
2023-09-26 15:37:48 +03:00
|
|
|
void
|
|
|
|
isc__nm_dump_active_manager(isc_nm_t *netmgr);
|
|
|
|
#endif /* ISC_NETMGR_TRACE */
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
/*
|
|
|
|
* A general handle for a connection bound to a networker. For UDP
|
|
|
|
* connections we have the peer address here, so both TCP and UDP can be
|
|
|
|
* handled with a simple send-like function.
|
|
|
|
*/
|
2020-07-01 00:49:12 -07:00
|
|
|
#define NMHANDLE_MAGIC ISC_MAGIC('N', 'M', 'H', 'D')
|
|
|
|
#define VALID_NMHANDLE(t) \
|
|
|
|
(ISC_MAGIC_VALID(t, NMHANDLE_MAGIC) && \
|
|
|
|
atomic_load(&(t)->references) > 0)
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
typedef void (*isc__nm_closecb)(isc_nmhandle_t *);
|
2021-02-03 16:59:49 -08:00
|
|
|
typedef struct isc_nm_http_session isc_nm_http_session_t;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
struct isc_nmhandle {
|
|
|
|
int magic;
|
|
|
|
isc_refcount_t references;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The socket is not 'attached' in the traditional
|
|
|
|
* reference-counting sense. Instead, we keep all handles in an
|
|
|
|
* array in the socket object. This way, we don't have circular
|
|
|
|
* dependencies and we can close all handles when we're destroying
|
|
|
|
* the socket.
|
|
|
|
*/
|
|
|
|
isc_nmsocket_t *sock;
|
|
|
|
|
2021-02-03 16:59:49 -08:00
|
|
|
isc_nm_http_session_t *httpsession;
|
2020-10-31 20:42:18 +01:00
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_sockaddr_t peer;
|
|
|
|
isc_sockaddr_t local;
|
2023-03-16 12:50:04 +02:00
|
|
|
bool proxy_is_unspec;
|
2023-07-12 15:25:38 +03:00
|
|
|
struct isc_nmhandle *proxy_udphandle;
|
2019-11-08 10:52:49 -08:00
|
|
|
isc_nm_opaquecb_t doreset; /* reset extra callback, external */
|
|
|
|
isc_nm_opaquecb_t dofree; /* free extra callback, external */
|
2023-01-03 08:27:54 +01:00
|
|
|
#if ISC_NETMGR_TRACE
|
2020-09-02 17:57:44 +02:00
|
|
|
void *backtrace[TRACE_SIZE];
|
|
|
|
int backtrace_size;
|
|
|
|
#endif
|
2023-01-03 08:27:54 +01:00
|
|
|
LINK(isc_nmhandle_t) active_link;
|
2023-01-04 15:57:00 +01:00
|
|
|
LINK(isc_nmhandle_t) inactive_link;
|
2023-08-16 16:30:53 +02:00
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
void *opaque;
|
2023-04-09 06:48:46 +02:00
|
|
|
|
|
|
|
isc_job_t job;
|
2019-11-05 13:55:54 -08:00
|
|
|
};
|
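The comment above describes the ownership model: a handle points to its
socket, but that pointer is not a counted reference; instead the socket
keeps track of all of its handles and closes them when it is destroyed,
which avoids circular dependencies. A minimal sketch with hypothetical
example_* types, illustrated with a simple linked list rather than the
real array/list bookkeeping:

typedef struct example_handle example_handle_t;
struct example_handle {
	example_handle_t *next; /* sibling handle owned by the socket */
	/* ... per-handle state ... */
};

typedef struct example_socket {
	example_handle_t *handles; /* all live handles for this socket */
} example_socket_t;

static void
example_socket_destroy(example_socket_t *sock,
		       void (*close_cb)(example_handle_t *)) {
	/* The socket walks and closes every handle it still owns before
	 * going away; no handle holds a reference keeping it alive. */
	example_handle_t *h = sock->handles;

	while (h != NULL) {
		example_handle_t *next = h->next;
		close_cb(h);
		h = next;
	}
	sock->handles = NULL;
}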
|
|
|
|
|
|
|
typedef union {
|
|
|
|
isc_nm_recv_cb_t recv;
|
|
|
|
isc_nm_cb_t send;
|
|
|
|
isc_nm_cb_t connect;
|
|
|
|
} isc__nm_cb_t;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wrapper around uv_req_t with 'our' fields in it. req->data should
|
|
|
|
* always point to its parent. Note that we always allocate more than
|
|
|
|
* sizeof(struct) because we make room for different req types;
|
|
|
|
*/
|
|
|
|
#define UVREQ_MAGIC ISC_MAGIC('N', 'M', 'U', 'R')
|
|
|
|
#define VALID_UVREQ(t) ISC_MAGIC_VALID(t, UVREQ_MAGIC)
|
|
|
|
|
2020-05-13 17:37:51 +02:00
|
|
|
typedef struct isc__nm_uvreq isc__nm_uvreq_t;
|
|
|
|
struct isc__nm_uvreq {
|
2019-11-05 13:55:54 -08:00
|
|
|
int magic;
|
|
|
|
isc_nmsocket_t *sock;
|
|
|
|
isc_nmhandle_t *handle;
|
2022-03-10 13:51:08 +01:00
|
|
|
char tcplen[2]; /* The TCP DNS message length */
|
|
|
|
uv_buf_t uvbuf; /* translated isc_region_t, to be
|
|
|
|
* sent or received */
|
|
|
|
isc_sockaddr_t local; /* local address */
|
|
|
|
isc_sockaddr_t peer; /* peer address */
|
|
|
|
isc__nm_cb_t cb; /* callback */
|
|
|
|
void *cbarg; /* callback argument */
|
|
|
|
isc_nm_timer_t *timer; /* TCP write timer */
|
2022-07-13 09:34:47 +02:00
|
|
|
int connect_tries; /* connect retries */
|
2023-03-23 06:56:17 +01:00
|
|
|
isc_result_t result;
|
2022-03-10 13:51:08 +01:00
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
union {
|
2020-10-29 12:04:00 +01:00
|
|
|
uv_handle_t handle;
|
2019-11-05 13:55:54 -08:00
|
|
|
uv_write_t write;
|
|
|
|
uv_connect_t connect;
|
|
|
|
uv_udp_send_t udp_send;
|
|
|
|
} uv_req;
|
2020-05-13 17:37:51 +02:00
|
|
|
ISC_LINK(isc__nm_uvreq_t) link;
|
2023-03-24 12:11:44 +01:00
|
|
|
ISC_LINK(isc__nm_uvreq_t) active_link;
|
2023-03-27 22:40:57 +02:00
|
|
|
|
|
|
|
isc_job_t job;
|
2020-05-13 17:37:51 +02:00
|
|
|
};
|
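The comment above documents a convention worth illustrating: the
embedded libuv request's ->data member always points back at the
wrapping request, so that libuv callbacks can recover it. A minimal
sketch with a hypothetical example_uvreq_t (only the libuv types and
callback signature are real):

#include <stdlib.h>
#include <uv.h>

typedef struct example_uvreq {
	union {
		uv_req_t   req;
		uv_write_t write;
	} uv_req;
	void *cbarg; /* an example "our" field */
} example_uvreq_t;

static example_uvreq_t *
example_uvreq_new(void *cbarg) {
	/* allocation-failure handling omitted in this sketch */
	example_uvreq_t *r = calloc(1, sizeof(*r));

	r->uv_req.req.data = r; /* parent back-pointer for callbacks */
	r->cbarg = cbarg;
	return r;
}

/* A libuv write callback recovers the wrapper through ->data: */
static void
example_write_cb(uv_write_t *wreq, int status) {
	example_uvreq_t *r = wreq->data;

	(void)status;
	/* ... use r->cbarg, then release the request ... */
	free(r);
}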
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Network manager
|
|
|
|
*/
|
|
|
|
#define NM_MAGIC ISC_MAGIC('N', 'E', 'T', 'M')
|
|
|
|
#define VALID_NM(t) ISC_MAGIC_VALID(t, NM_MAGIC)
|
|
|
|
|
|
|
|
struct isc_nm {
|
|
|
|
int magic;
|
|
|
|
isc_refcount_t references;
|
|
|
|
isc_mem_t *mctx;
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_loopmgr_t *loopmgr;
|
|
|
|
uint32_t nloops;
|
2019-11-05 13:55:54 -08:00
|
|
|
isc__networker_t *workers;
|
2019-11-21 17:08:06 -08:00
|
|
|
|
2020-01-05 01:02:12 -08:00
|
|
|
isc_stats_t *stats;
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
atomic_uint_fast32_t maxudp;
|
|
|
|
|
2022-04-01 14:43:14 +02:00
|
|
|
bool load_balance_sockets;
|
|
|
|
|
2019-11-22 15:57:42 -08:00
|
|
|
/*
|
|
|
|
* Active connections are being closed and new connections are
|
|
|
|
* no longer allowed.
|
|
|
|
*/
|
2022-07-26 13:03:45 +02:00
|
|
|
atomic_bool shuttingdown;
|
2019-11-20 22:33:35 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Timeout values for TCP connections, corresponding to
|
|
|
|
* tcp-initial-timeout, tcp-idle-timeout, tcp-keepalive-timeout,
|
|
|
|
* and tcp-advertised-timeout. Note that these are stored in
|
|
|
|
* milliseconds so they can be used directly with the libuv timer,
|
|
|
|
* but they are configured in tenths of seconds.
|
|
|
|
*/
|
2020-12-02 09:52:39 +01:00
|
|
|
atomic_uint_fast32_t init;
|
|
|
|
atomic_uint_fast32_t idle;
|
|
|
|
atomic_uint_fast32_t keepalive;
|
|
|
|
atomic_uint_fast32_t advertised;
|
2020-09-02 17:57:44 +02:00
|
|
|
|
2020-12-02 20:51:38 +01:00
|
|
|
/*
|
|
|
|
* Socket SO_RCVBUF and SO_SNDBUF values
|
|
|
|
*/
|
|
|
|
atomic_int_fast32_t recv_udp_buffer_size;
|
|
|
|
atomic_int_fast32_t send_udp_buffer_size;
|
|
|
|
atomic_int_fast32_t recv_tcp_buffer_size;
|
|
|
|
atomic_int_fast32_t send_tcp_buffer_size;
|
2019-11-05 13:55:54 -08:00
|
|
|
};
|
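As the comment above notes, these timeout values are configured in
tenths of a second but stored in milliseconds so they can be handed
directly to libuv timers; the conversion is simply a factor of 100. A
trivial illustration (hypothetical helper name):

#include <stdint.h>

static uint32_t
example_tenths_to_ms(uint32_t tenths) {
	return tenths * 100; /* e.g. 300 tenths (30 s) -> 30000 ms */
}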
|
|
|
|
|
|
|
/*%
|
|
|
|
* A universal structure for either a single socket or a group of
|
|
|
|
* dup'd/SO_REUSEPORT-using sockets listening on the same interface.
|
|
|
|
*/
|
|
|
|
#define NMSOCK_MAGIC ISC_MAGIC('N', 'M', 'S', 'K')
|
|
|
|
#define VALID_NMSOCK(t) ISC_MAGIC_VALID(t, NMSOCK_MAGIC)
|
|
|
|
|
2020-01-05 01:02:12 -08:00
|
|
|
/*%
|
|
|
|
* Index into socket stat counter arrays.
|
|
|
|
*/
|
2021-10-02 16:26:43 -07:00
|
|
|
typedef enum {
|
2020-01-05 01:02:12 -08:00
|
|
|
STATID_OPEN = 0,
|
|
|
|
STATID_OPENFAIL = 1,
|
|
|
|
STATID_CLOSE = 2,
|
|
|
|
STATID_BINDFAIL = 3,
|
|
|
|
STATID_CONNECTFAIL = 4,
|
|
|
|
STATID_CONNECT = 5,
|
|
|
|
STATID_ACCEPTFAIL = 6,
|
|
|
|
STATID_ACCEPT = 7,
|
|
|
|
STATID_SENDFAIL = 8,
|
|
|
|
STATID_RECVFAIL = 9,
|
2021-10-02 16:26:43 -07:00
|
|
|
STATID_ACTIVE = 10,
|
2024-01-02 16:28:46 +03:00
|
|
|
STATID_CLIENTS = 11,
|
|
|
|
STATID_MAX = 12,
|
2021-10-02 16:26:43 -07:00
|
|
|
} isc__nm_statid_t;
|
2020-01-05 01:02:12 -08:00
|
|
|
|
2021-01-25 17:44:39 +02:00
|
|
|
typedef struct isc_nmsocket_tls_send_req {
|
|
|
|
isc_nmsocket_t *tlssock;
|
2022-12-29 20:03:26 +02:00
|
|
|
isc_buffer_t data;
|
2021-02-03 16:59:49 -08:00
|
|
|
isc_nm_cb_t cb;
|
|
|
|
void *cbarg;
|
|
|
|
isc_nmhandle_t *handle;
|
|
|
|
bool finish;
|
2021-08-02 17:15:13 +03:00
|
|
|
uint8_t smallbuf[512];
|
2021-01-25 17:44:39 +02:00
|
|
|
} isc_nmsocket_tls_send_req_t;
|
|
|
|
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
|
|
|
|
2021-02-03 16:59:49 -08:00
|
|
|
typedef enum isc_http_request_type {
|
2020-12-07 14:19:10 +02:00
|
|
|
ISC_HTTP_REQ_GET,
|
|
|
|
ISC_HTTP_REQ_POST,
|
|
|
|
ISC_HTTP_REQ_UNSUPPORTED
|
2021-02-03 16:59:49 -08:00
|
|
|
} isc_http_request_type_t;
|
2020-12-07 14:19:10 +02:00
|
|
|
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; remove it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address.
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
typedef enum isc_http_scheme_type {
|
2020-12-07 14:19:10 +02:00
|
|
|
ISC_HTTP_SCHEME_HTTP,
|
|
|
|
ISC_HTTP_SCHEME_HTTP_SECURE,
|
|
|
|
ISC_HTTP_SCHEME_UNSUPPORTED
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; remove it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address.
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
} isc_http_scheme_type_t;
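
/*
 * Illustrative sketch (not part of the original header): mapping a request
 * type back to the corresponding HTTP method name.  The helper name
 * http_request_type_totext() is hypothetical and exists only for this
 * example.
 */
static inline const char *
http_request_type_totext(isc_http_request_type_t type) {
	switch (type) {
	case ISC_HTTP_REQ_GET:
		return ("GET");
	case ISC_HTTP_REQ_POST:
		return ("POST");
	default:
		return ("UNSUPPORTED");
	}
}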

typedef struct isc_nm_httphandler {
	int magic;
	char *path;
	isc_nm_recv_cb_t cb;
	void *cbarg;
	LINK(struct isc_nm_httphandler) link;
} isc_nm_httphandler_t;

struct isc_nm_http_endpoints {
	uint32_t magic;
	isc_mem_t *mctx;

	ISC_LIST(isc_nm_httphandler_t) handlers;

	isc_refcount_t references;
	atomic_bool in_use;
};
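
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * looking up the handler registered for a given request path.  The helper
 * name find_handler() is hypothetical, and <string.h> is assumed to be
 * available for strcmp(); the real matching logic in the HTTP code may
 * differ.
 */
static inline isc_nm_httphandler_t *
find_handler(struct isc_nm_http_endpoints *eps, const char *path) {
	for (isc_nm_httphandler_t *handler = ISC_LIST_HEAD(eps->handlers);
	     handler != NULL; handler = ISC_LIST_NEXT(handler, link))
	{
		if (strcmp(handler->path, path) == 0) {
			return (handler);
		}
	}
	return (NULL);
}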

typedef struct isc_nmsocket_h2 {
	isc_nmsocket_t *psock; /* owner of the structure */
	char *request_path;
	char *query_data;
	size_t query_data_len;
	bool query_too_large;

	isc_buffer_t rbuf;
	isc_buffer_t wbuf;

	int32_t stream_id;
	isc_nm_http_session_t *session;

	/* maximum concurrent streams (server-side) */
	atomic_uint_fast32_t max_concurrent_streams;

	uint32_t min_ttl; /* used to set "max-age" in responses */

	isc_http_request_type_t request_type;
	isc_http_scheme_type_t request_scheme;

	size_t content_length;
	char clenbuf[128];

	char cache_control_buf[128];

	int headers_error_code;
	size_t headers_data_processed;

	isc_nm_recv_cb_t cb;
	void *cbarg;
	LINK(struct isc_nmsocket_h2) link;

	isc_nm_http_endpoints_t **listener_endpoints;
	size_t n_listener_endpoints;

	isc_nm_http_endpoints_t *peer_endpoints;

	bool response_submitted;
	struct {
		char *uri;
		bool post;
		isc_tlsctx_t *tlsctx;
		isc_sockaddr_t local_interface;
		void *cstream;
		const char *tls_peer_verify_string;
	} connect;
} isc_nmsocket_h2_t;
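
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * how the preformatted buffers above might be filled before a response is
 * submitted.  The helper name format_response_headers() is hypothetical,
 * and <stdio.h> is assumed to be available for snprintf().
 */
static inline void
format_response_headers(isc_nmsocket_h2_t *h2) {
	/* Value for the "Content-Length" response header. */
	(void)snprintf(h2->clenbuf, sizeof(h2->clenbuf), "%zu",
		       h2->content_length);
	/* "Cache-Control: max-age=..." derived from the minimal TTL. */
	(void)snprintf(h2->cache_control_buf, sizeof(h2->cache_control_buf),
		       "max-age=%lu", (unsigned long)h2->min_ttl);
}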

#endif /* HAVE_LIBNGHTTP2 */

typedef void (*isc_nm_closehandlecb_t)(void *arg);
/*%<
 * Opaque callback function, used for isc_nmhandle 'reset' and 'free'
 * callbacks.
 */
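
/*
 * Illustrative sketch (an assumption): a callback matching the
 * isc_nm_closehandlecb_t signature.  The example_client_t type and the
 * function name are hypothetical; they only demonstrate the expected shape.
 */
typedef struct example_client {
	bool active;
} example_client_t;

static void
example_closehandle_cb(void *arg) {
	example_client_t *client = arg;

	/* The handle's references dropped to zero; release per-client state. */
	client->active = false;
}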

struct isc_nmsocket {
	/*% Unlocked, RO */
	int magic;
	uint32_t tid;
	isc_refcount_t references;
	isc_nmsocket_type type;
	isc__networker_t *worker;

	isc_barrier_t listen_barrier;
	isc_barrier_t stop_barrier;

	/*% Parent socket for multithreaded listeners */
	isc_nmsocket_t *parent;

	/*% TLS stuff */
	struct tlsstream {
		bool server;
		BIO *bio_in;
		BIO *bio_out;
		isc_tls_t *tls;
		isc_tlsctx_t *ctx;
		isc_tlsctx_t **listener_tls_ctx; /*%< A context reference per
						      worker */
		size_t n_listener_tls_ctx;
		isc_tlsctx_client_session_cache_t *client_sess_cache;
		bool client_session_saved;
		isc_nmsocket_t *tlslistener;
		isc_nmsocket_t *tlssocket;
		atomic_bool result_updated;
		enum {
			TLS_INIT,
			TLS_HANDSHAKE,
			TLS_IO,
			TLS_CLOSED
		} state; /*%< The order of these is significant */
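
		/*
		 * Illustrative note (an assumption): because the enumerators
		 * above are ordered, code can compare states numerically,
		 * e.g. "sock->tlsstream.state >= TLS_IO" means that the
		 * handshake has already completed.
		 */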
		size_t nsending;
		bool tcp_nodelay_value;
		isc_nmsocket_tls_send_req_t *send_req; /*%< Send req to reuse */
		bool reading;
	} tlsstream;

#if HAVE_LIBNGHTTP2
	isc_nmsocket_h2_t *h2;
#endif /* HAVE_LIBNGHTTP2 */

	struct {
		isc_dnsstream_assembler_t *input;
		bool reading;
		isc_nmsocket_t *listener;
		isc_nmsocket_t *sock;
		size_t nsending;
		void *send_req;
		bool dot_alpn_negotiated;
		const char *tls_verify_error;
	} streamdns;

	struct {
		isc_nmsocket_t *sock;
		bool reading;
		size_t nsending;
		void *send_req;
		union {
			isc_proxy2_handler_t *handler; /* server */
			isc_buffer_t *outbuf;	       /* client */
		} proxy2;
		bool header_processed;
		bool extra_processed; /* data arrived past header processed */
		isc_nmsocket_t **udp_server_socks; /* UDP sockets */
		size_t udp_server_socks_num;
	} proxy;

	/*%
	 * pquota is a non-attached pointer to the TCP client quota, stored in
	 * listening sockets.
	 */
	isc_quota_t *pquota;
	isc_job_t quotacb;

	/*%
	 * Socket statistics
	 */
	const isc_statscounter_t *statsindex;

	/*%
	 * TCP read/connect timeout timers.
	 */
	uv_timer_t read_timer;
	uint64_t read_timeout;
	uint64_t connect_timeout;

	/*%
	 * TCP write timeout timer.
	 */
	uint64_t write_timeout;

	/*
	 * Reading was throttled over TCP as the peer does not read the
	 * data we are sending back.
	 */
	bool reading_throttled;

	/*% outer socket is for 'wrapped' sockets - e.g. tcpdns in tcp */
	isc_nmsocket_t *outer;

	/*% server socket for connections */
	isc_nmsocket_t *server;

	/*% client socket for connections */
	isc_nmsocket_t *listener;

	/*% Child sockets for multi-socket setups */
	isc_nmsocket_t *children;
	uint_fast32_t nchildren;
	isc_sockaddr_t iface;
	isc_nmhandle_t *statichandle;
	isc_nmhandle_t *outerhandle;

	/*% TCP backlog */
	int backlog;

	/*% libuv data */
	uv_os_sock_t fd;
	union uv_any_handle uv_handle;

	/*% Peer address */
	isc_sockaddr_t peer;

	/*%
	 * Socket is active if it's listening, working, etc. If it's
	 * closing, then it doesn't make sense, for example, to
	 * push handles or reqs for reuse.
	 */
	bool active;
	bool destroying;

	bool route_sock;
|
|
|
/*%
|
|
|
|
* Socket is closed if it's not active and all the possible
|
|
|
|
* callbacks were fired, there are no active handles, etc.
|
2019-12-03 00:07:59 -08:00
|
|
|
* If active==false but closed==false, that means the socket
|
|
|
|
* is closing.
|
2019-11-05 13:55:54 -08:00
|
|
|
*/
|
2023-03-24 13:37:19 +01:00
|
|
|
bool closing;
|
|
|
|
bool closed;
|
|
|
|
bool connecting;
|
|
|
|
bool connected;
|
|
|
|
bool accepting;
|
2022-08-29 10:55:10 +02:00
|
|
|
bool reading;
|
2023-03-24 13:37:19 +01:00
|
|
|
bool timedout;
|
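
	/*
	 * Illustrative note (an assumption): the lifecycle described above
	 * can be tested as boolean expressions, e.g. a socket is in the
	 * process of closing when "(!sock->active && !sock->closed)".
	 */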

	/*%
	 * A timestamp of when the connection acceptance was delayed due
	 * to quota.
	 */
	isc_nanosecs_t quota_accept_ts;

	/*%
	 * Established an outgoing connection, as client not server.
	 */
	bool client;

	/*%
	 * The socket is processing a read callback; this is a guard to
	 * avoid reading more data before the read callback has returned.
	 */
	bool processing;

	/*%
	 * A TCP or TCPDNS socket has been set to use the keepalive
	 * timeout instead of the default idle timeout.
	 */
	bool keepalive;

	/*%
	 * 'spare' handles that can be reused to avoid allocations, for UDP.
	 */
	ISC_LIST(isc_nmhandle_t) inactive_handles;

	size_t inactive_handles_cur;
	size_t inactive_handles_max;

	/*%
	 * 'active' handles and uvreqs, mostly for debugging purposes.
	 */
	ISC_LIST(isc_nmhandle_t) active_handles;
	ISC_LIST(isc__nm_uvreq_t) active_uvreqs;

	size_t active_handles_cur;
	size_t active_handles_max;

	/*%
	 * Used to pass a result back from listen or connect events.
	 */
	isc_result_t result;

	/*%
	 * This function will be called with handle->sock
	 * as the argument whenever a handle's references drop
	 * to zero, after its reset callback has been called.
	 */
	isc_nm_closehandlecb_t closehandle_cb;
2019-11-08 10:52:49 -08:00
|
|
|
|
	isc_nmhandle_t *recv_handle;
	isc_nm_recv_cb_t recv_cb;
	void *recv_cbarg;

	isc_nm_cb_t connect_cb;
	void *connect_cbarg;

	isc_nm_accept_cb_t accept_cb;
	void *accept_cbarg;

	bool barriers_initialised;
	bool manual_read_timer;

#if ISC_NETMGR_TRACE
	void *backtrace[TRACE_SIZE];
	int backtrace_size;
#endif

	LINK(isc_nmsocket_t) active_link;
	isc_job_t job;
};

void
isc__nm_free_uvbuf(isc_nmsocket_t *sock, const uv_buf_t *buf);
/*%<
 * Free a buffer allocated for a receive operation.
 *
 * Note that as currently implemented, this doesn't actually free
 * anything; it just marks the isc__networker's UDP receive buffer
 * as "not in use".
 */

isc_nmhandle_t *
isc___nmhandle_get(isc_nmsocket_t *sock, isc_sockaddr_t const *peer,
		   isc_sockaddr_t const *local FLARG);
/*%<
 * Get a handle for the socket 'sock', allocating a new one
 * if there isn't one available in 'sock->inactivehandles'.
 *
 * If 'peer' is not NULL, set the handle's peer address to 'peer',
 * otherwise set it to 'sock->peer'.
 *
 * If 'local' is not NULL, set the handle's local address to 'local',
 * otherwise set it to 'sock->iface->addr'.
 *
 * 'sock' will be attached to 'handle->sock'. The caller may need
 * to detach the socket afterward.
 */

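/*
 * Usage sketch (illustrative only; 'isc__nmhandle_get' is assumed to be the
 * usual wrapper macro that supplies the FLARG trace arguments):
 *
 *	isc_nmhandle_t *handle = isc__nmhandle_get(sock, NULL, NULL);
 *	...
 *	isc_nmhandle_detach(&handle);
 */
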
isc__nm_uvreq_t *
isc___nm_uvreq_get(isc_nmsocket_t *sock FLARG);
/*%<
 * Get a UV request structure for the socket 'sock', allocating a
 * new one if there isn't one available in 'sock->inactivereqs'.
 */

void
isc___nm_uvreq_put(isc__nm_uvreq_t **req FLARG);
/*%<
 * Completes the use of a UV request structure, setting '*req' to NULL.
 *
 * The UV request is pushed onto the 'sock->inactivereqs' stack or,
 * if that doesn't work, freed.
 */

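/*
 * Lifecycle sketch (illustrative only; 'isc__nm_uvreq_get'/'_put' are
 * assumed to be the usual wrapper macros around the FLARG variants above):
 *
 *	isc__nm_uvreq_t *req = isc__nm_uvreq_get(sock);
 *	... drive a single libuv send/connect operation with 'req' ...
 *	isc__nm_uvreq_put(&req);
 */
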
void
isc___nmsocket_init(isc_nmsocket_t *sock, isc__networker_t *worker,
		    isc_nmsocket_type type, isc_sockaddr_t *iface,
		    isc_nmsocket_t *parent FLARG);
/*%<
 * Initialize socket 'sock', attach it to the networker 'worker', and
 * set it to type 'type' and its interface to 'iface'.
 */

void
isc___nmsocket_attach(isc_nmsocket_t *sock, isc_nmsocket_t **target FLARG);
/*%<
 * Attach to a socket, increasing refcount.
 */

void
isc___nmsocket_detach(isc_nmsocket_t **socketp FLARG);
/*%<
 * Detach from socket, decreasing refcount and possibly destroying the
 * socket if it's no longer referenced.
 */

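/*
 * Reference-counting sketch (illustrative only; the two-underscore names are
 * assumed to be the usual wrapper macros around the FLARG variants):
 *
 *	isc_nmsocket_t *ref = NULL;
 *	isc__nmsocket_attach(sock, &ref);    (refcount goes up)
 *	...
 *	isc__nmsocket_detach(&ref);          (refcount goes down; the socket
 *	                                      may be destroyed here)
 */
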
void
isc___nmsocket_prep_destroy(isc_nmsocket_t *sock FLARG);
/*%<
 * Mark 'sock' as inactive, close it if necessary, and destroy it
 * if there are no remaining references or active handles.
 */

void
isc__nmsocket_shutdown(isc_nmsocket_t *sock);
/*%<
 * Initiate the socket shutdown, which calls any active callbacks.
 */

void
isc__nmsocket_reset(isc_nmsocket_t *sock);
/*%<
 * Reset and close the socket.
 */

bool
isc__nmsocket_active(isc_nmsocket_t *sock);
/*%<
 * Determine whether 'sock' is active by checking 'sock->active'
 * or, for child sockets, 'sock->parent->active'.
 */

void
isc__nmsocket_clearcb(isc_nmsocket_t *sock);
/*%<
 * Clear the recv and accept callbacks in 'sock'.
 */

void
isc__nmsocket_timer_stop(isc_nmsocket_t *sock);
void
isc__nmsocket_timer_start(isc_nmsocket_t *sock);
void
isc__nmsocket_timer_restart(isc_nmsocket_t *sock);
bool
isc__nmsocket_timer_running(isc_nmsocket_t *sock);
/*%<
 * Start/stop/restart/check the timeout on the socket.
 */

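/*
 * Typical pattern (illustrative sketch): the read timer is armed when a read
 * is started and stopped once the expected data has arrived, e.g.:
 *
 *	isc__nmsocket_timer_start(sock);
 *	...
 *	if (isc__nmsocket_timer_running(sock)) {
 *		isc__nmsocket_timer_stop(sock);
 *	}
 */
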
void
isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
		  isc_result_t eresult, bool async);

void
isc__nm_readcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
	       isc_result_t eresult, bool async);
/*%<
 * Issue a read callback on the socket; used to call the callback
 * on failure conditions when the event can't be scheduled on the uv loop.
 */

void
isc__nm_sendcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
	       isc_result_t eresult, bool async);
/*%<
 * Issue a write callback on the socket; used to call the callback
 * on failure conditions when the event can't be scheduled on the uv loop.
 */

void
isc__nm_udp_send(isc_nmhandle_t *handle, const isc_region_t *region,
		 isc_nm_cb_t cb, void *cbarg);
/*%<
 * Back-end implementation of isc_nm_send() for UDP handles.
 */

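/*
 * Caller-side sketch (illustrative only; 'send_done' and 'my_cbarg' are
 * hypothetical): the public isc_nm_send() front end dispatches to
 * per-transport back ends such as isc__nm_udp_send() above.
 *
 *	static void
 *	send_done(isc_nmhandle_t *handle, isc_result_t eresult, void *cbarg) {
 *		if (eresult != ISC_R_SUCCESS) {
 *			... log the failure and release 'cbarg' ...
 *		}
 *	}
 *
 *	isc_nm_send(handle, &region, send_done, my_cbarg);
 */
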
void
isc__nm_udp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);
/*
 * Back-end implementation of isc_nm_read() for UDP handles.
 */

void
isc__nm_udp_close(isc_nmsocket_t *sock);
/*%<
 * Close a UDP socket.
 */

void
isc__nm_udp_shutdown(isc_nmsocket_t *sock);
/*%<
 * Called during the shutdown process to close and clean up connected
 * sockets.
 */

void
isc__nm_udp_stoplistening(isc_nmsocket_t *sock);
/*%<
 * Stop listening on 'sock'.
 */

void
isc__nm_udp_settimeout(isc_nmhandle_t *handle, uint32_t timeout);
/*%<
 * Set or clear the recv timeout for the UDP socket associated with 'handle'.
 */

void
isc__nm_tcp_send(isc_nmhandle_t *handle, const isc_region_t *region,
		 isc_nm_cb_t cb, void *cbarg);
/*%<
 * Back-end implementation of isc_nm_send() for TCP handles.
 */

void
isc__nm_tcp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);
/*
 * Start reading on this handle.
 */

void
isc__nm_tcp_close(isc_nmsocket_t *sock);
/*%<
 * Close a TCP socket.
 */

void
isc__nm_tcp_read_stop(isc_nmhandle_t *handle);
/*%<
 * Stop reading on this handle.
 */

void
isc__nm_tcp_shutdown(isc_nmsocket_t *sock);
/*%<
 * Called during the shutdown process to close and clean up connected
 * sockets.
 */

void
isc__nm_tcp_stoplistening(isc_nmsocket_t *sock);
/*%<
 * Stop listening on 'sock'.
 */

void
isc__nm_tcp_settimeout(isc_nmhandle_t *handle, uint32_t timeout);
/*%<
 * Set the read timeout for the TCP socket associated with 'handle'.
 */

void
isc__nmhandle_tcp_set_manual_timer(isc_nmhandle_t *handle, const bool manual);

void
isc__nm_tcp_senddns(isc_nmhandle_t *handle, const isc_region_t *region,
		    isc_nm_cb_t cb, void *cbarg);
/*%<
 * The same as 'isc__nm_tcp_send()', but with the data length sent
 * ahead of the data (two bytes, 16-bit, in big-endian format).
 */

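/*
 * Framing sketch (informational): the *_senddns() helpers prepend the DNS
 * message length as a 16-bit big-endian prefix, i.e. for a payload of
 * 'len' bytes:
 *
 *	uint8_t prefix[2];
 *	prefix[0] = (uint8_t)(len >> 8);
 *	prefix[1] = (uint8_t)(len & 0xff);
 */
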
void
isc__nm_tls_send(isc_nmhandle_t *handle, const isc_region_t *region,
		 isc_nm_cb_t cb, void *cbarg);
/*%<
 * Back-end implementation of isc_nm_send() for TLS handles.
 */

void
isc__nm_tls_senddns(isc_nmhandle_t *handle, const isc_region_t *region,
		    isc_nm_cb_t cb, void *cbarg);
/*%<
 * The same as 'isc__nm_tls_send()', but with the data length sent
 * ahead of the data (two bytes, 16-bit, in big-endian format).
 */

void
isc__nm_tls_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);
/*%<
 * Start reading on the TLS handle.
 */

void
isc__nm_tls_close(isc_nmsocket_t *sock);
/*%<
 * Close a TLS socket.
 */

void
isc__nm_tls_read_stop(isc_nmhandle_t *handle);
/*%<
 * Stop reading on the TLS handle.
 */

void
isc__nm_tls_cleanup_data(isc_nmsocket_t *sock);

void
isc__nm_tls_stoplistening(isc_nmsocket_t *sock);

void
isc__nm_tls_settimeout(isc_nmhandle_t *handle, uint32_t timeout);
void
isc__nm_tls_cleartimeout(isc_nmhandle_t *handle);
/*%<
 * Set the read timeout and reset the timer for the socket
 * associated with 'handle', and the TCP socket it wraps
 * around.
 */

void
isc__nmsocket_tls_reset(isc_nmsocket_t *sock);

void
isc__nmhandle_tls_set_manual_timer(isc_nmhandle_t *handle, const bool manual);

const char *
isc__nm_tls_verify_tls_peer_result_string(const isc_nmhandle_t *handle);

void
isc__nmhandle_tls_keepalive(isc_nmhandle_t *handle, bool value);
/*%<
 * Set the keepalive value on the underlying TCP handle.
 */

void
isc__nm_async_tls_set_tlsctx(isc_nmsocket_t *listener, isc_tlsctx_t *tlsctx,
			     const int tid);

void
isc__nmhandle_tls_setwritetimeout(isc_nmhandle_t *handle,
				  uint64_t write_timeout);

bool
isc__nmsocket_tls_timer_running(isc_nmsocket_t *sock);

void
isc__nmsocket_tls_timer_restart(isc_nmsocket_t *sock);

void
isc__nmsocket_tls_timer_stop(isc_nmsocket_t *sock);

void
isc__nm_tls_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result,
			   bool async);

void
isc__nmhandle_tls_get_selected_alpn(isc_nmhandle_t *handle,
				    const unsigned char **alpn,
				    unsigned int *alpnlen);

isc_result_t
isc__nmhandle_tls_set_tcp_nodelay(isc_nmhandle_t *handle, const bool value);

#if HAVE_LIBNGHTTP2

void
isc__nm_http_stoplistening(isc_nmsocket_t *sock);

void
isc__nm_http_settimeout(isc_nmhandle_t *handle, uint32_t timeout);
void
isc__nm_http_cleartimeout(isc_nmhandle_t *handle);
/*%<
 * Set the read timeout and reset the timer for the socket
 * associated with 'handle', and the TLS/TCP socket it wraps
 * around.
 */

void
isc__nmhandle_http_keepalive(isc_nmhandle_t *handle, bool value);
/*%<
 * Set the keepalive value on the underlying session handle.
 */

void
isc__nm_http_cleanup_data(isc_nmsocket_t *sock);

isc_result_t
isc__nm_http_request(isc_nmhandle_t *handle, isc_region_t *region,
		     isc_nm_recv_cb_t reply_cb, void *cbarg);

void
isc__nm_http_send(isc_nmhandle_t *handle, const isc_region_t *region,
		  isc_nm_cb_t cb, void *cbarg);

void
isc__nm_http_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);

void
isc__nm_http_close(isc_nmsocket_t *sock);

void
isc__nm_http_bad_request(isc_nmhandle_t *handle);
/*%<
 * Respond to the request with 400 "Bad Request" status.
 *
 * Requires:
 * \li 'handle' is a valid HTTP netmgr handle object, referencing a
 *     server-side socket
 */

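/*
 * Server-side sketch (illustrative only; 'recv_cb' and
 * 'request_is_wellformed' are hypothetical, and the callback arguments are
 * assumed to follow isc_nm_recv_cb_t):
 *
 *	static void
 *	recv_cb(isc_nmhandle_t *handle, isc_result_t result,
 *		isc_region_t *region, void *cbarg) {
 *		if (result == ISC_R_SUCCESS &&
 *		    !request_is_wellformed(region)) {
 *			isc__nm_http_bad_request(handle);
 *			return;
 *		}
 *		...
 *	}
 */
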
bool
isc__nm_http_has_encryption(const isc_nmhandle_t *handle);

void
isc__nm_http_set_maxage(isc_nmhandle_t *handle, const uint32_t ttl);

const char *
isc__nm_http_verify_tls_peer_result_string(const isc_nmhandle_t *handle);

bool
isc__nm_parse_httpquery(const char *query_string, const char **start,
			size_t *len);

char *
isc__nm_base64url_to_base64(isc_mem_t *mem, const char *base64url,
			    const size_t base64url_len, size_t *res_len);

char *
isc__nm_base64_to_base64url(isc_mem_t *mem, const char *base64,
			    const size_t base64_len, size_t *res_len);

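/*
 * Note (informational): base64url differs from base64 only in its alphabet,
 * using '-' and '_' where base64 uses '+' and '/'. For example:
 *
 *	base64:    "ab+cd/ef"
 *	base64url: "ab-cd_ef"
 *
 * The helpers above translate between the two forms, allocating the result
 * from 'mem'; how padding is handled is left to the implementation.
 */
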
void
isc__nm_httpsession_attach(isc_nm_http_session_t *source,
			   isc_nm_http_session_t **targetp);
void
isc__nm_httpsession_detach(isc_nm_http_session_t **sessionp);

isc_nmhandle_t *
isc__nm_httpsession_handle(isc_nm_http_session_t *session);

void
isc__nm_http_set_tlsctx(isc_nmsocket_t *sock, isc_tlsctx_t *tlsctx);

void
isc__nm_http_set_max_streams(isc_nmsocket_t *listener,
			     const uint32_t max_concurrent_streams);

#endif

void
isc__nm_streamdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb,
		       void *cbarg);

void
isc__nm_streamdns_send(isc_nmhandle_t *handle, const isc_region_t *region,
		       isc_nm_cb_t cb, void *cbarg);

void
isc__nm_streamdns_close(isc_nmsocket_t *sock);

void
isc__nm_streamdns_stoplistening(isc_nmsocket_t *sock);

void
isc__nm_streamdns_cleanup_data(isc_nmsocket_t *sock);

void
isc__nmhandle_streamdns_cleartimeout(isc_nmhandle_t *handle);

void
isc__nmhandle_streamdns_settimeout(isc_nmhandle_t *handle, uint32_t timeout);

void
isc__nmhandle_streamdns_keepalive(isc_nmhandle_t *handle, bool value);

void
isc__nmhandle_streamdns_setwritetimeout(isc_nmhandle_t *handle,
					uint32_t timeout);

bool
isc__nm_streamdns_has_encryption(const isc_nmhandle_t *handle);

const char *
isc__nm_streamdns_verify_tls_peer_result_string(const isc_nmhandle_t *handle);

void
isc__nm_streamdns_set_tlsctx(isc_nmsocket_t *listener, isc_tlsctx_t *tlsctx);

isc_result_t
isc__nm_streamdns_xfr_checkperm(isc_nmsocket_t *sock);

void
isc__nmsocket_streamdns_reset(isc_nmsocket_t *sock);

bool
isc__nmsocket_streamdns_timer_running(isc_nmsocket_t *sock);

void
isc__nmsocket_streamdns_timer_stop(isc_nmsocket_t *sock);

void
isc__nmsocket_streamdns_timer_restart(isc_nmsocket_t *sock);

void
isc__nm_streamdns_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result,
				 bool async);

bool
isc__nm_valid_proxy_addresses(const isc_sockaddr_t *src,
			      const isc_sockaddr_t *dst);

void
isc__nm_proxystream_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result,
				   bool async);

void
isc__nm_proxystream_stoplistening(isc_nmsocket_t *sock);

void
isc__nm_proxystream_cleanup_data(isc_nmsocket_t *sock);

void
isc__nmhandle_proxystream_cleartimeout(isc_nmhandle_t *handle);

void
isc__nmhandle_proxystream_settimeout(isc_nmhandle_t *handle, uint32_t timeout);

void
isc__nmhandle_proxystream_keepalive(isc_nmhandle_t *handle, bool value);

void
isc__nmhandle_proxystream_setwritetimeout(isc_nmhandle_t *handle,
					  uint64_t write_timeout);

void
isc__nmsocket_proxystream_reset(isc_nmsocket_t *sock);

bool
isc__nmsocket_proxystream_timer_running(isc_nmsocket_t *sock);

void
isc__nmsocket_proxystream_timer_restart(isc_nmsocket_t *sock);

void
isc__nmsocket_proxystream_timer_stop(isc_nmsocket_t *sock);

void
isc__nmhandle_proxystream_set_manual_timer(isc_nmhandle_t *handle,
					   const bool manual);

isc_result_t
isc__nmhandle_proxystream_set_tcp_nodelay(isc_nmhandle_t *handle,
					  const bool value);

void
isc__nm_proxystream_read_stop(isc_nmhandle_t *handle);

void
isc__nm_proxystream_close(isc_nmsocket_t *sock);

void
isc__nm_proxystream_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb,
			 void *cbarg);

void
isc__nm_proxystream_send(isc_nmhandle_t *handle, isc_region_t *region,
			 isc_nm_cb_t cb, void *cbarg);

void
isc__nm_proxystream_senddns(isc_nmhandle_t *handle, isc_region_t *region,
			    isc_nm_cb_t cb, void *cbarg);

void
|
2023-10-17 20:36:58 +03:00
|
|
|
isc__nm_proxystream_set_tlsctx(isc_nmsocket_t *listener, isc_tlsctx_t *tlsctx);
|
|
|
|
|
|
|
|
bool
|
|
|
|
isc__nm_proxystream_has_encryption(const isc_nmhandle_t *handle);
|
|
|
|
|
|
|
|
const char *
|
|
|
|
isc__nm_proxystream_verify_tls_peer_result_string(const isc_nmhandle_t *handle);
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nmhandle_proxystream_get_selected_alpn(isc_nmhandle_t *handle,
|
|
|
|
const unsigned char **alpn,
|
|
|
|
unsigned int *alpnlen);
|
|
|
|
|
|
|
|

void
isc__nm_proxyudp_failed_read_cb(isc_nmsocket_t *sock, const isc_result_t result,
				const bool async);

void
isc__nm_proxyudp_stoplistening(isc_nmsocket_t *listener);

void
isc__nm_proxyudp_cleanup_data(isc_nmsocket_t *sock);

void
isc__nmhandle_proxyudp_cleartimeout(isc_nmhandle_t *handle);

void
isc__nmhandle_proxyudp_settimeout(isc_nmhandle_t *handle, uint32_t timeout);

void
isc__nmhandle_proxyudp_setwritetimeout(isc_nmhandle_t *handle,
				       uint64_t write_timeout);

bool
isc__nmsocket_proxyudp_timer_running(isc_nmsocket_t *sock);

void
isc__nmsocket_proxyudp_timer_restart(isc_nmsocket_t *sock);

void
isc__nmsocket_proxyudp_timer_stop(isc_nmsocket_t *sock);

void
isc__nm_proxyudp_close(isc_nmsocket_t *sock);

void
isc__nm_proxyudp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg);

void
isc__nm_proxyudp_send(isc_nmhandle_t *handle, isc_region_t *region,
		      isc_nm_cb_t cb, void *cbarg);

void
isc__nm_incstats(isc_nmsocket_t *sock, isc__nm_statid_t id);
/*%<
 * Increment socket-related statistics counters.
 */

void
isc__nm_decstats(isc_nmsocket_t *sock, isc__nm_statid_t id);
/*%<
 * Decrement socket-related statistics counters.
 */

isc_result_t
isc__nm_socket(int domain, int type, int protocol, uv_os_sock_t *sockp);
/*%<
 * Platform independent socket() version
 */

void
isc__nm_closesocket(uv_os_sock_t sock);
/*%<
 * Platform independent closesocket() version
 */

isc_result_t
isc__nm_socket_reuse(uv_os_sock_t fd);
/*%<
 * Set the SO_REUSEADDR or SO_REUSEPORT (or equivalent) socket option on the fd
 */
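
/*
 * Illustrative sketch only (plain POSIX setsockopt(); the real
 * implementation is platform-dependent): address reuse is typically
 * enabled along these lines:
 *
 *	int on = 1;
 *	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) == -1) {
 *		return (ISC_R_FAILURE);
 *	}
 *	return (ISC_R_SUCCESS);
 */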

isc_result_t
isc__nm_socket_reuse_lb(uv_os_sock_t fd);
/*%<
 * Set the SO_REUSEPORT_LB (or equivalent) socket option on the fd
 */

isc_result_t
isc__nm_socket_incoming_cpu(uv_os_sock_t fd);
/*%<
 * Set the SO_INCOMING_CPU socket option on the fd if available
 */

isc_result_t
isc__nm_socket_disable_pmtud(uv_os_sock_t fd, sa_family_t sa_family);
/*%<
 * Disable Path MTU Discovery, either by disabling the IP(V6)_DONTFRAG socket
 * option or by setting the IP(V6)_MTU_DISCOVER socket option to
 * IP_PMTUDISC_OMIT
 */
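
/*
 * Illustrative sketch only (Linux IPv6 case with plain setsockopt();
 * the real implementation covers more platforms and address families):
 *
 *	#if defined(IPV6_MTU_DISCOVER) && defined(IP_PMTUDISC_OMIT)
 *	int action = IP_PMTUDISC_OMIT;
 *	(void)setsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER, &action,
 *			 sizeof(action));
 *	#elif defined(IPV6_DONTFRAG)
 *	int off = 0;
 *	(void)setsockopt(fd, IPPROTO_IPV6, IPV6_DONTFRAG, &off, sizeof(off));
 *	#endif
 */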

isc_result_t
isc__nm_socket_v6only(uv_os_sock_t fd, sa_family_t sa_family);
/*%<
 * Restrict the socket to sending and receiving IPv6 packets only
 */

isc_result_t
isc__nm_socket_connectiontimeout(uv_os_sock_t fd, int timeout_ms);
/*%<
 * Set the connection timeout in milliseconds; on non-Linux platforms,
 * the minimum value must be at least 1000 (1 second).
 */
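
/*
 * Illustrative sketch only, assuming TCP_USER_TIMEOUT is the Linux
 * mechanism used (other platforms rely on options such as
 * TCP_CONNECTIONTIMEOUT or TCP_KEEPINIT, which typically count in
 * seconds; hence the 1000 ms minimum above):
 *
 *	#ifdef TCP_USER_TIMEOUT
 *	unsigned int ms = (unsigned int)timeout_ms;
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &ms,
 *		       sizeof(ms)) == -1) {
 *		return (ISC_R_FAILURE);
 *	}
 *	#endif
 *	return (ISC_R_SUCCESS);
 */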

isc_result_t
isc__nm_socket_tcp_nodelay(const uv_os_sock_t fd, bool value);
/*%<
 * Disable or enable Nagle's algorithm on a TCP socket (sets TCP_NODELAY
 * when 'value' is 'true', clears it otherwise).
 */
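
/*
 * Illustrative sketch only (plain POSIX setsockopt(), not the actual
 * implementation):
 *
 *	int on = value ? 1 : 0;
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)) == -1) {
 *		return (ISC_R_FAILURE);
 *	}
 *	return (ISC_R_SUCCESS);
 */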

isc_result_t
isc__nm_socket_tcp_maxseg(uv_os_sock_t fd, int size);
/*%<
 * Set the TCP maximum segment size
 */

isc_result_t
isc__nm_socket_min_mtu(uv_os_sock_t fd, sa_family_t sa_family);
/*%<
 * Use minimum MTU on IPv6 sockets
 */

void
isc__nm_set_network_buffers(isc_nm_t *nm, uv_handle_t *handle);
/*%>
 * Sets the pre-configured network buffer sizes on the handle.
 */
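
/*
 * Illustrative sketch only, using libuv's generic buffer-size API; the
 * hypothetical 'recvbuf'/'sendbuf' values stand in for wherever the
 * pre-configured sizes are actually kept:
 *
 *	int recvbuf = 1 << 20, sendbuf = 1 << 20;
 *	(void)uv_recv_buffer_size(handle, &recvbuf);
 *	(void)uv_send_buffer_size(handle, &sendbuf);
 */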

void
isc__nmsocket_barrier_init(isc_nmsocket_t *listener);
/*%>
 * Initialise the socket synchronisation barrier according to the
 * number of children.
 */

void
isc__nmsocket_stop(isc_nmsocket_t *listener);
/*%>
 * Broadcast a "stop" event for a listener socket across all workers and
 * wait for its processing to complete; then stop and close the underlying
 * transport listener socket.
 *
 * The primitive is used in multi-layer transport listener sockets to
 * implement shutdown properly: after the broadcast events have been
 * processed, it is safe to destroy the shared data within the listener
 * socket (including shutting down the underlying transport listener
 * socket).
 */
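
/*
 * Illustrative sketch only (hypothetical outer-layer transport, showing
 * the intended shutdown order rather than real code):
 *
 *	static void
 *	outerlayer_stoplistening(isc_nmsocket_t *listener) {
 *		// Broadcast "stop" to every worker and wait until each
 *		// of them has processed it ...
 *		isc__nmsocket_stop(listener);
 *		// ... only then is it safe to tear down the data shared
 *		// between the listener and its children.
 *		outerlayer_destroy_shared_data(listener);
 *	}
 */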

void
isc__nm_udp_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result,
			   bool async);
void
isc__nm_tcp_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result,
			   bool async);

isc__nm_uvreq_t *
isc___nm_get_read_req(isc_nmsocket_t *sock, isc_sockaddr_t *sockaddr FLARG);

void
isc__nm_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf);

void
isc__nm_udp_read_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf,
		    const struct sockaddr *addr, unsigned int flags);
void
isc__nm_tcp_read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf);

isc_result_t
isc__nm_start_reading(isc_nmsocket_t *sock);
void
isc__nm_stop_reading(isc_nmsocket_t *sock);
bool
isc__nmsocket_closing(isc_nmsocket_t *sock);
bool
isc__nm_closing(isc__networker_t *worker);

void
isc__nm_failed_send_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
		       isc_result_t eresult, bool async);
void
isc__nm_failed_connect_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
			  isc_result_t eresult, bool async);
void
isc__nm_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result, bool async);

void
isc__nm_accept_connection_log(isc_nmsocket_t *sock, isc_result_t result,
			      bool can_log_quota);

/*
 * Timeout callbacks
 */
void
isc__nmsocket_connecttimeout_cb(uv_timer_t *timer);
void
isc__nmsocket_readtimeout_cb(uv_timer_t *timer);
void
isc__nmsocket_writetimeout_cb(void *data, isc_result_t eresult);

/*
 * Bind to the socket, but allow binding to IPv6 tentative addresses reported by
 * the route socket by setting IP_FREEBIND (or equivalent).
 */
int
isc__nm_udp_freebind(uv_udp_t *handle, const struct sockaddr *addr,
		     unsigned int flags);

int
isc__nm_tcp_freebind(uv_tcp_t *handle, const struct sockaddr *addr,
		     unsigned int flags);
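
/*
 * Illustrative sketch only for the UDP case (Linux IP_FREEBIND with an
 * assumed ordering; some BSDs provide equivalents such as IP_BINDANY):
 *
 *	#ifdef IP_FREEBIND
 *	int on = 1;
 *	uv_os_fd_t fd;
 *	if (uv_fileno((uv_handle_t *)handle, &fd) == 0) {
 *		(void)setsockopt((int)fd, IPPROTO_IP, IP_FREEBIND, &on,
 *				 sizeof(on));
 *	}
 *	#endif
 *	return (uv_udp_bind(handle, addr, flags));
 */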

void
isc__nmsocket_log_tls_session_reuse(isc_nmsocket_t *sock, isc_tls_t *tls);

/*
 * Logging helpers
 */
void
isc__netmgr_log(const isc_nm_t *netmgr, int level, const char *fmt, ...)
	ISC_FORMAT_PRINTF(3, 4);
void
isc__nmsocket_log(const isc_nmsocket_t *sock, int level, const char *fmt, ...)
	ISC_FORMAT_PRINTF(3, 4);
void
isc__nmhandle_log(const isc_nmhandle_t *handle, int level, const char *fmt, ...)
	ISC_FORMAT_PRINTF(3, 4);

void
isc__nm_received_proxy_header_log(isc_nmhandle_t *handle,
				  const isc_proxy2_command_t cmd,
				  const int socktype,
				  const isc_sockaddr_t *restrict src_addr,
				  const isc_sockaddr_t *restrict dst_addr,
				  const isc_region_t *restrict tlvs);

void
isc__nmhandle_set_manual_timer(isc_nmhandle_t *handle, const bool manual);
/*
 * Set manual read timer control mode, so that the timer is neither reset
 * automatically on read nor started when a read is initiated.
 */

void
isc__nmhandle_get_selected_alpn(isc_nmhandle_t *handle,
				const unsigned char **alpn,
				unsigned int *alpnlen);
/*
 * Returns a non-zero-terminated ALPN identifier via 'alpn'. The length of
 * the identifier is returned via 'alpnlen'. If after the call either
 * 'alpn == NULL' or 'alpnlen == 0', then the identifier was not negotiated
 * or the underlying protocol of the connection represented by the given
 * handle does not support ALPN.
 */
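
/*
 * Illustrative sketch only (hypothetical caller): because the identifier
 * is not NUL-terminated, it must be copied before being used as a
 * C string:
 *
 *	const unsigned char *alpn = NULL;
 *	unsigned int alpnlen = 0;
 *	char buf[64] = { 0 };
 *
 *	isc__nmhandle_get_selected_alpn(handle, &alpn, &alpnlen);
 *	if (alpn != NULL && alpnlen > 0 && alpnlen < sizeof(buf)) {
 *		memmove(buf, alpn, alpnlen); // e.g. "dot" or "h2"
 *	}
 */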

void
isc__nm_senddns(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
		void *cbarg);
/*%<
 * The same as 'isc_nm_send()', but the data length is sent ahead of the
 * data (two bytes, 16 bits, in big-endian format).
 */
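
/*
 * Illustrative sketch only (not the actual implementation): the two-byte
 * prefix is just the region length in network byte order, matching the
 * framing of DNS messages over TCP (RFC 1035, section 4.2.2):
 *
 *	uint8_t prefix[2] = { (uint8_t)(region->length >> 8),
 *			      (uint8_t)(region->length & 0xff) };
 *	// 'prefix' goes on the wire immediately before 'region->base'.
 */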