2019-11-05 13:55:54 -08:00
|
|
|
/*
|
|
|
|
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
|
|
|
|
*
|
2021-06-03 08:37:05 +02:00
|
|
|
* SPDX-License-Identifier: MPL-2.0
|
|
|
|
*
|
2019-11-05 13:55:54 -08:00
|
|
|
* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
2020-09-14 16:20:40 -07:00
|
|
|
* file, you can obtain one at https://mozilla.org/MPL/2.0/.
|
2019-11-05 13:55:54 -08:00
|
|
|
*
|
|
|
|
* See the COPYRIGHT file distributed with this work for additional
|
|
|
|
* information regarding copyright ownership.
|
|
|
|
*/
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
#include <assert.h>
|
2019-11-05 13:55:54 -08:00
|
|
|
#include <inttypes.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
#include <isc/async.h>
|
2019-11-05 13:55:54 -08:00
|
|
|
#include <isc/atomic.h>
|
2021-04-27 16:20:03 +02:00
|
|
|
#include <isc/backtrace.h>
|
2021-05-05 11:51:39 +02:00
|
|
|
#include <isc/barrier.h>
|
2019-11-05 13:55:54 -08:00
|
|
|
#include <isc/buffer.h>
|
|
|
|
#include <isc/condition.h>
|
2020-11-07 20:48:37 +01:00
|
|
|
#include <isc/errno.h>
|
2022-02-22 23:40:39 +01:00
|
|
|
#include <isc/list.h>
|
2020-12-17 11:40:29 +01:00
|
|
|
#include <isc/log.h>
|
2022-07-26 13:03:45 +02:00
|
|
|
#include <isc/loop.h>
|
2019-11-05 13:55:54 -08:00
|
|
|
#include <isc/magic.h>
|
|
|
|
#include <isc/mem.h>
|
|
|
|
#include <isc/netmgr.h>
|
|
|
|
#include <isc/print.h>
|
2020-02-12 13:59:18 +01:00
|
|
|
#include <isc/quota.h>
|
2019-11-05 13:55:54 -08:00
|
|
|
#include <isc/random.h>
|
|
|
|
#include <isc/refcount.h>
|
|
|
|
#include <isc/region.h>
|
|
|
|
#include <isc/result.h>
|
|
|
|
#include <isc/sockaddr.h>
|
2020-01-05 01:02:12 -08:00
|
|
|
#include <isc/stats.h>
|
2020-11-07 20:48:37 +01:00
|
|
|
#include <isc/strerr.h>
|
Refactor taskmgr to run on top of netmgr
This commit changes the taskmgr to run the individual tasks on the
netmgr internal workers. While an effort has been put into keeping the
taskmgr interface intact, couple of changes have been made:
* The taskmgr has no concept of universal privileged mode - rather the
tasks are either privileged or unprivileged (normal). The privileged
tasks are run as a first thing when the netmgr is unpaused. There
are now four different queues in in the netmgr:
1. priority queue - netievent on the priority queue are run even when
the taskmgr enter exclusive mode and netmgr is paused. This is
needed to properly start listening on the interfaces, free
resources and resume.
2. privileged task queue - only privileged tasks are queued here and
this is the first queue that gets processed when network manager
is unpaused using isc_nm_resume(). All netmgr workers need to
clean the privileged task queue before they all proceed normal
operation. Both task queues are processed when the workers are
finished.
3. task queue - only (traditional) task are scheduled here and this
queue along with privileged task queues are process when the
netmgr workers are finishing. This is needed to process the task
shutdown events.
4. normal queue - this is the queue with netmgr events, e.g. reading,
sending, callbacks and pretty much everything is processed here.
* The isc_taskmgr_create() now requires initialized netmgr (isc_nm_t)
object.
* The isc_nm_destroy() function now waits for indefinite time, but it
will print out the active objects when in tracing mode
(-DNETMGR_TRACE=1 and -DNETMGR_TRACE_VERBOSE=1), the netmgr has been
made a little bit more asynchronous and it might take longer time to
shutdown all the active networking connections.
* Previously, the isc_nm_stoplistening() was a synchronous operation.
This has been changed and the isc_nm_stoplistening() just schedules
the child sockets to stop listening and exits. This was needed to
prevent a deadlock as the the (traditional) tasks are now executed on
the netmgr threads.
* The socket selection logic in isc__nm_udp_send() was flawed, but
fortunatelly, it was broken, so we never hit the problem where we
created uvreq_t on a socket from nmhandle_t, but then a different
socket could be picked up and then we were trying to run the send
callback on a socket that had different threadid than currently
running.
2021-04-09 11:31:19 +02:00
|
|
|
#include <isc/task.h>
|
2019-11-05 13:55:54 -08:00
|
|
|
#include <isc/thread.h>
|
2022-07-26 13:03:45 +02:00
|
|
|
#include <isc/tid.h>
|
2020-12-17 11:40:29 +01:00
|
|
|
#include <isc/tls.h>
|
2019-11-05 13:55:54 -08:00
|
|
|
#include <isc/util.h>
|
2022-04-27 17:41:47 +02:00
|
|
|
#include <isc/uv.h>
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
#include "../loop_p.h"
|
2019-11-05 13:55:54 -08:00
|
|
|
#include "netmgr-int.h"
|
2020-12-17 11:40:29 +01:00
|
|
|
#include "openssl_shim.h"
|
2021-05-27 09:45:07 +02:00
|
|
|
#include "trampoline_p.h"
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2020-01-29 15:16:02 +01:00
|
|
|
/*%
 * How many isc_nmhandles and isc_nm_uvreqs will we be
 * caching for reuse in a socket.
 *
 * NOTE(review): these are cache-size caps only; confirm exact reuse
 * policy against the nmsocket/uvreq allocation code.
 */
#define ISC_NM_HANDLES_STACK_SIZE 600
#define ISC_NM_REQS_STACK_SIZE	  600
|
|
|
|
|
2020-01-05 01:02:12 -08:00
|
|
|
/*%
 * Shortcut index arrays to get access to statistics counters.
 *
 * Slot order (inferred from the counter names): open, openfail, close,
 * bindfail, connectfail, connect, acceptfail, accept, sendfail,
 * recvfail, active.  A slot holds -1 when the protocol has no such
 * counter (UDP sockets are never accepted).
 */

static const isc_statscounter_t udp4statsindex[] = {
	isc_sockstatscounter_udp4open,
	isc_sockstatscounter_udp4openfail,
	isc_sockstatscounter_udp4close,
	isc_sockstatscounter_udp4bindfail,
	isc_sockstatscounter_udp4connectfail,
	isc_sockstatscounter_udp4connect,
	-1, /* acceptfail: not applicable to UDP */
	-1, /* accept: not applicable to UDP */
	isc_sockstatscounter_udp4sendfail,
	isc_sockstatscounter_udp4recvfail,
	isc_sockstatscounter_udp4active
};
|
|
|
|
|
|
|
|
/* Same slot layout as udp4statsindex, for UDP over IPv6. */
static const isc_statscounter_t udp6statsindex[] = {
	isc_sockstatscounter_udp6open,
	isc_sockstatscounter_udp6openfail,
	isc_sockstatscounter_udp6close,
	isc_sockstatscounter_udp6bindfail,
	isc_sockstatscounter_udp6connectfail,
	isc_sockstatscounter_udp6connect,
	-1, /* acceptfail: not applicable to UDP */
	-1, /* accept: not applicable to UDP */
	isc_sockstatscounter_udp6sendfail,
	isc_sockstatscounter_udp6recvfail,
	isc_sockstatscounter_udp6active
};
|
|
|
|
|
|
|
|
/* Same slot layout as udp4statsindex; TCP has real accept counters. */
static const isc_statscounter_t tcp4statsindex[] = {
	isc_sockstatscounter_tcp4open,	      isc_sockstatscounter_tcp4openfail,
	isc_sockstatscounter_tcp4close,	      isc_sockstatscounter_tcp4bindfail,
	isc_sockstatscounter_tcp4connectfail, isc_sockstatscounter_tcp4connect,
	isc_sockstatscounter_tcp4acceptfail,  isc_sockstatscounter_tcp4accept,
	isc_sockstatscounter_tcp4sendfail,    isc_sockstatscounter_tcp4recvfail,
	isc_sockstatscounter_tcp4active
};
|
|
|
|
|
|
|
|
/* Same slot layout as tcp4statsindex, for TCP over IPv6. */
static const isc_statscounter_t tcp6statsindex[] = {
	isc_sockstatscounter_tcp6open,	      isc_sockstatscounter_tcp6openfail,
	isc_sockstatscounter_tcp6close,	      isc_sockstatscounter_tcp6bindfail,
	isc_sockstatscounter_tcp6connectfail, isc_sockstatscounter_tcp6connect,
	isc_sockstatscounter_tcp6acceptfail,  isc_sockstatscounter_tcp6accept,
	isc_sockstatscounter_tcp6sendfail,    isc_sockstatscounter_tcp6recvfail,
	isc_sockstatscounter_tcp6active
};
|
|
|
|
|
|
|
|
#if 0
|
|
|
|
/* XXX: not currently used */
|
|
|
|
static const isc_statscounter_t unixstatsindex[] = {
|
|
|
|
isc_sockstatscounter_unixopen,
|
|
|
|
isc_sockstatscounter_unixopenfail,
|
|
|
|
isc_sockstatscounter_unixclose,
|
|
|
|
isc_sockstatscounter_unixbindfail,
|
|
|
|
isc_sockstatscounter_unixconnectfail,
|
|
|
|
isc_sockstatscounter_unixconnect,
|
|
|
|
isc_sockstatscounter_unixacceptfail,
|
|
|
|
isc_sockstatscounter_unixaccept,
|
|
|
|
isc_sockstatscounter_unixsendfail,
|
|
|
|
isc_sockstatscounter_unixrecvfail,
|
|
|
|
isc_sockstatscounter_unixactive
|
|
|
|
};
|
2020-02-13 21:48:23 +01:00
|
|
|
#endif /* if 0 */
|
2020-01-05 01:02:12 -08:00
|
|
|
|
2021-10-03 00:27:52 -07:00
|
|
|
/*
 * Set by the -T dscp option on the command line. If set to a value
 * other than -1 (the "no check" default), we check to make sure DSCP
 * values match it, and assert if not. (Not currently in use.)
 *
 * NOTE(review): nothing in this file reads it; presumably kept so
 * external references keep linking — confirm before removing.
 */
int isc_dscp_check_value = -1;
|
|
|
|
|
2020-02-14 08:14:03 +01:00
|
|
|
/*
 * Forward declarations for static helpers defined later in this file.
 */
static void
nmsocket_maybe_destroy(isc_nmsocket_t *sock FLARG);
static void
nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle);

static void
process_netievent(void *arg);

static void
isc__nm_async_detach(isc__networker_t *worker, isc__netievent_t *ev0);
/*%<
 * Issue a 'handle closed' callback on the socket.
 */

static void
nmhandle_detach_cb(isc_nmhandle_t **handlep FLARG);

/* uv_walk() callback used while tearing a worker's loop down. */
static void
shutdown_walk_cb(uv_handle_t *handle, void *arg);
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
static void
|
|
|
|
networker_teardown(void *arg) {
|
|
|
|
isc__networker_t *worker = arg;
|
|
|
|
isc_loop_t *loop = worker->loop;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
worker->shuttingdown = true;
|
|
|
|
|
2022-12-07 09:45:34 +01:00
|
|
|
isc__netmgr_log(worker->netmgr, ISC_LOG_DEBUG(1),
|
|
|
|
"Shutting down network manager worker on loop %p(%d)",
|
|
|
|
loop, isc_tid());
|
2022-07-26 13:03:45 +02:00
|
|
|
|
|
|
|
uv_walk(&loop->loop, shutdown_walk_cb, NULL);
|
|
|
|
|
|
|
|
isc__networker_detach(&worker);
|
2021-05-22 18:12:11 +02:00
|
|
|
}
|
|
|
|
|
2021-05-27 09:45:07 +02:00
|
|
|
static void
|
2022-07-26 13:03:45 +02:00
|
|
|
netmgr_teardown(void *arg) {
|
|
|
|
isc_nm_t *netmgr = (void *)arg;
|
|
|
|
|
|
|
|
if (atomic_compare_exchange_strong(&netmgr->shuttingdown,
|
|
|
|
&(bool){ false }, true))
|
|
|
|
{
|
2022-12-07 09:45:34 +01:00
|
|
|
isc__netmgr_log(netmgr, ISC_LOG_DEBUG(1),
|
|
|
|
"Shutting down network manager");
|
2021-05-27 09:45:07 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-25 14:43:14 +02:00
|
|
|
/*
 * Minimum libuv version we may run against: it must be at least as new
 * as the libuv whose UDP flag declarations were visible at compile
 * time, because isc_netmgr_create() would otherwise pass flags the
 * runtime library does not know (and it fatally errors instead).  Each
 * branch names the release that introduced the corresponding flag.
 */
#if HAVE_DECL_UV_UDP_LINUX_RECVERR
#define MINIMAL_UV_VERSION UV_VERSION(1, 42, 0)
#elif HAVE_DECL_UV_UDP_MMSG_FREE
#define MINIMAL_UV_VERSION UV_VERSION(1, 40, 0)
#elif HAVE_DECL_UV_UDP_RECVMMSG
#define MINIMAL_UV_VERSION UV_VERSION(1, 37, 0)
#elif HAVE_DECL_UV_UDP_MMSG_CHUNK
#define MINIMAL_UV_VERSION UV_VERSION(1, 35, 0)
#else
#define MINIMAL_UV_VERSION UV_VERSION(1, 0, 0)
#endif
|
|
|
|
|
2021-04-27 00:07:43 +02:00
|
|
|
void
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_netmgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, isc_nm_t **netmgrp) {
|
|
|
|
isc_nm_t *netmgr = NULL;
|
Refactor taskmgr to run on top of netmgr
This commit changes the taskmgr to run the individual tasks on the
netmgr internal workers. While an effort has been put into keeping the
taskmgr interface intact, couple of changes have been made:
* The taskmgr has no concept of universal privileged mode - rather the
tasks are either privileged or unprivileged (normal). The privileged
tasks are run as a first thing when the netmgr is unpaused. There
are now four different queues in in the netmgr:
1. priority queue - netievent on the priority queue are run even when
the taskmgr enter exclusive mode and netmgr is paused. This is
needed to properly start listening on the interfaces, free
resources and resume.
2. privileged task queue - only privileged tasks are queued here and
this is the first queue that gets processed when network manager
is unpaused using isc_nm_resume(). All netmgr workers need to
clean the privileged task queue before they all proceed normal
operation. Both task queues are processed when the workers are
finished.
3. task queue - only (traditional) task are scheduled here and this
queue along with privileged task queues are process when the
netmgr workers are finishing. This is needed to process the task
shutdown events.
4. normal queue - this is the queue with netmgr events, e.g. reading,
sending, callbacks and pretty much everything is processed here.
* The isc_taskmgr_create() now requires initialized netmgr (isc_nm_t)
object.
* The isc_nm_destroy() function now waits for indefinite time, but it
will print out the active objects when in tracing mode
(-DNETMGR_TRACE=1 and -DNETMGR_TRACE_VERBOSE=1), the netmgr has been
made a little bit more asynchronous and it might take longer time to
shutdown all the active networking connections.
* Previously, the isc_nm_stoplistening() was a synchronous operation.
This has been changed and the isc_nm_stoplistening() just schedules
the child sockets to stop listening and exits. This was needed to
prevent a deadlock as the the (traditional) tasks are now executed on
the netmgr threads.
* The socket selection logic in isc__nm_udp_send() was flawed, but
fortunatelly, it was broken, so we never hit the problem where we
created uvreq_t on a socket from nmhandle_t, but then a different
socket could be picked up and then we were trying to run the send
callback on a socket that had different threadid than currently
running.
2021-04-09 11:31:19 +02:00
|
|
|
|
2022-04-25 14:43:14 +02:00
|
|
|
if (uv_version() < MINIMAL_UV_VERSION) {
|
2022-10-14 17:18:07 +01:00
|
|
|
FATAL_ERROR("libuv version too old: running with libuv %s "
|
|
|
|
"when compiled with libuv %s will lead to "
|
|
|
|
"libuv failures because of unknown flags",
|
|
|
|
uv_version_string(), UV_VERSION_STRING);
|
2022-04-25 14:43:14 +02:00
|
|
|
}
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
netmgr = isc_mem_get(mctx, sizeof(*netmgr));
|
|
|
|
*netmgr = (isc_nm_t){
|
|
|
|
.loopmgr = loopmgr,
|
|
|
|
.nloops = isc_loopmgr_nloops(loopmgr),
|
|
|
|
};
|
|
|
|
|
|
|
|
isc_mem_attach(mctx, &netmgr->mctx);
|
|
|
|
isc_mutex_init(&netmgr->lock);
|
|
|
|
isc_refcount_init(&netmgr->references, 1);
|
|
|
|
atomic_init(&netmgr->maxudp, 0);
|
|
|
|
atomic_init(&netmgr->shuttingdown, false);
|
|
|
|
atomic_init(&netmgr->recv_tcp_buffer_size, 0);
|
|
|
|
atomic_init(&netmgr->send_tcp_buffer_size, 0);
|
|
|
|
atomic_init(&netmgr->recv_udp_buffer_size, 0);
|
|
|
|
atomic_init(&netmgr->send_udp_buffer_size, 0);
|
2022-04-01 14:43:14 +02:00
|
|
|
#if HAVE_SO_REUSEPORT_LB
|
2022-07-26 13:03:45 +02:00
|
|
|
netmgr->load_balance_sockets = true;
|
2022-04-01 14:43:14 +02:00
|
|
|
#else
|
2022-07-26 13:03:45 +02:00
|
|
|
netmgr->load_balance_sockets = false;
|
2022-04-01 14:43:14 +02:00
|
|
|
#endif
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2020-09-02 17:57:44 +02:00
|
|
|
#ifdef NETMGR_TRACE
|
2022-07-26 13:03:45 +02:00
|
|
|
ISC_LIST_INIT(netmgr->active_sockets);
|
2020-09-02 17:57:44 +02:00
|
|
|
#endif
|
|
|
|
|
2019-11-20 22:33:35 +01:00
|
|
|
/*
|
|
|
|
* Default TCP timeout values.
|
2019-11-21 17:08:06 -08:00
|
|
|
* May be updated by isc_nm_tcptimeouts().
|
2019-11-20 22:33:35 +01:00
|
|
|
*/
|
2022-07-26 13:03:45 +02:00
|
|
|
atomic_init(&netmgr->init, 30000);
|
|
|
|
atomic_init(&netmgr->idle, 30000);
|
|
|
|
atomic_init(&netmgr->keepalive, 30000);
|
|
|
|
atomic_init(&netmgr->advertised, 30000);
|
2019-11-20 22:33:35 +01:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
netmgr->workers =
|
|
|
|
isc_mem_get(mctx, netmgr->nloops * sizeof(netmgr->workers[0]));
|
2021-05-05 11:51:39 +02:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_loopmgr_teardown(loopmgr, netmgr_teardown, netmgr);
|
2022-02-15 14:51:02 +01:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
netmgr->magic = NM_MAGIC;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
for (size_t i = 0; i < netmgr->nloops; i++) {
|
|
|
|
isc_loop_t *loop = isc_loop_get(netmgr->loopmgr, i);
|
|
|
|
isc__networker_t *worker = &netmgr->workers[i];
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
*worker = (isc__networker_t){
|
|
|
|
.recvbuf = isc_mem_get(loop->mctx,
|
|
|
|
ISC_NETMGR_RECVBUF_SIZE),
|
|
|
|
.sendbuf = isc_mem_get(loop->mctx,
|
|
|
|
ISC_NETMGR_SENDBUF_SIZE),
|
|
|
|
};
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_nm_attach(netmgr, &worker->netmgr);
|
2021-05-05 11:51:39 +02:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_mem_attach(loop->mctx, &worker->mctx);
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_loop_attach(loop, &worker->loop);
|
|
|
|
isc_loop_teardown(loop, networker_teardown, worker);
|
|
|
|
isc_refcount_init(&worker->references, 1);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
*netmgrp = netmgr;
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free the resources of the network manager.
|
|
|
|
*/
|
|
|
|
static void
|
2020-02-13 14:44:37 -08:00
|
|
|
nm_destroy(isc_nm_t **mgr0) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NM(*mgr0));
|
|
|
|
|
|
|
|
isc_nm_t *mgr = *mgr0;
|
2020-02-08 04:37:54 -08:00
|
|
|
*mgr0 = NULL;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2019-12-10 10:47:08 +01:00
|
|
|
isc_refcount_destroy(&mgr->references);
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
mgr->magic = 0;
|
|
|
|
|
2020-01-05 01:02:12 -08:00
|
|
|
if (mgr->stats != NULL) {
|
|
|
|
isc_stats_detach(&mgr->stats);
|
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_mutex_destroy(&mgr->lock);
|
2019-11-21 17:08:06 -08:00
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_mem_put(mgr->mctx, mgr->workers,
|
2022-07-26 13:03:45 +02:00
|
|
|
mgr->nloops * sizeof(mgr->workers[0]));
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_mem_putanddetach(&mgr->mctx, mgr, sizeof(*mgr));
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nm_attach(isc_nm_t *mgr, isc_nm_t **dst) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NM(mgr));
|
|
|
|
REQUIRE(dst != NULL && *dst == NULL);
|
|
|
|
|
2020-01-14 09:43:37 +01:00
|
|
|
isc_refcount_increment(&mgr->references);
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
*dst = mgr;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nm_detach(isc_nm_t **mgr0) {
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_nm_t *mgr = NULL;
|
|
|
|
|
|
|
|
REQUIRE(mgr0 != NULL);
|
|
|
|
REQUIRE(VALID_NM(*mgr0));
|
|
|
|
|
|
|
|
mgr = *mgr0;
|
|
|
|
*mgr0 = NULL;
|
|
|
|
|
2020-01-14 09:43:37 +01:00
|
|
|
if (isc_refcount_decrement(&mgr->references) == 1) {
|
2019-11-05 13:55:54 -08:00
|
|
|
nm_destroy(&mgr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-22 15:57:42 -08:00
|
|
|
/*
 * Destroy the network manager referenced by '*netmgrp' and NULL out
 * the caller's pointer.  The caller must hold the last remaining
 * reference; this is asserted below.
 */
void
isc_netmgr_destroy(isc_nm_t **netmgrp) {
	isc_nm_t *mgr = NULL;

	REQUIRE(VALID_NM(*netmgrp));

	mgr = *netmgrp;
	*netmgrp = NULL;

	/*
	 * NOTE(review): the refcount decrement happens inside REQUIRE(),
	 * so the side effect depends on REQUIRE() always being compiled
	 * in (as it is in ISC code); the decrement would be lost if the
	 * assertion were ever compiled out.
	 */
	REQUIRE(isc_refcount_decrement(&mgr->references) == 1);
	nm_destroy(&mgr);
}
|
|
|
|
|
|
|
|
/*
 * Set the manager's 'maxudp' limit.  The value is stored atomically so
 * it may be updated while worker threads are reading it.
 * (NOTE(review): presumably a testing knob limiting the number of UDP
 * packets processed; confirm against the UDP send/receive paths.)
 */
void
isc_nm_maxudp(isc_nm_t *mgr, uint32_t maxudp) {
	REQUIRE(VALID_NM(mgr));

	atomic_store(&mgr->maxudp, maxudp);
}
|
|
|
|
|
2022-02-09 19:48:13 +01:00
|
|
|
void
|
|
|
|
isc_nmhandle_setwritetimeout(isc_nmhandle_t *handle, uint64_t write_timeout) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
2022-07-26 13:03:45 +02:00
|
|
|
REQUIRE(handle->sock->tid == isc_tid());
|
2022-06-23 20:18:58 +03:00
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
case isc_nm_udpsocket:
|
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
handle->sock->write_timeout = write_timeout;
|
|
|
|
break;
|
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nmhandle_tls_setwritetimeout(handle, write_timeout);
|
|
|
|
break;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nmhandle_streamdns_setwritetimeout(handle, write_timeout);
|
|
|
|
break;
|
2022-06-23 20:18:58 +03:00
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
break;
|
|
|
|
}
|
2022-02-09 19:48:13 +01:00
|
|
|
}
|
|
|
|
|
2019-11-20 22:33:35 +01:00
|
|
|
/*
 * Set the initial, idle, keepalive and advertised timeout values on
 * the manager.  Each value is stored atomically so it can be changed
 * while worker threads read it.
 * (NOTE(review): units are not visible here -- presumably
 * milliseconds; confirm against the header documentation.)
 */
void
isc_nm_settimeouts(isc_nm_t *mgr, uint32_t init, uint32_t idle,
		   uint32_t keepalive, uint32_t advertised) {
	REQUIRE(VALID_NM(mgr));

	atomic_store(&mgr->init, init);
	atomic_store(&mgr->idle, idle);
	atomic_store(&mgr->keepalive, keepalive);
	atomic_store(&mgr->advertised, advertised);
}
|
|
|
|
|
2020-12-02 20:51:38 +01:00
|
|
|
/*
 * Store the requested TCP and UDP send/receive buffer sizes on the
 * manager.  Values are written atomically so worker threads can read
 * them concurrently.
 * (NOTE(review): the meaning of non-positive values is not visible
 * here -- presumably "use the system default"; confirm against the
 * socket setup code.)
 */
void
isc_nm_setnetbuffers(isc_nm_t *mgr, int32_t recv_tcp, int32_t send_tcp,
		     int32_t recv_udp, int32_t send_udp) {
	REQUIRE(VALID_NM(mgr));

	atomic_store(&mgr->recv_tcp_buffer_size, recv_tcp);
	atomic_store(&mgr->send_tcp_buffer_size, send_tcp);
	atomic_store(&mgr->recv_udp_buffer_size, recv_udp);
	atomic_store(&mgr->send_udp_buffer_size, send_udp);
}
|
|
|
|
|
2022-04-05 01:20:13 +02:00
|
|
|
/*
 * Return whether kernel load-balanced listener sockets are enabled on
 * the manager (see isc_nm_setloadbalancesockets()).
 */
bool
isc_nm_getloadbalancesockets(isc_nm_t *mgr) {
	REQUIRE(VALID_NM(mgr));

	return (mgr->load_balance_sockets);
}
|
|
|
|
|
2022-04-01 14:43:14 +02:00
|
|
|
/*
 * Enable or disable kernel load-balanced listener sockets.  The
 * setting is only honored on platforms that provide SO_REUSEPORT_LB;
 * elsewhere the request is silently ignored.
 */
void
isc_nm_setloadbalancesockets(isc_nm_t *mgr, bool enabled) {
	REQUIRE(VALID_NM(mgr));

#if HAVE_SO_REUSEPORT_LB
	mgr->load_balance_sockets = enabled;
#else
	/* No kernel support: leave the default in place. */
	UNUSED(enabled);
#endif
}
|
|
|
|
|
2019-11-20 22:33:35 +01:00
|
|
|
void
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc_nm_gettimeouts(isc_nm_t *mgr, uint32_t *initial, uint32_t *idle,
|
|
|
|
uint32_t *keepalive, uint32_t *advertised) {
|
2019-11-20 22:33:35 +01:00
|
|
|
REQUIRE(VALID_NM(mgr));
|
|
|
|
|
|
|
|
if (initial != NULL) {
|
2021-03-18 11:16:45 +01:00
|
|
|
*initial = atomic_load(&mgr->init);
|
2019-11-20 22:33:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (idle != NULL) {
|
2021-03-18 11:16:45 +01:00
|
|
|
*idle = atomic_load(&mgr->idle);
|
2019-11-20 22:33:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (keepalive != NULL) {
|
2021-03-18 11:16:45 +01:00
|
|
|
*keepalive = atomic_load(&mgr->keepalive);
|
2019-11-20 22:33:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (advertised != NULL) {
|
2021-03-18 11:16:45 +01:00
|
|
|
*advertised = atomic_load(&mgr->advertised);
|
2019-11-20 22:33:35 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source files directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
/*
|
|
|
|
* The two macros here generate the individual cases for the process_netievent()
|
|
|
|
* function. The NETIEVENT_CASE(type) macro is the common case, and
|
|
|
|
* NETIEVENT_CASE_NOMORE(type) is a macro that causes the loop in the
|
|
|
|
* process_queue() to stop, e.g. it's only used for the netievent that
|
|
|
|
* stops/pauses processing the enqueued netievents.
|
|
|
|
*/
|
2022-07-26 13:03:45 +02:00
|
|
|
/*
 * Expand to a switch case that runs the type-specific async handler
 * for a netievent and then releases the event back to the worker,
 * returning from the enclosing function.
 */
#define NETIEVENT_CASE(type)                                             \
	case netievent_##type: {                                         \
		isc__nm_async_##type(worker, ievent);                    \
		isc__nm_put_netievent_##type(                            \
			worker, (isc__netievent_##type##_t *)ievent);    \
		return;                                                  \
	}
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
static void
|
|
|
|
process_netievent(void *arg) {
|
|
|
|
isc__netievent_t *ievent = (isc__netievent_t *)arg;
|
|
|
|
isc__networker_t *worker = ievent->worker;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
|
|
|
|
switch (ievent->type) {
|
|
|
|
NETIEVENT_CASE(udplisten);
|
|
|
|
NETIEVENT_CASE(udpstop);
|
|
|
|
NETIEVENT_CASE(udpcancel);
|
2021-10-02 14:52:46 -07:00
|
|
|
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_CASE(tcpaccept);
|
|
|
|
NETIEVENT_CASE(tcplisten);
|
|
|
|
NETIEVENT_CASE(tcpstop);
|
|
|
|
|
2020-12-17 11:40:29 +01:00
|
|
|
NETIEVENT_CASE(tlsdnscycle);
|
|
|
|
NETIEVENT_CASE(tlsdnsaccept);
|
|
|
|
NETIEVENT_CASE(tlsdnslisten);
|
|
|
|
NETIEVENT_CASE(tlsdnsconnect);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_CASE(tlsdnssend);
|
|
|
|
NETIEVENT_CASE(tlsdnscancel);
|
|
|
|
NETIEVENT_CASE(tlsdnsclose);
|
|
|
|
NETIEVENT_CASE(tlsdnsread);
|
|
|
|
NETIEVENT_CASE(tlsdnsstop);
|
2020-12-17 11:40:29 +01:00
|
|
|
NETIEVENT_CASE(tlsdnsshutdown);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
|
2021-04-21 13:52:15 +02:00
|
|
|
NETIEVENT_CASE(tlssend);
|
|
|
|
NETIEVENT_CASE(tlsclose);
|
|
|
|
NETIEVENT_CASE(tlsdobio);
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
2020-12-07 14:19:10 +02:00
|
|
|
NETIEVENT_CASE(httpsend);
|
|
|
|
NETIEVENT_CASE(httpclose);
|
2022-06-22 19:31:18 +03:00
|
|
|
NETIEVENT_CASE(httpendpoints);
|
2022-07-26 13:03:45 +02:00
|
|
|
#endif
|
2022-06-20 20:30:12 +03:00
|
|
|
NETIEVENT_CASE(streamdnsread);
|
|
|
|
NETIEVENT_CASE(streamdnssend);
|
|
|
|
NETIEVENT_CASE(streamdnsclose);
|
|
|
|
NETIEVENT_CASE(streamdnscancel);
|
|
|
|
|
2022-10-24 13:54:39 -07:00
|
|
|
NETIEVENT_CASE(settlsctx);
|
2022-10-14 20:45:40 +03:00
|
|
|
NETIEVENT_CASE(sockstop);
|
2020-12-07 14:19:10 +02:00
|
|
|
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_CASE(connectcb);
|
|
|
|
NETIEVENT_CASE(readcb);
|
|
|
|
NETIEVENT_CASE(sendcb);
|
|
|
|
|
|
|
|
NETIEVENT_CASE(detach);
|
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
}
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
2022-07-26 13:03:45 +02:00
|
|
|
isc__nm_get_netievent(isc__networker_t *worker, isc__netievent_type type) {
|
|
|
|
isc__netievent_storage_t *event = isc_mem_get(worker->mctx,
|
2021-05-12 21:16:17 +02:00
|
|
|
sizeof(*event));
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2020-02-12 13:59:18 +01:00
|
|
|
*event = (isc__netievent_storage_t){ .ni.type = type };
|
2022-02-22 23:40:39 +01:00
|
|
|
ISC_LINK_INIT(&(event->ni), link);
|
2022-07-26 13:03:45 +02:00
|
|
|
|
|
|
|
isc__networker_ref(worker);
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
return (event);
|
|
|
|
}
|
|
|
|
|
2019-11-19 11:56:00 +01:00
|
|
|
void
|
2022-07-26 13:03:45 +02:00
|
|
|
isc__nm_put_netievent(isc__networker_t *worker, void *ievent) {
|
|
|
|
isc_mem_put(worker->mctx, ievent, sizeof(isc__netievent_storage_t));
|
|
|
|
isc__networker_unref(worker);
|
2019-11-19 11:56:00 +01:00
|
|
|
}
|
|
|
|
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_SOCKET_DEF(tcplisten);
|
|
|
|
NETIEVENT_SOCKET_DEF(tcpstop);
|
2021-01-25 17:44:39 +02:00
|
|
|
NETIEVENT_SOCKET_DEF(tlsclose);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsconnect);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdobio);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_SOCKET_DEF(udplisten);
|
|
|
|
NETIEVENT_SOCKET_DEF(udpstop);
|
2022-07-26 13:03:45 +02:00
|
|
|
NETIEVENT_SOCKET_HANDLE_DEF(udpcancel);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnsclose);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnsread);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnsstop);
|
2020-12-17 11:40:29 +01:00
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnslisten);
|
|
|
|
NETIEVENT_SOCKET_REQ_DEF(tlsdnsconnect);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_SOCKET_REQ_DEF(tlsdnssend);
|
|
|
|
NETIEVENT_SOCKET_HANDLE_DEF(tlsdnscancel);
|
2020-12-17 11:40:29 +01:00
|
|
|
NETIEVENT_SOCKET_QUOTA_DEF(tlsdnsaccept);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnscycle);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnsshutdown);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
|
2022-06-22 19:31:18 +03:00
|
|
|
#ifdef HAVE_LIBNGHTTP2
|
2020-12-07 14:19:10 +02:00
|
|
|
NETIEVENT_SOCKET_REQ_DEF(httpsend);
|
|
|
|
NETIEVENT_SOCKET_DEF(httpclose);
|
2022-06-22 19:31:18 +03:00
|
|
|
NETIEVENT_SOCKET_HTTP_EPS_DEF(httpendpoints);
|
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
2020-12-07 14:19:10 +02:00
|
|
|
|
2021-01-25 17:44:39 +02:00
|
|
|
NETIEVENT_SOCKET_REQ_DEF(tlssend);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
/* Connect-callback netievent; socket + request + result payload, per the macro name. */
NETIEVENT_SOCKET_REQ_RESULT_DEF(connectcb);
|
|
|
|
/* Read-callback netievent; socket + request + result payload, per the macro name. */
NETIEVENT_SOCKET_REQ_RESULT_DEF(readcb);
|
|
|
|
/* Send-callback netievent; socket + request + result payload, per the macro name. */
NETIEVENT_SOCKET_REQ_RESULT_DEF(sendcb);
|
|
|
|
|
|
|
|
NETIEVENT_SOCKET_DEF(detach);
|
|
|
|
|
|
|
|
NETIEVENT_SOCKET_QUOTA_DEF(tcpaccept);
|
|
|
|
|
2022-06-20 20:30:12 +03:00
|
|
|
NETIEVENT_SOCKET_DEF(streamdnsclose);
|
|
|
|
NETIEVENT_SOCKET_REQ_DEF(streamdnssend);
|
|
|
|
NETIEVENT_SOCKET_DEF(streamdnsread);
|
|
|
|
NETIEVENT_SOCKET_HANDLE_DEF(streamdnscancel);
|
|
|
|
|
2022-03-31 13:47:48 +03:00
|
|
|
NETIEVENT_SOCKET_TLSCTX_DEF(settlsctx);
|
2022-10-14 20:45:40 +03:00
|
|
|
NETIEVENT_SOCKET_DEF(sockstop);
|
2022-03-31 13:47:48 +03:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
void
|
|
|
|
isc__nm_process_ievent(isc__networker_t *worker, isc__netievent_t *event) {
|
|
|
|
event->worker = worker;
|
|
|
|
process_netievent(event);
|
|
|
|
}
|
|
|
|
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
void
|
|
|
|
isc__nm_maybe_enqueue_ievent(isc__networker_t *worker,
|
|
|
|
isc__netievent_t *event) {
|
|
|
|
/*
|
|
|
|
* If we are already in the matching nmthread, process the ievent
|
|
|
|
* directly.
|
|
|
|
*/
|
2022-07-26 13:03:45 +02:00
|
|
|
if (worker->loop == isc_loop_current(worker->netmgr->loopmgr)) {
|
|
|
|
isc__nm_process_ievent(worker, event);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
isc__nm_enqueue_ievent(worker, event);
|
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc__nm_enqueue_ievent(isc__networker_t *worker, isc__netievent_t *event) {
|
2022-07-26 13:03:45 +02:00
|
|
|
event->worker = worker;
|
2022-02-22 23:40:39 +01:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_async_run(worker->loop, process_netievent, event);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2020-01-16 11:52:58 +01:00
|
|
|
bool
|
2020-02-13 14:44:37 -08:00
|
|
|
isc__nmsocket_active(isc_nmsocket_t *sock) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
if (sock->parent != NULL) {
|
|
|
|
return (atomic_load(&sock->parent->active));
|
|
|
|
}
|
|
|
|
|
|
|
|
return (atomic_load(&sock->active));
|
|
|
|
}
|
|
|
|
|
2020-10-22 10:07:56 +02:00
|
|
|
bool
|
|
|
|
isc__nmsocket_deactivate(isc_nmsocket_t *sock) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
|
|
|
if (sock->parent != NULL) {
|
|
|
|
return (atomic_compare_exchange_strong(&sock->parent->active,
|
|
|
|
&(bool){ true }, false));
|
|
|
|
}
|
|
|
|
|
|
|
|
return (atomic_compare_exchange_strong(&sock->active, &(bool){ true },
|
|
|
|
false));
|
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
void
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_attach(isc_nmsocket_t *sock, isc_nmsocket_t **target FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(target != NULL && *target == NULL);
|
|
|
|
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc_nmsocket_t *rsock = NULL;
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
if (sock->parent != NULL) {
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread would miss the sock->{connected,listening} = `true`, because it
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a way similar to the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering an assertion failure. This option would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
rsock = sock->parent;
|
|
|
|
INSIST(rsock->parent == NULL); /* sanity check */
|
2019-11-05 13:55:54 -08:00
|
|
|
} else {
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread would miss the sock->{connected,listening} = `true`, because it
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a way similar to the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering an assertion failure. This option would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
rsock = sock;
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
NETMGR_TRACE_LOG("isc__nmsocket_attach():%p->references = %" PRIuFAST32
|
|
|
|
"\n",
|
|
|
|
rsock, isc_refcount_current(&rsock->references) + 1);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread would miss the sock->{connected,listening} = `true`, because it
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a way similar to the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering an assertion failure. This option would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
|
|
|
|
isc_refcount_increment0(&rsock->references);
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
*target = sock;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free all resources inside a socket (including its children if any).
|
|
|
|
*/
|
|
|
|
static void
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread would miss the sock->{connected,listening} = `true`, because it
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a way similar to the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering an assertion failure. This option would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree FLARG) {
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_t *handle = NULL;
|
2019-11-05 13:55:54 -08:00
|
|
|
isc__nm_uvreq_t *uvreq = NULL;
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(!isc__nmsocket_active(sock));
|
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
NETMGR_TRACE_LOG("nmsocket_cleanup():%p->references = %" PRIuFAST32
|
|
|
|
"\n",
|
|
|
|
sock, isc_refcount_current(&sock->references));
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread would miss the sock->{connected,listening} = `true`, because it
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a way similar to the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering an assertion failure. This option would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
|
2022-08-29 13:42:14 +02:00
|
|
|
isc_refcount_destroy(&sock->references);
|
|
|
|
|
2021-10-02 16:26:43 -07:00
|
|
|
isc__nm_decstats(sock, STATID_ACTIVE);
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
atomic_store(&sock->destroying, true);
|
|
|
|
|
|
|
|
if (sock->parent == NULL && sock->children != NULL) {
|
|
|
|
/*
|
|
|
|
* We shouldn't be here unless there are no active handles,
|
|
|
|
* so we can clean up and free the children.
|
|
|
|
*/
|
2020-12-03 17:58:10 +01:00
|
|
|
for (size_t i = 0; i < sock->nchildren; i++) {
|
2022-08-29 13:42:14 +02:00
|
|
|
REQUIRE(!atomic_load(&sock->children[i].destroying));
|
|
|
|
if (isc_refcount_decrement(
|
2022-11-02 19:33:14 +01:00
|
|
|
&sock->children[i].references))
|
|
|
|
{
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread would miss the sock->{connected,listening} = `true`, because it
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a way similar to the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering an assertion failure. This option would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
nmsocket_cleanup(&sock->children[i],
|
|
|
|
false FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-05 11:51:39 +02:00
|
|
|
/*
|
|
|
|
* Now free them.
|
2019-11-05 13:55:54 -08:00
|
|
|
*/
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_mem_put(sock->worker->mctx, sock->children,
|
2019-11-05 13:55:54 -08:00
|
|
|
sock->nchildren * sizeof(*sock));
|
|
|
|
sock->children = NULL;
|
|
|
|
sock->nchildren = 0;
|
|
|
|
}
|
|
|
|
|
2020-06-10 11:32:39 +02:00
|
|
|
sock->statichandle = NULL;
|
2020-06-04 23:13:54 -07:00
|
|
|
|
|
|
|
if (sock->outerhandle != NULL) {
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread would miss the sock->{connected,listening} = `true`, because it
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a way similar to the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc__nmhandle_detach(&sock->outerhandle FLARG_PASS);
|
2020-06-04 23:13:54 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
if (sock->outer != NULL) {
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a way similar to the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_detach(&sock->outer FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
while ((handle = isc_astack_pop(sock->inactivehandles)) != NULL) {
|
|
|
|
nmhandle_free(sock, handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sock->buf != NULL) {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_mem_put(sock->worker->mctx, sock->buf, sock->buf_size);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (sock->quota != NULL) {
|
|
|
|
isc_quota_detach(&sock->quota);
|
|
|
|
}
|
|
|
|
|
2019-11-22 13:19:45 +01:00
|
|
|
sock->pquota = NULL;
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_astack_destroy(sock->inactivehandles);
|
|
|
|
|
|
|
|
while ((uvreq = isc_astack_pop(sock->inactivereqs)) != NULL) {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_mem_put(sock->worker->mctx, uvreq, sizeof(*uvreq));
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
isc_astack_destroy(sock->inactivereqs);
|
|
|
|
|
2022-03-16 17:02:46 +02:00
|
|
|
isc__nm_tlsdns_cleanup_data(sock);
|
2021-01-25 17:44:39 +02:00
|
|
|
isc__nm_tls_cleanup_data(sock);
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; remove it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address.
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
isc__nm_http_cleanup_data(sock);
|
2021-04-21 13:52:15 +02:00
|
|
|
#endif
|
2022-06-20 20:30:12 +03:00
|
|
|
isc__nm_streamdns_cleanup_data(sock);
|
2022-07-26 13:03:45 +02:00
|
|
|
|
2022-10-14 20:45:40 +03:00
|
|
|
if (sock->barrier_initialised) {
|
|
|
|
isc_barrier_destroy(&sock->barrier);
|
|
|
|
}
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
sock->magic = 0;
|
|
|
|
|
2020-09-02 17:57:44 +02:00
|
|
|
#ifdef NETMGR_TRACE
|
2022-07-26 13:03:45 +02:00
|
|
|
LOCK(&sock->worker->netmgr->lock);
|
|
|
|
ISC_LIST_UNLINK(sock->worker->netmgr->active_sockets, sock,
|
|
|
|
active_link);
|
|
|
|
UNLOCK(&sock->worker->netmgr->lock);
|
|
|
|
isc_mutex_destroy(&sock->tracelock);
|
2020-09-02 17:57:44 +02:00
|
|
|
#endif
|
2022-07-26 13:03:45 +02:00
|
|
|
|
|
|
|
isc_mutex_destroy(&sock->lock);
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
if (dofree) {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc__networker_t *worker = sock->worker;
|
|
|
|
isc_mem_put(worker->mctx, sock, sizeof(*sock));
|
|
|
|
isc__networker_detach(&worker);
|
2019-11-05 13:55:54 -08:00
|
|
|
} else {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc__networker_detach(&sock->worker);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
nmsocket_maybe_destroy(isc_nmsocket_t *sock FLARG) {
|
2020-02-13 14:44:37 -08:00
|
|
|
int active_handles;
|
2019-11-05 13:55:54 -08:00
|
|
|
bool destroy = false;
|
|
|
|
|
2021-03-30 09:25:09 +02:00
|
|
|
NETMGR_TRACE_LOG("%s():%p->references = %" PRIuFAST32 "\n", __func__,
|
|
|
|
sock, isc_refcount_current(&sock->references));
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
if (sock->parent != NULL) {
|
|
|
|
/*
|
|
|
|
* This is a child socket and cannot be destroyed except
|
|
|
|
* as a side effect of destroying the parent, so let's go
|
|
|
|
* see if the parent is ready to be destroyed.
|
|
|
|
*/
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
nmsocket_maybe_destroy(sock->parent FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is a parent socket (or a standalone). See whether the
|
|
|
|
* children have active handles before deciding whether to
|
|
|
|
* accept destruction.
|
|
|
|
*/
|
2019-12-08 22:44:08 +01:00
|
|
|
if (atomic_load(&sock->active) || atomic_load(&sock->destroying) ||
|
2020-02-13 14:44:37 -08:00
|
|
|
!atomic_load(&sock->closed) || atomic_load(&sock->references) != 0)
|
|
|
|
{
|
2019-12-08 22:44:08 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
active_handles = atomic_load(&sock->ah);
|
2019-11-05 13:55:54 -08:00
|
|
|
if (sock->children != NULL) {
|
2020-12-03 17:58:10 +01:00
|
|
|
for (size_t i = 0; i < sock->nchildren; i++) {
|
2019-11-19 11:56:00 +01:00
|
|
|
active_handles += atomic_load(&sock->children[i].ah);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-10 11:32:39 +02:00
|
|
|
if (active_handles == 0 || sock->statichandle != NULL) {
|
2019-11-05 13:55:54 -08:00
|
|
|
destroy = true;
|
|
|
|
}
|
|
|
|
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
NETMGR_TRACE_LOG("%s:%p->active_handles = %d, .statichandle = %p\n",
|
|
|
|
__func__, sock, active_handles, sock->statichandle);
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
if (destroy) {
|
2020-06-22 15:46:11 -07:00
|
|
|
atomic_store(&sock->destroying, true);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
nmsocket_cleanup(sock, true FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-08-29 10:55:10 +02:00
|
|
|
void
|
|
|
|
isc_nmhandle_close(isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
isc__nmsocket_clearcb(handle->sock);
|
|
|
|
isc__nm_failed_read_cb(handle->sock, ISC_R_EOF, false);
|
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
void
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a way similar to the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_prep_destroy(isc_nmsocket_t *sock FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(sock->parent == NULL);
|
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
NETMGR_TRACE_LOG("isc___nmsocket_prep_destroy():%p->references = "
|
|
|
|
"%" PRIuFAST32 "\n",
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
sock, isc_refcount_current(&sock->references));
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
/*
|
|
|
|
* The final external reference to the socket is gone. We can try
|
|
|
|
* destroying the socket, but we have to wait for all the inflight
|
|
|
|
* handles to finish first.
|
|
|
|
*/
|
|
|
|
atomic_store(&sock->active, false);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the socket has children, they'll need to be marked inactive
|
|
|
|
* so they can be cleaned up too.
|
|
|
|
*/
|
|
|
|
if (sock->children != NULL) {
|
2020-12-03 17:58:10 +01:00
|
|
|
for (size_t i = 0; i < sock->nchildren; i++) {
|
2019-11-05 13:55:54 -08:00
|
|
|
atomic_store(&sock->children[i].active, false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we're here then we already stopped listening; otherwise
|
|
|
|
* we'd have a hanging reference from the listening process.
|
|
|
|
*
|
|
|
|
* If it's a regular socket we may need to close it.
|
|
|
|
*/
|
2022-08-29 10:55:10 +02:00
|
|
|
if (!atomic_load(&sock->closing) && !atomic_load(&sock->closed)) {
|
2019-11-05 13:55:54 -08:00
|
|
|
switch (sock->type) {
|
2020-09-05 11:07:40 -07:00
|
|
|
case isc_nm_udpsocket:
|
|
|
|
isc__nm_udp_close(sock);
|
|
|
|
return;
|
2019-11-05 13:55:54 -08:00
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
isc__nm_tcp_close(sock);
|
2020-07-01 00:49:12 -07:00
|
|
|
return;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
isc__nm_tlsdns_close(sock);
|
|
|
|
return;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nm_streamdns_close(sock);
|
|
|
|
return;
|
2021-04-21 13:52:15 +02:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nm_tls_close(sock);
|
2022-08-29 10:55:10 +02:00
|
|
|
return;
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; remove it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address.
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
case isc_nm_httpsocket:
|
2020-12-07 14:19:10 +02:00
|
|
|
isc__nm_http_close(sock);
|
|
|
|
return;
|
2021-04-21 13:52:15 +02:00
|
|
|
#endif
|
2019-11-05 13:55:54 -08:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
nmsocket_maybe_destroy(sock FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_detach(isc_nmsocket_t **sockp FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(sockp != NULL && *sockp != NULL);
|
|
|
|
REQUIRE(VALID_NMSOCK(*sockp));
|
|
|
|
|
|
|
|
isc_nmsocket_t *sock = *sockp, *rsock = NULL;
|
|
|
|
*sockp = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the socket is a part of a set (a child socket) we are
|
|
|
|
* counting references for the whole set at the parent.
|
|
|
|
*/
|
|
|
|
if (sock->parent != NULL) {
|
|
|
|
rsock = sock->parent;
|
|
|
|
INSIST(rsock->parent == NULL); /* Sanity check */
|
|
|
|
} else {
|
|
|
|
rsock = sock;
|
|
|
|
}
|
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
NETMGR_TRACE_LOG("isc__nmsocket_detach():%p->references = %" PRIuFAST32
|
|
|
|
"\n",
|
|
|
|
rsock, isc_refcount_current(&rsock->references) - 1);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
|
2020-01-14 09:43:37 +01:00
|
|
|
if (isc_refcount_decrement(&rsock->references) == 1) {
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_prep_destroy(rsock FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-04 14:54:36 -07:00
|
|
|
void
|
|
|
|
isc_nmsocket_close(isc_nmsocket_t **sockp) {
|
|
|
|
REQUIRE(sockp != NULL);
|
|
|
|
REQUIRE(VALID_NMSOCK(*sockp));
|
|
|
|
REQUIRE((*sockp)->type == isc_nm_udplistener ||
|
|
|
|
(*sockp)->type == isc_nm_tcplistener ||
|
2021-01-25 17:44:39 +02:00
|
|
|
(*sockp)->type == isc_nm_tlsdnslistener ||
|
2022-06-20 20:30:12 +03:00
|
|
|
(*sockp)->type == isc_nm_streamdnslistener ||
|
2020-12-07 14:19:10 +02:00
|
|
|
(*sockp)->type == isc_nm_tlslistener ||
|
|
|
|
(*sockp)->type == isc_nm_httplistener);
|
2020-06-04 14:54:36 -07:00
|
|
|
|
|
|
|
isc__nmsocket_detach(sockp);
|
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
/*
 * Initialize a caller-allocated netmgr socket 'sock' of the given 'type',
 * binding it to 'worker' (whose loop thread becomes the socket's tid) and
 * optionally to the local interface address 'iface'.  The socket starts
 * with one reference, is marked active, and bumps the ACTIVE statistics
 * counter.  FLARG carries file/line information for the tracing builds.
 */
void
isc___nmsocket_init(isc_nmsocket_t *sock, isc__networker_t *worker,
		    isc_nmsocket_type type, isc_sockaddr_t *iface FLARG) {
	uint16_t family;

	REQUIRE(sock != NULL);
	REQUIRE(worker != NULL);

	/*
	 * Designated initializer wipes the whole struct; fields not
	 * listed here start out zeroed.  fd = -1 means "no fd yet".
	 */
	*sock = (isc_nmsocket_t){
		.type = type,
		.tid = worker->loop->tid,
		.fd = -1,
		.inactivehandles = isc_astack_new(worker->mctx,
						  ISC_NM_HANDLES_STACK_SIZE),
		.inactivereqs = isc_astack_new(worker->mctx,
					       ISC_NM_REQS_STACK_SIZE),
		.result = ISC_R_UNSET,
	};

	ISC_LIST_INIT(sock->tls.sendreqs);
	isc_mutex_init(&sock->lock);

	if (iface != NULL) {
		family = iface->type.sa.sa_family;
		sock->iface = *iface;
	} else {
		/* No interface: e.g. outgoing/route sockets. */
		family = AF_UNSPEC;
	}

/*
 * NOTE(review): this uses "#if NETMGR_TRACE" while other spots in this
 * file use "#ifdef NETMGR_TRACE"; these differ if the macro is defined
 * to 0 — confirm the intended convention.
 */
#if NETMGR_TRACE
	/* Record a backtrace and register on the global active-socket list
	 * so dangling sockets can be reported at shutdown. */
	sock->backtrace_size = isc_backtrace(sock->backtrace, TRACE_SIZE);
	ISC_LINK_INIT(sock, active_link);
	ISC_LIST_INIT(sock->active_handles);
	LOCK(&worker->netmgr->lock);
	ISC_LIST_APPEND(worker->netmgr->active_sockets, sock, active_link);
	UNLOCK(&worker->netmgr->lock);
	isc_mutex_init(&sock->tracelock);
#endif

	isc__networker_attach(worker, &sock->worker);
	/* Back-pointer so libuv callbacks can recover the nmsocket. */
	sock->uv_handle.handle.data = sock;

	ISC_LINK_INIT(&sock->quotacb, link);

	/* Pick the statistics counter set by transport and address family. */
	switch (type) {
	case isc_nm_udpsocket:
	case isc_nm_udplistener:
		switch (family) {
		case AF_INET:
			sock->statsindex = udp4statsindex;
			break;
		case AF_INET6:
			sock->statsindex = udp6statsindex;
			break;
		case AF_UNSPEC:
			/*
			 * Route sockets are AF_UNSPEC, and don't
			 * have stats counters.
			 */
			break;
		default:
			UNREACHABLE();
		}
		break;
	case isc_nm_tcpsocket:
	case isc_nm_tcplistener:
	case isc_nm_tlsdnssocket:
	case isc_nm_tlsdnslistener:
	case isc_nm_httpsocket:
	case isc_nm_httplistener:
		/* All TCP-based transports share the TCP counters. */
		switch (family) {
		case AF_INET:
			sock->statsindex = tcp4statsindex;
			break;
		case AF_INET6:
			sock->statsindex = tcp6statsindex;
			break;
		default:
			UNREACHABLE();
		}
		break;
	default:
		/* Other socket types carry no statistics. */
		break;
	}

	isc_refcount_init(&sock->references, 1);

	memset(&sock->tlsstream, 0, sizeof(sock->tlsstream));

	NETMGR_TRACE_LOG("isc__nmsocket_init():%p->references = %" PRIuFAST32
			 "\n",
			 sock, isc_refcount_current(&sock->references));

	/* Lifecycle and state flags; atomics because they are read/written
	 * from multiple loop threads. */
	atomic_init(&sock->active, true);
	atomic_init(&sock->closing, false);
	atomic_init(&sock->listening, 0);
	atomic_init(&sock->closed, 0);
	atomic_init(&sock->destroying, 0);
	atomic_init(&sock->ah, 0);
	atomic_init(&sock->client, 0);
	atomic_init(&sock->connecting, false);
	atomic_init(&sock->keepalive, false);
	atomic_init(&sock->connected, false);
	atomic_init(&sock->timedout, false);

	atomic_init(&sock->active_child_connections, 0);

#if HAVE_LIBNGHTTP2
	isc__nm_http_initsocket(sock);
#endif

	sock->magic = NMSOCK_MAGIC;

	isc__nm_incstats(sock, STATID_ACTIVE);
}
|
|
|
|
|
2020-06-20 15:03:05 -07:00
|
|
|
void
|
|
|
|
isc__nmsocket_clearcb(isc_nmsocket_t *sock) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
2022-07-26 13:03:45 +02:00
|
|
|
REQUIRE(sock->tid == isc_tid());
|
2020-06-20 15:03:05 -07:00
|
|
|
|
2020-09-11 10:53:31 +02:00
|
|
|
sock->recv_cb = NULL;
|
|
|
|
sock->recv_cbarg = NULL;
|
|
|
|
sock->accept_cb = NULL;
|
2020-06-20 15:03:05 -07:00
|
|
|
sock->accept_cbarg = NULL;
|
2020-10-29 12:04:00 +01:00
|
|
|
sock->connect_cb = NULL;
|
|
|
|
sock->connect_cbarg = NULL;
|
2020-06-20 15:03:05 -07:00
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc__nm_free_uvbuf(isc_nmsocket_t *sock, const uv_buf_t *buf) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
REQUIRE(buf->base == sock->worker->recvbuf);
|
Fix the UDP recvmmsg support
Previously, the netmgr/udp.c tried to detect the recvmmsg detection in
libuv with #ifdef UV_UDP_<foo> preprocessor macros. However, because
the UV_UDP_<foo> are not preprocessor macros, but enum members, the
detection didn't work. Because the detection didn't work, the code
didn't have access to the information when we received the final chunk
of the recvmmsg and tried to free the uvbuf every time. Fortunately,
the isc__nm_free_uvbuf() had a kludge that detected attempt to free in
the middle of the receive buffer, so the code worked.
However, libuv 1.37.0 changed the way the recvmmsg was enabled from
implicit to explicit, and we checked for yet another enum member
presence with preprocessor macro, so in fact libuv recvmmsg support was
never enabled with libuv >= 1.37.0.
This commit changes to the preprocessor macros to autoconf checks for
declaration, so the detection now works again. On top of that, it's now
possible to cleanup the alloc_cb and free_uvbuf functions because now,
the information whether we can or cannot free the buffer is available to
us.
2022-01-11 12:14:23 +01:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
sock->worker->recvbuf_inuse = false;
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static isc_nmhandle_t *
|
2020-02-13 14:44:37 -08:00
|
|
|
alloc_handle(isc_nmsocket_t *sock) {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_nmhandle_t *handle = isc_mem_get(sock->worker->mctx,
|
2022-03-23 13:57:15 +01:00
|
|
|
sizeof(isc_nmhandle_t));
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2020-02-12 13:59:18 +01:00
|
|
|
*handle = (isc_nmhandle_t){ .magic = NMHANDLE_MAGIC };
|
2020-09-02 17:57:44 +02:00
|
|
|
#ifdef NETMGR_TRACE
|
|
|
|
ISC_LINK_INIT(handle, active_link);
|
|
|
|
#endif
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_refcount_init(&handle->references, 1);
|
|
|
|
|
|
|
|
return (handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
isc_nmhandle_t *
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmhandle_get(isc_nmsocket_t *sock, isc_sockaddr_t *peer,
|
|
|
|
isc_sockaddr_t *local FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_nmhandle_t *handle = NULL;
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
|
|
|
handle = isc_astack_pop(sock->inactivehandles);
|
|
|
|
|
|
|
|
if (handle == NULL) {
|
|
|
|
handle = alloc_handle(sock);
|
|
|
|
} else {
|
2020-06-10 11:32:39 +02:00
|
|
|
isc_refcount_init(&handle->references, 1);
|
2020-07-01 00:49:12 -07:00
|
|
|
INSIST(VALID_NMHANDLE(handle));
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
NETMGR_TRACE_LOG(
|
|
|
|
"isc__nmhandle_get():handle %p->references = %" PRIuFAST32 "\n",
|
|
|
|
handle, isc_refcount_current(&handle->references));
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
|
|
|
|
isc___nmsocket_attach(sock, &handle->sock FLARG_PASS);
|
2020-06-04 23:13:54 -07:00
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
#if NETMGR_TRACE
|
2021-04-27 16:20:03 +02:00
|
|
|
handle->backtrace_size = isc_backtrace(handle->backtrace, TRACE_SIZE);
|
2020-09-02 17:57:44 +02:00
|
|
|
#endif
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
if (peer != NULL) {
|
2021-05-26 08:15:34 +02:00
|
|
|
handle->peer = *peer;
|
2019-11-05 13:55:54 -08:00
|
|
|
} else {
|
2021-05-26 08:15:34 +02:00
|
|
|
handle->peer = sock->peer;
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (local != NULL) {
|
2021-05-26 08:15:34 +02:00
|
|
|
handle->local = *local;
|
2019-11-05 13:55:54 -08:00
|
|
|
} else {
|
2021-05-26 08:15:34 +02:00
|
|
|
handle->local = sock->iface;
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2022-02-23 08:54:49 +01:00
|
|
|
(void)atomic_fetch_add(&sock->ah, 1);
|
2019-11-19 11:56:00 +01:00
|
|
|
|
2020-09-02 17:57:44 +02:00
|
|
|
#ifdef NETMGR_TRACE
|
2022-07-26 13:03:45 +02:00
|
|
|
LOCK(&sock->tracelock);
|
2020-09-02 17:57:44 +02:00
|
|
|
ISC_LIST_APPEND(sock->active_handles, handle, active_link);
|
2022-07-26 13:03:45 +02:00
|
|
|
UNLOCK(&sock->tracelock);
|
2022-02-23 08:54:49 +01:00
|
|
|
#endif
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2021-03-29 10:52:05 +02:00
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_udpsocket:
|
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
if (!atomic_load(&sock->client)) {
|
|
|
|
break;
|
|
|
|
}
|
2021-10-11 12:09:16 +02:00
|
|
|
FALLTHROUGH;
|
2021-03-29 10:52:05 +02:00
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
case isc_nm_tlssocket:
|
2020-06-10 11:32:39 +02:00
|
|
|
INSIST(sock->statichandle == NULL);
|
2020-09-03 13:31:27 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* statichandle must be assigned, not attached;
|
|
|
|
* otherwise, if a handle was detached elsewhere
|
|
|
|
* it could never reach 0 references, and the
|
|
|
|
* handle and socket would never be freed.
|
|
|
|
*/
|
2020-06-10 11:32:39 +02:00
|
|
|
sock->statichandle = handle;
|
2021-03-29 10:52:05 +02:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2021-04-21 13:52:15 +02:00
|
|
|
#if HAVE_LIBNGHTTP2
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; remove it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address.
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
if (sock->type == isc_nm_httpsocket && sock->h2.session) {
|
|
|
|
isc__nm_httpsession_attach(sock->h2.session,
|
|
|
|
&handle->httpsession);
|
2020-12-07 14:19:10 +02:00
|
|
|
}
|
2021-04-21 13:52:15 +02:00
|
|
|
#endif
|
2020-12-07 14:19:10 +02:00
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
return (handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc__nmhandle_attach(isc_nmhandle_t *handle, isc_nmhandle_t **handlep FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
2020-09-03 13:31:27 -07:00
|
|
|
REQUIRE(handlep != NULL && *handlep == NULL);
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
NETMGR_TRACE_LOG("isc__nmhandle_attach():handle %p->references = "
|
|
|
|
"%" PRIuFAST32 "\n",
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
handle, isc_refcount_current(&handle->references) + 1);
|
|
|
|
|
2020-01-14 09:43:37 +01:00
|
|
|
isc_refcount_increment(&handle->references);
|
2020-09-03 13:31:27 -07:00
|
|
|
*handlep = handle;
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_is_stream(isc_nmhandle_t *handle) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
return (handle->sock->type == isc_nm_tcpsocket ||
|
2021-01-25 17:44:39 +02:00
|
|
|
handle->sock->type == isc_nm_tlssocket ||
|
2021-06-01 15:16:19 +03:00
|
|
|
handle->sock->type == isc_nm_tlsdnssocket ||
|
2022-06-20 20:30:12 +03:00
|
|
|
handle->sock->type == isc_nm_httpsocket ||
|
|
|
|
handle->sock->type == isc_nm_streamdnssocket);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2020-02-13 14:44:37 -08:00
|
|
|
nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
|
2019-12-10 11:09:56 +01:00
|
|
|
isc_refcount_destroy(&handle->references);
|
|
|
|
|
2019-11-08 10:52:49 -08:00
|
|
|
if (handle->dofree != NULL) {
|
2019-11-05 13:55:54 -08:00
|
|
|
handle->dofree(handle->opaque);
|
|
|
|
}
|
|
|
|
|
2020-02-12 13:59:18 +01:00
|
|
|
*handle = (isc_nmhandle_t){ .magic = 0 };
|
2019-11-19 11:56:00 +01:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_mem_put(sock->worker->mctx, handle, sizeof(isc_nmhandle_t));
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2019-12-08 22:44:08 +01:00
|
|
|
static void
|
2020-02-13 14:44:37 -08:00
|
|
|
nmhandle_deactivate(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
|
2019-12-16 18:24:55 -08:00
|
|
|
bool reuse = false;
|
2021-10-21 02:28:48 -07:00
|
|
|
uint_fast32_t ah;
|
2019-12-16 18:24:55 -08:00
|
|
|
|
2019-12-08 22:44:08 +01:00
|
|
|
/*
|
|
|
|
* We do all of this under lock to avoid races with socket
|
|
|
|
* destruction. We have to do this now, because at this point the
|
|
|
|
* socket is either unused or still attached to event->sock.
|
|
|
|
*/
|
2020-09-02 17:57:44 +02:00
|
|
|
#ifdef NETMGR_TRACE
|
|
|
|
ISC_LIST_UNLINK(sock->active_handles, handle, active_link);
|
|
|
|
#endif
|
|
|
|
|
2021-10-21 02:28:48 -07:00
|
|
|
ah = atomic_fetch_sub(&sock->ah, 1);
|
|
|
|
INSIST(ah > 0);
|
2022-02-23 08:54:49 +01:00
|
|
|
|
2022-02-24 00:11:45 +01:00
|
|
|
#if !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__
|
2019-12-08 22:44:08 +01:00
|
|
|
if (atomic_load(&sock->active)) {
|
2020-02-12 13:59:18 +01:00
|
|
|
reuse = isc_astack_trypush(sock->inactivehandles, handle);
|
2019-12-08 22:44:08 +01:00
|
|
|
}
|
2022-02-24 00:11:45 +01:00
|
|
|
#endif /* !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__ */
|
2019-12-08 22:44:08 +01:00
|
|
|
if (!reuse) {
|
|
|
|
nmhandle_free(sock, handle);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
void
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc__nmhandle_detach(isc_nmhandle_t **handlep FLARG) {
|
2020-06-04 23:13:54 -07:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2020-09-03 13:31:27 -07:00
|
|
|
isc_nmhandle_t *handle = NULL;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2020-09-03 13:31:27 -07:00
|
|
|
REQUIRE(handlep != NULL);
|
|
|
|
REQUIRE(VALID_NMHANDLE(*handlep));
|
|
|
|
|
|
|
|
handle = *handlep;
|
|
|
|
*handlep = NULL;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
Run .closehandle_cb asynchrounosly in nmhandle_detach_cb()
When sock->closehandle_cb is set, we need to run nmhandle_detach_cb()
asynchronously to ensure correct order of multiple packets processing in
the isc__nm_process_sock_buffer(). When not run asynchronously, it
would cause:
a) out-of-order processing of the return codes from processbuffer();
b) stack growth because the next TCP DNS message read callback will
be called from within the current TCP DNS message read callback.
The sock->closehandle_cb is set to isc__nm_resume_processing() for TCP
sockets which calls isc__nm_process_sock_buffer(). If the read callback
(called from isc__nm_process_sock_buffer()->processbuffer()) doesn't
attach to the nmhandle (f.e. because it wants to drop the processing or
we send the response directly via uv_try_write()), the
isc__nm_resume_processing() (via .closehandle_cb) would call
isc__nm_process_sock_buffer() recursively.
The below shortened code path shows how the stack can grow:
1: ns__client_request(handle, ...);
2: isc_nm_tcpdns_sequential(handle);
3: ns_query_start(client, handle);
4: query_lookup(qctx);
5: query_send(qctcx->client);
6: isc__nmhandle_detach(&client->reqhandle);
7: nmhandle_detach_cb(&handle);
8: sock->closehandle_cb(sock); // isc__nm_resume_processing
9: isc__nm_process_sock_buffer(sock);
10: processbuffer(sock); // isc__nm_tcpdns_processbuffer
11: isc_nmhandle_attach(req->handle, &handle);
12: isc__nm_readcb(sock, req, ISC_R_SUCCESS);
13: isc__nm_async_readcb(NULL, ...);
14: uvreq->cb.recv(...); // ns__client_request
Instead, if 'sock->closehandle_cb' is set, we need to run detach the
handle asynchroniously in 'isc__nmhandle_detach', so that on line 8 in
the code flow above does not start this recursion. This ensures the
correct order when processing multiple packets in the function
'isc__nm_process_sock_buffer()' and prevents the stack growth.
When not run asynchronously, the out-of-order processing leaves the
first TCP socket open until all requests on the stream have been
processed.
If the pipelining is disabled on the TCP via `keep-response-order`
configuration option, named would keep the first socket in lingering
CLOSE_WAIT state when the client sends an incomplete packet and then
closes the connection from the client side.
2022-02-08 12:42:34 +01:00
|
|
|
/*
|
|
|
|
* If the closehandle_cb is set, it needs to run asynchronously to
|
|
|
|
* ensure correct ordering of the isc__nm_process_sock_buffer().
|
|
|
|
*/
|
2020-10-12 17:51:09 +11:00
|
|
|
sock = handle->sock;
|
2022-07-26 13:03:45 +02:00
|
|
|
if (sock->tid == isc_tid() && sock->closehandle_cb == NULL) {
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
nmhandle_detach_cb(&handle FLARG_PASS);
|
2020-10-12 17:51:09 +11:00
|
|
|
} else {
|
|
|
|
isc__netievent_detach_t *event =
|
2022-07-26 13:03:45 +02:00
|
|
|
isc__nm_get_netievent_detach(sock->worker, sock);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
/*
|
|
|
|
* we are using implicit "attach" as the last reference
|
|
|
|
* need to be destroyed explicitly in the async callback
|
|
|
|
*/
|
|
|
|
event->handle = handle;
|
|
|
|
FLARG_IEVENT_PASS(event);
|
2022-07-26 13:03:45 +02:00
|
|
|
isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)event);
|
2020-10-12 17:51:09 +11:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Final detach callback for an nmhandle: drops one reference and, when
 * that was the last one, tears the handle down and releases the
 * reference it held on its socket.
 *
 * NOTE(review): this appears to be enqueued to run on the socket's own
 * loop thread by the caller -- confirm against the enqueue path.
 */
static void
nmhandle_detach_cb(isc_nmhandle_t **handlep FLARG) {
	isc_nmsocket_t *sock = NULL;
	isc_nmhandle_t *handle = NULL;

	REQUIRE(handlep != NULL);
	REQUIRE(VALID_NMHANDLE(*handlep));

	/* Take ownership of the caller's pointer and clear it. */
	handle = *handlep;
	*handlep = NULL;

	NETMGR_TRACE_LOG("isc__nmhandle_detach():%p->references = %" PRIuFAST32
			 "\n",
			 handle, isc_refcount_current(&handle->references) - 1);

	/* Not the last reference: nothing more to do. */
	if (isc_refcount_decrement(&handle->references) > 1) {
		return;
	}

	/* We need an acquire memory barrier here */
	(void)isc_refcount_current(&handle->references);

	/* Break the handle<->socket link before running user callbacks. */
	sock = handle->sock;
	handle->sock = NULL;

	/* Let the owner reset its per-handle opaque state, if requested. */
	if (handle->doreset != NULL) {
		handle->doreset(handle->opaque);
	}

#if HAVE_LIBNGHTTP2
	/* An HTTP handle may still hold a session reference; drop it. */
	if (sock->type == isc_nm_httpsocket && handle->httpsession != NULL) {
		isc__nm_httpsession_detach(&handle->httpsession);
	}
#endif

	nmhandle_deactivate(sock, handle);

	/*
	 * The handle is gone now. If the socket has a callback configured
	 * for that (e.g., to perform cleanup after request processing),
	 * call it now.
	 */
	if (sock->closehandle_cb != NULL) {
		sock->closehandle_cb(sock);
	}

	if (handle == sock->statichandle) {
		/* statichandle is assigned, not attached. */
		sock->statichandle = NULL;
	}

	/* Release the reference the handle held on its socket. */
	isc___nmsocket_detach(&sock FLARG_PASS);
}
|
|
|
|
|
|
|
|
void *
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_getdata(isc_nmhandle_t *handle) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
return (handle->opaque);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc_nmhandle_setdata(isc_nmhandle_t *handle, void *arg,
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nm_opaquecb_t doreset, isc_nm_opaquecb_t dofree) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
handle->opaque = arg;
|
|
|
|
handle->doreset = doreset;
|
|
|
|
handle->dofree = dofree;
|
|
|
|
}
|
|
|
|
|
2021-03-18 09:27:38 +01:00
|
|
|
/*
 * Ensure sock->buf can hold at least 'len' bytes of DNS message data.
 *
 * Two sizes are used: NM_REG_BUF for the common case and NM_BIG_BUF as
 * the hard upper bound (enforced by the REQUIRE below).  A buffer that
 * already exists but is too small is grown straight to NM_BIG_BUF.
 */
void
isc__nm_alloc_dnsbuf(isc_nmsocket_t *sock, size_t len) {
	REQUIRE(len <= NM_BIG_BUF);

	if (sock->buf == NULL) {
		/* We don't have the buffer at all */
		size_t alloc_len = len < NM_REG_BUF ? NM_REG_BUF : NM_BIG_BUF;
		sock->buf = isc_mem_get(sock->worker->mctx, alloc_len);
		sock->buf_size = alloc_len;
	} else {
		/* We have the buffer but it's too small */
		/*
		 * NOTE(review): this regrows even when buf_size is already
		 * NM_BIG_BUF; isc_mem_reget() with equal sizes is presumably
		 * a cheap no-op -- confirm.
		 */
		sock->buf = isc_mem_reget(sock->worker->mctx, sock->buf,
					  sock->buf_size, NM_BIG_BUF);
		sock->buf_size = NM_BIG_BUF;
	}
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_failed_send_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
|
2022-11-24 17:11:22 +01:00
|
|
|
isc_result_t eresult, bool async) {
|
2021-03-18 09:27:38 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(VALID_UVREQ(req));
|
|
|
|
|
|
|
|
if (req->cb.send != NULL) {
|
2022-11-24 17:11:22 +01:00
|
|
|
isc__nm_sendcb(sock, req, eresult, async);
|
2021-03-18 09:27:38 +01:00
|
|
|
} else {
|
|
|
|
isc__nm_uvreq_put(&req, sock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Clean up after a failed accept on a server-side socket: release the
 * connection quota and the listener reference, clear the 'accepting'
 * flag, and log the failure unless it was a benign disconnect.
 */
void
isc__nm_failed_accept_cb(isc_nmsocket_t *sock, isc_result_t eresult) {
	REQUIRE(atomic_load(&sock->accepting));
	REQUIRE(sock->server);

	/*
	 * Detach the quota early to make room for other connections;
	 * otherwise it'd be detached later asynchronously, and clog
	 * the quota unnecessarily.
	 */
	if (sock->quota != NULL) {
		isc_quota_detach(&sock->quota);
	}

	/* Drop the reference held on the listening (server) socket. */
	isc__nmsocket_detach(&sock->server);

	atomic_store(&sock->accepting, false);

	switch (eresult) {
	case ISC_R_NOTCONNECTED:
		/* IGNORE: The client disconnected before we could accept */
		break;
	default:
		isc__nmsocket_log(sock, ISC_LOG_ERROR,
				  "Accepting TCP connection failed: %s",
				  isc_result_totext(eresult));
	}
}
|
|
|
|
|
|
|
|
/*
 * Report a failed outgoing connection attempt to the caller.
 *
 * Must run on the socket's own thread (see the tid REQUIRE).  Bumps the
 * connect-failure statistic, stops the connect timer, flips the
 * 'connecting' flag back off, delivers 'eresult' through the connect
 * callback, and finally marks the socket for destruction.
 */
void
isc__nm_failed_connect_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
			  isc_result_t eresult, bool async) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(req));
	REQUIRE(sock->tid == isc_tid());
	REQUIRE(req->cb.connect != NULL);

	isc__nm_incstats(sock, STATID_CONNECTFAIL);

	isc__nmsocket_timer_stop(sock);
	/*
	 * NOTE(review): re-points the timer's data at the socket --
	 * presumably it referenced the connect request until now; confirm
	 * against isc__nmsocket_connecttimeout_cb().
	 */
	uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);

	/* The socket must have been 'connecting'; enforce the transition. */
	atomic_compare_exchange_enforced(&sock->connecting, &(bool){ true },
					 false);

	isc__nmsocket_clearcb(sock);
	isc__nm_connectcb(sock, req, eresult, async);

	isc__nmsocket_prep_destroy(sock);
}
|
|
|
|
|
|
|
|
/*
 * Dispatch a failed read to the transport-specific handler matching the
 * socket's type; 'result' and 'async' are forwarded unchanged.  An
 * unknown socket type is a programming error.
 */
void
isc__nm_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result, bool async) {
	REQUIRE(VALID_NMSOCK(sock));
	switch (sock->type) {
	case isc_nm_udpsocket:
		isc__nm_udp_failed_read_cb(sock, result, async);
		return;
	case isc_nm_tcpsocket:
		isc__nm_tcp_failed_read_cb(sock, result, async);
		return;
	case isc_nm_tlsdnssocket:
		isc__nm_tlsdns_failed_read_cb(sock, result, async);
		return;
	case isc_nm_tlssocket:
		isc__nm_tls_failed_read_cb(sock, result, async);
		return;
	case isc_nm_streamdnssocket:
		isc__nm_streamdns_failed_read_cb(sock, result, async);
		return;
	default:
		UNREACHABLE();
	}
}
|
|
|
|
|
2021-03-30 09:25:09 +02:00
|
|
|
/*
 * libuv timer callback fired when an outgoing connection attempt has
 * exceeded its timeout: marks the socket timed out and shuts it down.
 *
 * Pointer recovery: the timer's data is the uv_connect_t; the
 * uv_connect_t's handle carries the socket, and the uv_connect_t
 * itself carries our isc__nm_uvreq_t.
 */
void
isc__nmsocket_connecttimeout_cb(uv_timer_t *timer) {
	uv_connect_t *uvreq = uv_handle_get_data((uv_handle_t *)timer);
	isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)uvreq->handle);
	isc__nm_uvreq_t *req = uv_handle_get_data((uv_handle_t *)uvreq);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_tid());
	REQUIRE(atomic_load(&sock->connecting));
	REQUIRE(VALID_UVREQ(req));
	REQUIRE(VALID_NMHANDLE(req->handle));

	isc__nmsocket_timer_stop(sock);

	/* A pending TLS connect request, if any, must be this request. */
	if (sock->tls.pending_req != NULL) {
		REQUIRE(req == sock->tls.pending_req);
		sock->tls.pending_req = NULL;
	}

	/*
	 * Mark the connection as timed out and shutdown the socket.
	 */
	atomic_compare_exchange_enforced(&sock->timedout, &(bool){ false },
					 true);
	isc__nmsocket_clearcb(sock);
	isc__nmsocket_shutdown(sock);
}
|
|
|
|
|
2021-12-01 17:41:20 +01:00
|
|
|
void
|
2022-12-07 09:45:34 +01:00
|
|
|
isc__nm_accept_connection_log(isc_nmsocket_t *sock, isc_result_t result,
|
|
|
|
bool can_log_quota) {
|
2021-12-01 17:41:20 +01:00
|
|
|
int level;
|
|
|
|
|
|
|
|
switch (result) {
|
|
|
|
case ISC_R_SUCCESS:
|
|
|
|
case ISC_R_NOCONN:
|
|
|
|
return;
|
|
|
|
case ISC_R_QUOTA:
|
|
|
|
case ISC_R_SOFTQUOTA:
|
|
|
|
if (!can_log_quota) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
level = ISC_LOG_INFO;
|
|
|
|
break;
|
|
|
|
case ISC_R_NOTCONNECTED:
|
|
|
|
level = ISC_LOG_INFO;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
level = ISC_LOG_ERROR;
|
|
|
|
}
|
|
|
|
|
2022-12-07 09:45:34 +01:00
|
|
|
isc__nmsocket_log(sock, level, "Accepting TCP connection failed: %s",
|
|
|
|
isc_result_totext(result));
|
2021-12-01 17:41:20 +01:00
|
|
|
}
|
|
|
|
|
2022-02-09 11:21:04 +01:00
|
|
|
void
|
2022-03-10 13:51:08 +01:00
|
|
|
isc__nmsocket_writetimeout_cb(void *data, isc_result_t eresult) {
|
|
|
|
isc__nm_uvreq_t *req = data;
|
|
|
|
isc_nmsocket_t *sock = NULL;
|
2022-02-09 11:21:04 +01:00
|
|
|
|
2022-03-10 13:51:08 +01:00
|
|
|
REQUIRE(eresult == ISC_R_TIMEDOUT);
|
|
|
|
REQUIRE(VALID_UVREQ(req));
|
|
|
|
REQUIRE(VALID_NMSOCK(req->sock));
|
|
|
|
|
|
|
|
sock = req->sock;
|
2022-02-09 11:21:04 +01:00
|
|
|
|
2022-02-15 14:41:15 +01:00
|
|
|
isc__nmsocket_reset(sock);
|
2022-02-09 11:21:04 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * libuv timer callback fired when a read has been pending longer than
 * the socket's read timeout.
 *
 * Client sockets get an ISC_R_TIMEDOUT read callback first; if the
 * timer is still stopped afterwards (presumably the recv callback may
 * re-arm it -- confirm), the read is then failed for good.  Non-client
 * sockets fail the read outright.
 */
void
isc__nmsocket_readtimeout_cb(uv_timer_t *timer) {
	isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)timer);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_tid());
	REQUIRE(sock->reading);

	if (atomic_load(&sock->client)) {
		uv_timer_stop(timer);

		sock->recv_read = false;

		/* Deliver the timeout through the receive callback. */
		if (sock->recv_cb != NULL) {
			isc__nm_uvreq_t *req = isc__nm_get_read_req(sock, NULL);
			isc__nm_readcb(sock, req, ISC_R_TIMEDOUT, false);
		}

		/* If nothing restarted the timer, give up on the read. */
		if (!isc__nmsocket_timer_running(sock)) {
			isc__nmsocket_clearcb(sock);
			isc__nm_failed_read_cb(sock, ISC_R_CANCELED, false);
		}
	} else {
		isc__nm_failed_read_cb(sock, ISC_R_TIMEDOUT, false);
	}
}
|
|
|
|
|
2020-11-02 19:58:05 -08:00
|
|
|
/*
 * (Re)arm the socket's read/connect timer.
 *
 * TLS and stream-DNS sockets manage their own timers and delegate to
 * their type-specific implementations.  A timer whose handle is already
 * closing must not be started again.  While connecting, the connect
 * timeout applies; otherwise the read timeout does (zero disables
 * either).
 */
void
isc__nmsocket_timer_restart(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));

	switch (sock->type) {
	case isc_nm_tlssocket:
		isc__nmsocket_tls_timer_restart(sock);
		return;
	case isc_nm_streamdnssocket:
		isc__nmsocket_streamdns_timer_restart(sock);
		return;
	default:
		break;
	}

	/* Starting a closing libuv handle would be an error. */
	if (uv_is_closing((uv_handle_t *)&sock->read_timer)) {
		return;
	}

	if (atomic_load(&sock->connecting)) {
		int r;

		if (sock->connect_timeout == 0) {
			return;
		}

		/*
		 * NOTE(review): 10 (ms, per uv_timer_start) of slack is
		 * added to the connect timeout -- presumably so the
		 * OS-level connect can fail first; confirm.
		 */
		r = uv_timer_start(&sock->read_timer,
				   isc__nmsocket_connecttimeout_cb,
				   sock->connect_timeout + 10, 0);
		UV_RUNTIME_CHECK(uv_timer_start, r);

	} else {
		int r;

		if (sock->read_timeout == 0) {
			return;
		}

		r = uv_timer_start(&sock->read_timer,
				   isc__nmsocket_readtimeout_cb,
				   sock->read_timeout, 0);
		UV_RUNTIME_CHECK(uv_timer_start, r);
	}
}
|
|
|
|
|
2021-03-29 10:52:05 +02:00
|
|
|
bool
|
|
|
|
isc__nmsocket_timer_running(isc_nmsocket_t *sock) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
2022-07-26 17:36:32 +03:00
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tlssocket:
|
|
|
|
return (isc__nmsocket_tls_timer_running(sock));
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
return (isc__nmsocket_streamdns_timer_running(sock));
|
2022-07-26 17:36:32 +03:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2022-02-09 10:59:08 +01:00
|
|
|
return (uv_is_active((uv_handle_t *)&sock->read_timer));
|
2021-03-29 10:52:05 +02:00
|
|
|
}
|
|
|
|
|
2021-03-16 09:03:02 +01:00
|
|
|
void
|
|
|
|
isc__nmsocket_timer_start(isc_nmsocket_t *sock) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
2021-03-29 10:52:05 +02:00
|
|
|
if (isc__nmsocket_timer_running(sock)) {
|
2021-03-16 09:03:02 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
isc__nmsocket_timer_restart(sock);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nmsocket_timer_stop(isc_nmsocket_t *sock) {
|
2022-02-15 14:51:02 +01:00
|
|
|
int r;
|
|
|
|
|
2021-03-16 09:03:02 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
2022-07-27 16:26:55 +03:00
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nmsocket_tls_timer_stop(sock);
|
|
|
|
return;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nmsocket_streamdns_timer_stop(sock);
|
|
|
|
return;
|
2022-07-27 16:26:55 +03:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2021-03-29 10:52:05 +02:00
|
|
|
/* uv_timer_stop() is idempotent, no need to check if running */
|
2021-03-16 09:03:02 +01:00
|
|
|
|
2022-02-09 10:59:08 +01:00
|
|
|
r = uv_timer_stop(&sock->read_timer);
|
2022-02-15 14:51:02 +01:00
|
|
|
UV_RUNTIME_CHECK(uv_timer_stop, r);
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
isc__nm_uvreq_t *
|
|
|
|
isc__nm_get_read_req(isc_nmsocket_t *sock, isc_sockaddr_t *sockaddr) {
|
|
|
|
isc__nm_uvreq_t *req = NULL;
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
req = isc__nm_uvreq_get(sock->worker, sock);
|
2021-03-16 09:03:02 +01:00
|
|
|
req->cb.recv = sock->recv_cb;
|
|
|
|
req->cbarg = sock->recv_cbarg;
|
|
|
|
|
2021-03-29 10:52:05 +02:00
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
case isc_nm_tlssocket:
|
2021-03-16 09:03:02 +01:00
|
|
|
isc_nmhandle_attach(sock->statichandle, &req->handle);
|
2021-03-29 10:52:05 +02:00
|
|
|
break;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc_nmhandle_attach(sock->recv_handle, &req->handle);
|
|
|
|
break;
|
2021-03-29 10:52:05 +02:00
|
|
|
default:
|
2021-10-17 13:30:47 -07:00
|
|
|
if (atomic_load(&sock->client) && sock->statichandle != NULL) {
|
2021-03-29 10:52:05 +02:00
|
|
|
isc_nmhandle_attach(sock->statichandle, &req->handle);
|
|
|
|
} else {
|
|
|
|
req->handle = isc__nmhandle_get(sock, sockaddr, NULL);
|
|
|
|
}
|
|
|
|
break;
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
|
2021-03-29 10:52:05 +02:00
|
|
|
return (req);
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*%<
|
Fix the UDP recvmmsg support
Previously, the netmgr/udp.c tried to detect the recvmmsg detection in
libuv with #ifdef UV_UDP_<foo> preprocessor macros. However, because
the UV_UDP_<foo> are not preprocessor macros, but enum members, the
detection didn't work. Because the detection didn't work, the code
didn't have access to the information when we received the final chunk
of the recvmmsg and tried to free the uvbuf every time. Fortunately,
the isc__nm_free_uvbuf() had a kludge that detected attempt to free in
the middle of the receive buffer, so the code worked.
However, libuv 1.37.0 changed the way the recvmmsg was enabled from
implicit to explicit, and we checked for yet another enum member
presence with preprocessor macro, so in fact libuv recvmmsg support was
never enabled with libuv >= 1.37.0.
This commit changes to the preprocessor macros to autoconf checks for
declaration, so the detection now works again. On top of that, it's now
possible to cleanup the alloc_cb and free_uvbuf functions because now,
the information whether we can or cannot free the buffer is available to
us.
2022-01-11 12:14:23 +01:00
|
|
|
* Allocator callback for read operations.
|
2021-03-16 09:03:02 +01:00
|
|
|
*
|
|
|
|
* Note this doesn't actually allocate anything, it just assigns the
|
|
|
|
* worker's receive buffer to a socket, and marks it as "in use".
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
isc__nm_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf) {
|
|
|
|
isc_nmsocket_t *sock = uv_handle_get_data(handle);
|
|
|
|
isc__networker_t *worker = NULL;
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
Fix the UDP recvmmsg support
Previously, the netmgr/udp.c tried to detect the recvmmsg detection in
libuv with #ifdef UV_UDP_<foo> preprocessor macros. However, because
the UV_UDP_<foo> are not preprocessor macros, but enum members, the
detection didn't work. Because the detection didn't work, the code
didn't have access to the information when we received the final chunk
of the recvmmsg and tried to free the uvbuf every time. Fortunately,
the isc__nm_free_uvbuf() had a kludge that detected attempt to free in
the middle of the receive buffer, so the code worked.
However, libuv 1.37.0 changed the way the recvmmsg was enabled from
implicit to explicit, and we checked for yet another enum member
presence with preprocessor macro, so in fact libuv recvmmsg support was
never enabled with libuv >= 1.37.0.
This commit changes to the preprocessor macros to autoconf checks for
declaration, so the detection now works again. On top of that, it's now
possible to cleanup the alloc_cb and free_uvbuf functions because now,
the information whether we can or cannot free the buffer is available to
us.
2022-01-11 12:14:23 +01:00
|
|
|
/*
|
|
|
|
* The size provided by libuv is only suggested size, and it always
|
|
|
|
* defaults to 64 * 1024 in the current versions of libuv (see
|
|
|
|
* src/unix/udp.c and src/unix/stream.c).
|
|
|
|
*/
|
|
|
|
UNUSED(size);
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
worker = sock->worker;
|
Fix the UDP recvmmsg support
Previously, the netmgr/udp.c tried to detect the recvmmsg detection in
libuv with #ifdef UV_UDP_<foo> preprocessor macros. However, because
the UV_UDP_<foo> are not preprocessor macros, but enum members, the
detection didn't work. Because the detection didn't work, the code
didn't have access to the information when we received the final chunk
of the recvmmsg and tried to free the uvbuf every time. Fortunately,
the isc__nm_free_uvbuf() had a kludge that detected attempt to free in
the middle of the receive buffer, so the code worked.
However, libuv 1.37.0 changed the way the recvmmsg was enabled from
implicit to explicit, and we checked for yet another enum member
presence with preprocessor macro, so in fact libuv recvmmsg support was
never enabled with libuv >= 1.37.0.
This commit changes to the preprocessor macros to autoconf checks for
declaration, so the detection now works again. On top of that, it's now
possible to cleanup the alloc_cb and free_uvbuf functions because now,
the information whether we can or cannot free the buffer is available to
us.
2022-01-11 12:14:23 +01:00
|
|
|
INSIST(!worker->recvbuf_inuse);
|
|
|
|
INSIST(worker->recvbuf != NULL);
|
2021-03-16 09:03:02 +01:00
|
|
|
|
|
|
|
switch (sock->type) {
|
2020-11-02 19:58:05 -08:00
|
|
|
case isc_nm_udpsocket:
|
Fix the UDP recvmmsg support
Previously, the netmgr/udp.c tried to detect the recvmmsg detection in
libuv with #ifdef UV_UDP_<foo> preprocessor macros. However, because
the UV_UDP_<foo> are not preprocessor macros, but enum members, the
detection didn't work. Because the detection didn't work, the code
didn't have access to the information when we received the final chunk
of the recvmmsg and tried to free the uvbuf every time. Fortunately,
the isc__nm_free_uvbuf() had a kludge that detected attempt to free in
the middle of the receive buffer, so the code worked.
However, libuv 1.37.0 changed the way the recvmmsg was enabled from
implicit to explicit, and we checked for yet another enum member
presence with preprocessor macro, so in fact libuv recvmmsg support was
never enabled with libuv >= 1.37.0.
This commit changes to the preprocessor macros to autoconf checks for
declaration, so the detection now works again. On top of that, it's now
possible to cleanup the alloc_cb and free_uvbuf functions because now,
the information whether we can or cannot free the buffer is available to
us.
2022-01-11 12:14:23 +01:00
|
|
|
buf->len = ISC_NETMGR_UDP_RECVBUF_SIZE;
|
2020-11-02 19:58:05 -08:00
|
|
|
break;
|
2021-03-31 12:14:54 +02:00
|
|
|
case isc_nm_tcpsocket:
|
2021-03-16 09:03:02 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
Fix the UDP recvmmsg support
Previously, the netmgr/udp.c tried to detect the recvmmsg detection in
libuv with #ifdef UV_UDP_<foo> preprocessor macros. However, because
the UV_UDP_<foo> are not preprocessor macros, but enum members, the
detection didn't work. Because the detection didn't work, the code
didn't have access to the information when we received the final chunk
of the recvmmsg and tried to free the uvbuf every time. Fortunately,
the isc__nm_free_uvbuf() had a kludge that detected attempt to free in
the middle of the receive buffer, so the code worked.
However, libuv 1.37.0 changed the way the recvmmsg was enabled from
implicit to explicit, and we checked for yet another enum member
presence with preprocessor macro, so in fact libuv recvmmsg support was
never enabled with libuv >= 1.37.0.
This commit changes to the preprocessor macros to autoconf checks for
declaration, so the detection now works again. On top of that, it's now
possible to cleanup the alloc_cb and free_uvbuf functions because now,
the information whether we can or cannot free the buffer is available to
us.
2022-01-11 12:14:23 +01:00
|
|
|
buf->len = ISC_NETMGR_TCP_RECVBUF_SIZE;
|
2021-03-16 09:03:02 +01:00
|
|
|
break;
|
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
|
Fix the UDP recvmmsg support
Previously, the netmgr/udp.c tried to detect the recvmmsg detection in
libuv with #ifdef UV_UDP_<foo> preprocessor macros. However, because
the UV_UDP_<foo> are not preprocessor macros, but enum members, the
detection didn't work. Because the detection didn't work, the code
didn't have access to the information when we received the final chunk
of the recvmmsg and tried to free the uvbuf every time. Fortunately,
the isc__nm_free_uvbuf() had a kludge that detected attempt to free in
the middle of the receive buffer, so the code worked.
However, libuv 1.37.0 changed the way the recvmmsg was enabled from
implicit to explicit, and we checked for yet another enum member
presence with preprocessor macro, so in fact libuv recvmmsg support was
never enabled with libuv >= 1.37.0.
This commit changes to the preprocessor macros to autoconf checks for
declaration, so the detection now works again. On top of that, it's now
possible to cleanup the alloc_cb and free_uvbuf functions because now,
the information whether we can or cannot free the buffer is available to
us.
2022-01-11 12:14:23 +01:00
|
|
|
REQUIRE(buf->len <= ISC_NETMGR_RECVBUF_SIZE);
|
2021-03-16 09:03:02 +01:00
|
|
|
buf->base = worker->recvbuf;
|
Fix the UDP recvmmsg support
Previously, the netmgr/udp.c tried to detect the recvmmsg detection in
libuv with #ifdef UV_UDP_<foo> preprocessor macros. However, because
the UV_UDP_<foo> are not preprocessor macros, but enum members, the
detection didn't work. Because the detection didn't work, the code
didn't have access to the information when we received the final chunk
of the recvmmsg and tried to free the uvbuf every time. Fortunately,
the isc__nm_free_uvbuf() had a kludge that detected attempt to free in
the middle of the receive buffer, so the code worked.
However, libuv 1.37.0 changed the way the recvmmsg was enabled from
implicit to explicit, and we checked for yet another enum member
presence with preprocessor macro, so in fact libuv recvmmsg support was
never enabled with libuv >= 1.37.0.
This commit changes to the preprocessor macros to autoconf checks for
declaration, so the detection now works again. On top of that, it's now
possible to cleanup the alloc_cb and free_uvbuf functions because now,
the information whether we can or cannot free the buffer is available to
us.
2022-01-11 12:14:23 +01:00
|
|
|
|
2021-03-16 09:03:02 +01:00
|
|
|
worker->recvbuf_inuse = true;
|
|
|
|
}
|
|
|
|
|
2022-06-14 09:17:08 +02:00
|
|
|
isc_result_t
|
2021-03-16 09:03:02 +01:00
|
|
|
isc__nm_start_reading(isc_nmsocket_t *sock) {
|
2022-06-14 09:17:08 +02:00
|
|
|
isc_result_t result = ISC_R_SUCCESS;
|
2021-03-16 09:03:02 +01:00
|
|
|
int r;
|
|
|
|
|
2022-08-29 10:55:10 +02:00
|
|
|
if (sock->reading) {
|
2022-06-14 09:17:08 +02:00
|
|
|
return (ISC_R_SUCCESS);
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_udpsocket:
|
|
|
|
r = uv_udp_recv_start(&sock->uv_handle.udp, isc__nm_alloc_cb,
|
|
|
|
isc__nm_udp_read_cb);
|
2020-11-02 19:58:05 -08:00
|
|
|
break;
|
2021-03-31 12:14:54 +02:00
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
r = uv_read_start(&sock->uv_handle.stream, isc__nm_alloc_cb,
|
|
|
|
isc__nm_tcp_read_cb);
|
|
|
|
break;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
2021-03-16 09:03:02 +01:00
|
|
|
r = uv_read_start(&sock->uv_handle.stream, isc__nm_alloc_cb,
|
|
|
|
isc__nm_tlsdns_read_cb);
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
break;
|
2021-03-16 09:03:02 +01:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
2022-06-14 09:17:08 +02:00
|
|
|
if (r != 0) {
|
|
|
|
result = isc_uverr2result(r);
|
|
|
|
} else {
|
2022-08-29 10:55:10 +02:00
|
|
|
sock->reading = true;
|
2022-06-14 09:17:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return (result);
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_stop_reading(isc_nmsocket_t *sock) {
|
|
|
|
int r;
|
|
|
|
|
2022-08-29 10:55:10 +02:00
|
|
|
if (!sock->reading) {
|
2021-03-16 09:03:02 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_udpsocket:
|
|
|
|
r = uv_udp_recv_stop(&sock->uv_handle.udp);
|
2022-02-15 14:51:02 +01:00
|
|
|
UV_RUNTIME_CHECK(uv_udp_recv_stop, r);
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; remove it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address.
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
break;
|
2021-03-31 12:14:54 +02:00
|
|
|
case isc_nm_tcpsocket:
|
2021-03-16 09:03:02 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
r = uv_read_stop(&sock->uv_handle.stream);
|
2022-02-15 14:51:02 +01:00
|
|
|
UV_RUNTIME_CHECK(uv_read_stop, r);
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; remove it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address.
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
break;
|
2020-11-02 19:58:05 -08:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-11-02 19:58:05 -08:00
|
|
|
}
|
2022-08-29 10:55:10 +02:00
|
|
|
sock->reading = false;
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
isc__nm_closing(isc__networker_t *worker) {
	/*
	 * True once shutdown has been initiated on this networker
	 * worker; used to short-circuit new work on closing sockets.
	 */
	return (worker->shuttingdown);
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
isc__nmsocket_closing(isc_nmsocket_t *sock) {
|
2021-03-16 09:03:02 +01:00
|
|
|
return (!isc__nmsocket_active(sock) || atomic_load(&sock->closing) ||
|
2022-07-26 13:03:45 +02:00
|
|
|
isc__nm_closing(sock->worker) ||
|
2021-03-16 09:03:02 +01:00
|
|
|
(sock->server != NULL && !isc__nmsocket_active(sock->server)));
|
|
|
|
}
|
|
|
|
|
|
|
|
static isc_result_t
|
|
|
|
processbuffer(isc_nmsocket_t *sock) {
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tlsdnssocket:
|
2021-03-18 21:19:18 +01:00
|
|
|
return (isc__nm_tlsdns_processbuffer(sock));
|
2021-03-16 09:03:02 +01:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process a DNS message.
|
|
|
|
*
|
|
|
|
* If we only have an incomplete DNS message, we don't touch any
|
|
|
|
* timers. If we do have a full message, reset the timer.
|
|
|
|
*
|
2022-01-27 10:09:07 +01:00
|
|
|
* Stop reading if this is a client socket. In this case we'll be
|
|
|
|
* called again later by isc__nm_resume_processing().
|
2021-03-16 09:03:02 +01:00
|
|
|
*/
|
2022-06-14 09:17:08 +02:00
|
|
|
isc_result_t
|
2021-03-16 09:03:02 +01:00
|
|
|
isc__nm_process_sock_buffer(isc_nmsocket_t *sock) {
|
|
|
|
for (;;) {
|
|
|
|
int_fast32_t ah = atomic_load(&sock->ah);
|
|
|
|
isc_result_t result = processbuffer(sock);
|
|
|
|
switch (result) {
|
|
|
|
case ISC_R_NOMORE:
|
|
|
|
/*
|
|
|
|
* Don't reset the timer until we have a
|
|
|
|
* full DNS message.
|
|
|
|
*/
|
2022-06-14 09:17:08 +02:00
|
|
|
result = isc__nm_start_reading(sock);
|
|
|
|
if (result != ISC_R_SUCCESS) {
|
|
|
|
return (result);
|
|
|
|
}
|
2021-03-16 09:03:02 +01:00
|
|
|
/*
|
|
|
|
* Start the timer only if there are no externally used
|
|
|
|
* active handles, there's always one active handle
|
|
|
|
* attached internally to sock->recv_handle in
|
|
|
|
* accept_connection()
|
|
|
|
*/
|
|
|
|
if (ah == 1) {
|
|
|
|
isc__nmsocket_timer_start(sock);
|
|
|
|
}
|
2022-06-14 09:17:08 +02:00
|
|
|
goto done;
|
2021-03-16 09:03:02 +01:00
|
|
|
case ISC_R_CANCELED:
|
|
|
|
isc__nmsocket_timer_stop(sock);
|
|
|
|
isc__nm_stop_reading(sock);
|
2022-06-14 09:17:08 +02:00
|
|
|
goto done;
|
2021-03-16 09:03:02 +01:00
|
|
|
case ISC_R_SUCCESS:
|
|
|
|
/*
|
|
|
|
* Stop the timer on the successful message read, this
|
|
|
|
* also allows to restart the timer when we have no more
|
|
|
|
* data.
|
|
|
|
*/
|
|
|
|
isc__nmsocket_timer_stop(sock);
|
|
|
|
|
2022-01-27 10:09:07 +01:00
|
|
|
if (atomic_load(&sock->client)) {
|
2021-03-16 09:03:02 +01:00
|
|
|
isc__nm_stop_reading(sock);
|
2022-06-14 09:17:08 +02:00
|
|
|
goto done;
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
2022-03-28 12:59:43 +02:00
|
|
|
UNREACHABLE();
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
}
|
2022-06-14 09:17:08 +02:00
|
|
|
done:
|
|
|
|
return (ISC_R_SUCCESS);
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_resume_processing(void *arg) {
|
|
|
|
isc_nmsocket_t *sock = (isc_nmsocket_t *)arg;
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
2022-07-26 13:03:45 +02:00
|
|
|
REQUIRE(sock->tid == isc_tid());
|
2021-03-16 09:03:02 +01:00
|
|
|
REQUIRE(!atomic_load(&sock->client));
|
|
|
|
|
2021-03-31 11:48:41 +02:00
|
|
|
if (isc__nmsocket_closing(sock)) {
|
2021-03-16 09:03:02 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
isc__nm_process_sock_buffer(sock);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc_nmhandle_cleartimeout(isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
2021-04-21 13:52:15 +02:00
|
|
|
#if HAVE_LIBNGHTTP2
|
2021-03-16 09:03:02 +01:00
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nm_http_cleartimeout(handle);
|
|
|
|
return;
|
2022-10-18 15:36:00 +03:00
|
|
|
#endif
|
2021-03-16 09:03:02 +01:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nm_tls_cleartimeout(handle);
|
|
|
|
return;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nmhandle_streamdns_cleartimeout(handle);
|
|
|
|
return;
|
2021-03-16 09:03:02 +01:00
|
|
|
default:
|
|
|
|
handle->sock->read_timeout = 0;
|
|
|
|
|
2022-02-09 10:59:08 +01:00
|
|
|
if (uv_is_active((uv_handle_t *)&handle->sock->read_timer)) {
|
2021-03-16 09:03:02 +01:00
|
|
|
isc__nmsocket_timer_stop(handle->sock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc_nmhandle_settimeout(isc_nmhandle_t *handle, uint32_t timeout) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
2021-04-21 13:52:15 +02:00
|
|
|
#if HAVE_LIBNGHTTP2
|
2021-03-16 09:03:02 +01:00
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nm_http_settimeout(handle, timeout);
|
|
|
|
return;
|
2022-10-18 15:36:00 +03:00
|
|
|
#endif
|
2021-03-16 09:03:02 +01:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nm_tls_settimeout(handle, timeout);
|
|
|
|
return;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nmhandle_streamdns_settimeout(handle, timeout);
|
|
|
|
return;
|
2021-03-16 09:03:02 +01:00
|
|
|
default:
|
|
|
|
handle->sock->read_timeout = timeout;
|
2021-03-29 10:52:05 +02:00
|
|
|
isc__nmsocket_timer_restart(handle->sock);
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
2020-11-02 19:58:05 -08:00
|
|
|
}
|
|
|
|
|
2020-11-02 18:33:20 -08:00
|
|
|
void
|
|
|
|
isc_nmhandle_keepalive(isc_nmhandle_t *handle, bool value) {
|
2021-07-14 21:12:37 -07:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_nm_t *netmgr = NULL;
|
2021-07-14 21:12:37 -07:00
|
|
|
|
2020-11-02 18:33:20 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
2021-07-14 21:12:37 -07:00
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
2020-11-02 18:33:20 -08:00
|
|
|
|
2021-07-14 21:12:37 -07:00
|
|
|
sock = handle->sock;
|
2022-07-26 13:03:45 +02:00
|
|
|
netmgr = sock->worker->netmgr;
|
2021-07-14 21:12:37 -07:00
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tcpsocket:
|
2020-11-02 18:33:20 -08:00
|
|
|
case isc_nm_tlsdnssocket:
|
2021-07-14 21:12:37 -07:00
|
|
|
atomic_store(&sock->keepalive, value);
|
2022-07-26 13:03:45 +02:00
|
|
|
sock->read_timeout = value ? atomic_load(&netmgr->keepalive)
|
|
|
|
: atomic_load(&netmgr->idle);
|
|
|
|
sock->write_timeout = value ? atomic_load(&netmgr->keepalive)
|
|
|
|
: atomic_load(&netmgr->idle);
|
2020-11-02 18:33:20 -08:00
|
|
|
break;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nmhandle_streamdns_keepalive(handle, value);
|
|
|
|
break;
|
2021-07-14 21:12:37 -07:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nmhandle_tls_keepalive(handle, value);
|
|
|
|
break;
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
2021-07-14 21:12:37 -07:00
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nmhandle_http_keepalive(handle, value);
|
|
|
|
break;
|
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
2020-11-02 18:33:20 -08:00
|
|
|
default:
|
2021-07-14 21:12:37 -07:00
|
|
|
/*
|
|
|
|
* For any other protocol, this is a no-op.
|
|
|
|
*/
|
2020-11-02 18:33:20 -08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-22 18:58:01 -07:00
|
|
|
bool
|
|
|
|
isc_nmhandle_timer_running(isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
return (isc__nmsocket_timer_running(handle->sock));
|
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_sockaddr_t
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_peeraddr(isc_nmhandle_t *handle) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
return (handle->peer);
|
|
|
|
}
|
|
|
|
|
|
|
|
isc_sockaddr_t
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_localaddr(isc_nmhandle_t *handle) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
return (handle->local);
|
|
|
|
}
|
|
|
|
|
2019-11-20 22:33:35 +01:00
|
|
|
isc_nm_t *
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_netmgr(isc_nmhandle_t *handle) {
|
2019-11-20 22:33:35 +01:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
return (handle->sock->worker->netmgr);
|
2019-11-20 22:33:35 +01:00
|
|
|
}
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
/* FIXME: Use per-worker mempool */
|
2019-11-05 13:55:54 -08:00
|
|
|
isc__nm_uvreq_t *
|
2022-07-26 13:03:45 +02:00
|
|
|
isc___nm_uvreq_get(isc__networker_t *worker, isc_nmsocket_t *sock FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
isc__nm_uvreq_t *req = NULL;
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
REQUIRE(worker != NULL);
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
2020-10-22 10:07:56 +02:00
|
|
|
if (sock != NULL && isc__nmsocket_active(sock)) {
|
2019-11-05 13:55:54 -08:00
|
|
|
/* Try to reuse one */
|
|
|
|
req = isc_astack_pop(sock->inactivereqs);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (req == NULL) {
|
2022-07-26 13:03:45 +02:00
|
|
|
req = isc_mem_get(worker->mctx, sizeof(*req));
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2022-07-13 09:34:47 +02:00
|
|
|
*req = (isc__nm_uvreq_t){
|
|
|
|
.magic = 0,
|
|
|
|
.connect_tries = 3,
|
|
|
|
};
|
2020-05-13 17:37:51 +02:00
|
|
|
ISC_LINK_INIT(req, link);
|
2019-11-05 13:55:54 -08:00
|
|
|
req->uv_req.req.data = req;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_attach(sock, &req->sock FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
req->magic = UVREQ_MAGIC;
|
|
|
|
|
|
|
|
return (req);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Release a libuv request previously obtained with
 * isc___nm_uvreq_get().  *req0 is NULLed out.  The request is pushed
 * back onto the socket's reuse stack when possible (except under
 * ASan/TSan builds, where it is always freed so the sanitizers can
 * see the full alloc/free lifecycle); the handle and socket
 * references held by the request are released afterwards.
 */
void
isc___nm_uvreq_put(isc__nm_uvreq_t **req0, isc_nmsocket_t *sock FLARG) {
	isc__nm_uvreq_t *req = NULL;
	isc_nmhandle_t *handle = NULL;

	REQUIRE(req0 != NULL);
	REQUIRE(VALID_UVREQ(*req0));

	req = *req0;
	*req0 = NULL;

	INSIST(sock == req->sock);

	req->magic = 0;

	/*
	 * We need to save this first to make sure that handle,
	 * sock, and the netmgr won't all disappear.
	 */
	handle = req->handle;
	req->handle = NULL;

#if !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__
	/*
	 * Recycle the request via the socket's stack if the socket is
	 * still active and the push succeeds; otherwise free it.
	 * NOTE: 'req' must not be touched after this point.
	 */
	if (!isc__nmsocket_active(sock) ||
	    !isc_astack_trypush(sock->inactivereqs, req))
	{
		isc_mem_put(sock->worker->mctx, req, sizeof(*req));
	}
#else /* !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__ */
	isc_mem_put(sock->worker->mctx, req, sizeof(*req));
#endif /* !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__ */

	/* Drop the references the request was holding; socket last. */
	if (handle != NULL) {
		isc__nmhandle_detach(&handle FLARG_PASS);
	}

	isc___nmsocket_detach(&sock FLARG_PASS);
}
|
|
|
|
|
2020-10-21 12:52:09 +02:00
|
|
|
void
|
2020-02-12 13:59:18 +01:00
|
|
|
isc_nm_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
|
2020-02-13 14:44:37 -08:00
|
|
|
void *cbarg) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
|
|
|
case isc_nm_udpsocket:
|
|
|
|
case isc_nm_udplistener:
|
2020-10-21 12:52:09 +02:00
|
|
|
isc__nm_udp_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
2019-11-05 13:55:54 -08:00
|
|
|
case isc_nm_tcpsocket:
|
2020-10-21 12:52:09 +02:00
|
|
|
isc__nm_tcp_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
isc__nm_tlsdns_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nm_streamdns_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
2021-04-21 13:52:15 +02:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nm_tls_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; remove it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address.
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
case isc_nm_httpsocket:
|
2020-10-31 20:42:18 +01:00
|
|
|
isc__nm_http_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
2021-04-21 13:52:15 +02:00
|
|
|
#endif
|
2019-11-05 13:55:54 -08:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-21 12:52:09 +02:00
|
|
|
void
|
2020-03-20 11:55:10 +01:00
|
|
|
isc_nm_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
2020-09-05 11:07:40 -07:00
|
|
|
case isc_nm_udpsocket:
|
|
|
|
isc__nm_udp_read(handle, cb, cbarg);
|
|
|
|
break;
|
2020-03-20 11:55:10 +01:00
|
|
|
case isc_nm_tcpsocket:
|
2020-10-21 12:52:09 +02:00
|
|
|
isc__nm_tcp_read(handle, cb, cbarg);
|
|
|
|
break;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
isc__nm_tlsdns_read(handle, cb, cbarg);
|
|
|
|
break;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nm_streamdns_read(handle, cb, cbarg);
|
|
|
|
break;
|
2021-04-21 13:52:15 +02:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nm_tls_read(handle, cb, cbarg);
|
|
|
|
break;
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; remove it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address.
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nm_http_read(handle, cb, cbarg);
|
|
|
|
break;
|
2021-04-21 13:52:15 +02:00
|
|
|
#endif
|
2020-03-20 11:55:10 +01:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-03-20 11:55:10 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-05 17:32:36 -07:00
|
|
|
void
|
|
|
|
isc_nm_cancelread(isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
2020-09-05 11:07:40 -07:00
|
|
|
case isc_nm_udpsocket:
|
|
|
|
isc__nm_udp_cancelread(handle);
|
|
|
|
break;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
isc__nm_tlsdns_cancelread(handle);
|
|
|
|
break;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nm_streamdns_cancelread(handle);
|
|
|
|
break;
|
2020-03-20 11:55:10 +01:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-03-20 11:55:10 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-21 12:52:09 +02:00
|
|
|
void
|
2022-08-29 10:55:10 +02:00
|
|
|
isc_nm_read_stop(isc_nmhandle_t *handle) {
|
2020-07-01 16:17:09 -07:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
isc_nmsocket_t *sock = handle->sock;
|
2020-06-05 17:32:36 -07:00
|
|
|
|
2020-03-20 11:55:10 +01:00
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tcpsocket:
|
2022-08-29 10:55:10 +02:00
|
|
|
isc__nm_tcp_read_stop(handle);
|
2020-10-21 12:52:09 +02:00
|
|
|
break;
|
2021-01-25 17:44:39 +02:00
|
|
|
case isc_nm_tlssocket:
|
2022-08-29 10:55:10 +02:00
|
|
|
isc__nm_tls_read_stop(handle);
|
2021-01-25 17:44:39 +02:00
|
|
|
break;
|
2020-03-20 11:55:10 +01:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-03-20 11:55:10 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc_nm_stoplistening(isc_nmsocket_t *sock) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
2020-06-05 17:32:36 -07:00
|
|
|
|
2020-03-20 11:55:10 +01:00
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_udplistener:
|
|
|
|
isc__nm_udp_stoplistening(sock);
|
|
|
|
break;
|
|
|
|
case isc_nm_tcplistener:
|
|
|
|
isc__nm_tcp_stoplistening(sock);
|
|
|
|
break;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnslistener:
|
|
|
|
isc__nm_tlsdns_stoplistening(sock);
|
|
|
|
break;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnslistener:
|
|
|
|
isc__nm_streamdns_stoplistening(sock);
|
|
|
|
break;
|
2021-04-21 13:52:15 +02:00
|
|
|
case isc_nm_tlslistener:
|
|
|
|
isc__nm_tls_stoplistening(sock);
|
|
|
|
break;
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
2020-12-07 14:19:10 +02:00
|
|
|
case isc_nm_httplistener:
|
|
|
|
isc__nm_http_stoplistening(sock);
|
|
|
|
break;
|
2021-04-21 13:52:15 +02:00
|
|
|
#endif
|
2020-03-20 11:55:10 +01:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-03-20 11:55:10 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-10-14 20:45:40 +03:00
|
|
|
void
|
|
|
|
isc__nmsocket_stop(isc_nmsocket_t *listener) {
|
|
|
|
isc__netievent_sockstop_t ievent = { .sock = listener };
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(listener));
|
|
|
|
REQUIRE(listener->tid == isc_tid());
|
|
|
|
REQUIRE(listener->tid == 0);
|
|
|
|
|
|
|
|
if (!atomic_compare_exchange_strong(&listener->closing,
|
2022-11-02 19:33:14 +01:00
|
|
|
&(bool){ false }, true))
|
|
|
|
{
|
2022-10-14 20:45:40 +03:00
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
|
|
|
|
for (size_t i = 1; i < listener->nchildren; i++) {
|
|
|
|
isc__networker_t *worker =
|
|
|
|
&listener->worker->netmgr->workers[i];
|
|
|
|
isc__netievent_sockstop_t *ev =
|
|
|
|
isc__nm_get_netievent_sockstop(worker, listener);
|
|
|
|
isc__nm_enqueue_ievent(worker, (isc__netievent_t *)ev);
|
|
|
|
}
|
|
|
|
|
|
|
|
isc__nm_async_sockstop(listener->worker, (isc__netievent_t *)&ievent);
|
|
|
|
INSIST(atomic_load(&listener->rchildren) == 0);
|
|
|
|
|
|
|
|
if (!atomic_compare_exchange_strong(&listener->listening,
|
|
|
|
&(bool){ true }, false))
|
|
|
|
{
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
|
|
|
|
listener->accept_cb = NULL;
|
|
|
|
listener->accept_cbarg = NULL;
|
|
|
|
listener->recv_cb = NULL;
|
|
|
|
listener->recv_cbarg = NULL;
|
|
|
|
|
|
|
|
if (listener->outer != NULL) {
|
|
|
|
isc_nm_stoplistening(listener->outer);
|
|
|
|
isc__nmsocket_detach(&listener->outer);
|
|
|
|
}
|
|
|
|
|
|
|
|
atomic_store(&listener->closed, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nmsocket_barrier_init(isc_nmsocket_t *listener) {
|
|
|
|
REQUIRE(listener->nchildren > 0);
|
|
|
|
isc_barrier_init(&listener->barrier, listener->nchildren);
|
|
|
|
listener->barrier_initialised = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_async_sockstop(isc__networker_t *worker, isc__netievent_t *ev0) {
|
|
|
|
isc__netievent_sockstop_t *ievent = (isc__netievent_sockstop_t *)ev0;
|
|
|
|
isc_nmsocket_t *listener = ievent->sock;
|
|
|
|
UNUSED(worker);
|
|
|
|
|
|
|
|
(void)atomic_fetch_sub(&listener->rchildren, 1);
|
|
|
|
isc_barrier_wait(&listener->barrier);
|
|
|
|
}
|
|
|
|
|
2021-03-30 09:25:09 +02:00
|
|
|
void
|
|
|
|
isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
|
|
|
|
isc_result_t eresult, bool async) {
|
2020-11-11 10:46:33 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(VALID_UVREQ(uvreq));
|
|
|
|
REQUIRE(VALID_NMHANDLE(uvreq->handle));
|
|
|
|
|
2021-03-30 09:25:09 +02:00
|
|
|
if (!async) {
|
|
|
|
isc__netievent_connectcb_t ievent = { .sock = sock,
|
|
|
|
.req = uvreq,
|
|
|
|
.result = eresult };
|
|
|
|
isc__nm_async_connectcb(NULL, (isc__netievent_t *)&ievent);
|
2021-03-31 18:32:32 +02:00
|
|
|
} else {
|
|
|
|
isc__netievent_connectcb_t *ievent =
|
2022-07-26 13:03:45 +02:00
|
|
|
isc__nm_get_netievent_connectcb(sock->worker, sock,
|
|
|
|
uvreq, eresult);
|
|
|
|
isc__nm_enqueue_ievent(sock->worker,
|
2021-03-31 18:32:32 +02:00
|
|
|
(isc__netievent_t *)ievent);
|
2020-11-11 10:46:33 +01:00
|
|
|
}
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; remove it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address.
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
}
|
|
|
|
|
2020-11-11 10:46:33 +01:00
|
|
|
void
|
|
|
|
isc__nm_async_connectcb(isc__networker_t *worker, isc__netievent_t *ev0) {
|
|
|
|
isc__netievent_connectcb_t *ievent = (isc__netievent_connectcb_t *)ev0;
|
|
|
|
isc_nmsocket_t *sock = ievent->sock;
|
|
|
|
isc__nm_uvreq_t *uvreq = ievent->req;
|
|
|
|
isc_result_t eresult = ievent->result;
|
|
|
|
|
|
|
|
UNUSED(worker);
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(VALID_UVREQ(uvreq));
|
|
|
|
REQUIRE(VALID_NMHANDLE(uvreq->handle));
|
2022-07-26 13:03:45 +02:00
|
|
|
REQUIRE(ievent->sock->tid == isc_tid());
|
2020-11-11 10:46:33 +01:00
|
|
|
REQUIRE(uvreq->cb.connect != NULL);
|
|
|
|
|
|
|
|
uvreq->cb.connect(uvreq->handle, eresult, uvreq->cbarg);
|
|
|
|
|
|
|
|
isc__nm_uvreq_put(&uvreq, sock);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_readcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
|
2022-11-23 14:03:23 +01:00
|
|
|
isc_result_t eresult, bool async) {
|
2020-11-11 10:46:33 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(VALID_UVREQ(uvreq));
|
|
|
|
REQUIRE(VALID_NMHANDLE(uvreq->handle));
|
|
|
|
|
2022-11-23 14:03:23 +01:00
|
|
|
if (!async) {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc__netievent_readcb_t ievent = { .type = netievent_readcb,
|
|
|
|
.sock = sock,
|
2020-12-02 08:54:51 +01:00
|
|
|
.req = uvreq,
|
|
|
|
.result = eresult };
|
|
|
|
|
|
|
|
isc__nm_async_readcb(NULL, (isc__netievent_t *)&ievent);
|
2022-11-23 14:03:23 +01:00
|
|
|
return;
|
2020-11-11 10:46:33 +01:00
|
|
|
}
|
2022-11-23 14:03:23 +01:00
|
|
|
|
|
|
|
isc__netievent_readcb_t *ievent = isc__nm_get_netievent_readcb(
|
|
|
|
sock->worker, sock, uvreq, eresult);
|
|
|
|
isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
|
2020-11-11 10:46:33 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_async_readcb(isc__networker_t *worker, isc__netievent_t *ev0) {
|
|
|
|
isc__netievent_readcb_t *ievent = (isc__netievent_readcb_t *)ev0;
|
|
|
|
isc_nmsocket_t *sock = ievent->sock;
|
|
|
|
isc__nm_uvreq_t *uvreq = ievent->req;
|
|
|
|
isc_result_t eresult = ievent->result;
|
2021-09-28 10:08:26 +10:00
|
|
|
isc_region_t region;
|
2020-11-11 10:46:33 +01:00
|
|
|
|
|
|
|
UNUSED(worker);
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(VALID_UVREQ(uvreq));
|
|
|
|
REQUIRE(VALID_NMHANDLE(uvreq->handle));
|
2022-07-26 13:03:45 +02:00
|
|
|
REQUIRE(sock->tid == isc_tid());
|
2020-11-11 10:46:33 +01:00
|
|
|
|
2021-09-28 10:08:26 +10:00
|
|
|
region.base = (unsigned char *)uvreq->uvbuf.base;
|
|
|
|
region.length = uvreq->uvbuf.len;
|
|
|
|
|
2020-11-11 10:46:33 +01:00
|
|
|
uvreq->cb.recv(uvreq->handle, eresult, ®ion, uvreq->cbarg);
|
|
|
|
|
|
|
|
isc__nm_uvreq_put(&uvreq, sock);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_sendcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
|
2021-03-16 09:03:02 +01:00
|
|
|
isc_result_t eresult, bool async) {
|
2020-11-11 10:46:33 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(VALID_UVREQ(uvreq));
|
|
|
|
REQUIRE(VALID_NMHANDLE(uvreq->handle));
|
|
|
|
|
2021-03-16 09:03:02 +01:00
|
|
|
if (!async) {
|
2020-12-02 08:54:51 +01:00
|
|
|
isc__netievent_sendcb_t ievent = { .sock = sock,
|
|
|
|
.req = uvreq,
|
|
|
|
.result = eresult };
|
|
|
|
isc__nm_async_sendcb(NULL, (isc__netievent_t *)&ievent);
|
2021-03-16 09:03:02 +01:00
|
|
|
return;
|
2020-11-11 10:46:33 +01:00
|
|
|
}
|
2021-03-16 09:03:02 +01:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
isc__netievent_sendcb_t *ievent = isc__nm_get_netievent_sendcb(
|
|
|
|
sock->worker, sock, uvreq, eresult);
|
|
|
|
isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent);
|
2020-11-11 10:46:33 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_async_sendcb(isc__networker_t *worker, isc__netievent_t *ev0) {
|
|
|
|
isc__netievent_sendcb_t *ievent = (isc__netievent_sendcb_t *)ev0;
|
|
|
|
isc_nmsocket_t *sock = ievent->sock;
|
|
|
|
isc__nm_uvreq_t *uvreq = ievent->req;
|
|
|
|
isc_result_t eresult = ievent->result;
|
|
|
|
|
|
|
|
UNUSED(worker);
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(VALID_UVREQ(uvreq));
|
|
|
|
REQUIRE(VALID_NMHANDLE(uvreq->handle));
|
2022-07-26 13:03:45 +02:00
|
|
|
REQUIRE(sock->tid == isc_tid());
|
2020-11-11 10:46:33 +01:00
|
|
|
|
|
|
|
uvreq->cb.send(uvreq->handle, eresult, uvreq->cbarg);
|
|
|
|
|
|
|
|
isc__nm_uvreq_put(&uvreq, sock);
|
|
|
|
}
|
|
|
|
|
2020-10-12 17:51:09 +11:00
|
|
|
void
|
|
|
|
isc__nm_async_detach(isc__networker_t *worker, isc__netievent_t *ev0) {
|
|
|
|
isc__netievent_detach_t *ievent = (isc__netievent_detach_t *)ev0;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
FLARG_IEVENT(ievent);
|
2020-10-12 17:51:09 +11:00
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(ievent->sock));
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
REQUIRE(VALID_NMHANDLE(ievent->handle));
|
2022-07-26 13:03:45 +02:00
|
|
|
REQUIRE(ievent->sock->tid == isc_tid());
|
2020-10-12 17:51:09 +11:00
|
|
|
|
|
|
|
UNUSED(worker);
|
|
|
|
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
nmhandle_detach_cb(&ievent->handle FLARG_PASS);
|
2020-10-12 17:51:09 +11:00
|
|
|
}
|
|
|
|
|
2022-03-10 13:51:08 +01:00
|
|
|
static void
|
|
|
|
reset_shutdown(uv_handle_t *handle) {
|
|
|
|
isc_nmsocket_t *sock = uv_handle_get_data(handle);
|
|
|
|
|
|
|
|
isc__nmsocket_shutdown(sock);
|
2022-03-10 13:58:58 +01:00
|
|
|
isc__nmsocket_detach(&sock);
|
2022-03-10 13:51:08 +01:00
|
|
|
}
|
|
|
|
|
2022-02-15 14:41:15 +01:00
|
|
|
void
|
|
|
|
isc__nmsocket_reset(isc_nmsocket_t *sock) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
|
|
|
switch (sock->type) {
|
2022-02-28 10:25:06 +01:00
|
|
|
case isc_nm_tcpsocket:
|
2022-02-15 14:41:15 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
2022-02-28 10:25:06 +01:00
|
|
|
/*
|
|
|
|
* This can be called from the TCP write timeout, or
|
|
|
|
* from the TCPDNS or TLSDNS branches of isc_nm_bad_request().
|
|
|
|
*/
|
2022-02-15 14:41:15 +01:00
|
|
|
REQUIRE(sock->parent == NULL);
|
|
|
|
break;
|
2022-07-26 17:07:19 +03:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nmsocket_tls_reset(sock);
|
|
|
|
return;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nmsocket_streamdns_reset(sock);
|
|
|
|
return;
|
2022-02-15 14:41:15 +01:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2022-02-15 14:41:15 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2022-03-10 13:58:58 +01:00
|
|
|
if (!uv_is_closing(&sock->uv_handle.handle) &&
|
|
|
|
uv_is_active(&sock->uv_handle.handle))
|
|
|
|
{
|
2022-02-15 14:41:15 +01:00
|
|
|
/*
|
|
|
|
* The real shutdown will be handled in the respective
|
|
|
|
* close functions.
|
|
|
|
*/
|
2022-03-10 13:58:58 +01:00
|
|
|
isc__nmsocket_attach(sock, &(isc_nmsocket_t *){ NULL });
|
2022-03-10 13:51:08 +01:00
|
|
|
int r = uv_tcp_close_reset(&sock->uv_handle.tcp,
|
|
|
|
reset_shutdown);
|
2022-02-15 14:41:15 +01:00
|
|
|
UV_RUNTIME_CHECK(uv_tcp_close_reset, r);
|
2022-03-10 13:58:58 +01:00
|
|
|
} else {
|
|
|
|
isc__nmsocket_shutdown(sock);
|
2022-02-15 14:41:15 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-31 11:48:41 +02:00
|
|
|
void
|
2021-03-30 09:25:09 +02:00
|
|
|
isc__nmsocket_shutdown(isc_nmsocket_t *sock) {
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_udpsocket:
|
2020-10-26 14:19:37 +01:00
|
|
|
isc__nm_udp_shutdown(sock);
|
2020-10-26 12:30:54 +01:00
|
|
|
break;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tcpsocket:
|
2020-10-26 14:19:37 +01:00
|
|
|
isc__nm_tcp_shutdown(sock);
|
2019-11-22 14:13:19 +01:00
|
|
|
break;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
2020-12-17 11:40:29 +01:00
|
|
|
isc__nm_tlsdns_shutdown(sock);
|
2019-11-22 14:13:19 +01:00
|
|
|
break;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_udplistener:
|
|
|
|
case isc_nm_tcplistener:
|
|
|
|
case isc_nm_tlsdnslistener:
|
|
|
|
return;
|
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2019-11-22 14:13:19 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-30 09:25:09 +02:00
|
|
|
static void
|
|
|
|
shutdown_walk_cb(uv_handle_t *handle, void *arg) {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2021-03-30 09:25:09 +02:00
|
|
|
UNUSED(arg);
|
|
|
|
|
|
|
|
if (uv_is_closing(handle)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
sock = uv_handle_get_data(handle);
|
|
|
|
|
2021-03-30 09:25:09 +02:00
|
|
|
switch (handle->type) {
|
|
|
|
case UV_UDP:
|
2022-03-10 13:58:58 +01:00
|
|
|
isc__nmsocket_shutdown(sock);
|
|
|
|
return;
|
2021-03-30 09:25:09 +02:00
|
|
|
case UV_TCP:
|
2022-03-10 13:58:58 +01:00
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
if (sock->parent == NULL) {
|
|
|
|
/* Reset the TCP connections on shutdown */
|
|
|
|
isc__nmsocket_reset(sock);
|
|
|
|
return;
|
|
|
|
}
|
2021-10-11 12:09:16 +02:00
|
|
|
FALLTHROUGH;
|
2022-03-10 13:58:58 +01:00
|
|
|
default:
|
|
|
|
isc__nmsocket_shutdown(sock);
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
2021-03-30 09:25:09 +02:00
|
|
|
default:
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-05 01:02:12 -08:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nm_setstats(isc_nm_t *mgr, isc_stats_t *stats) {
|
2020-01-05 01:02:12 -08:00
|
|
|
REQUIRE(VALID_NM(mgr));
|
|
|
|
REQUIRE(mgr->stats == NULL);
|
|
|
|
REQUIRE(isc_stats_ncounters(stats) == isc_sockstatscounter_max);
|
|
|
|
|
|
|
|
isc_stats_attach(stats, &mgr->stats);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2021-10-02 16:26:43 -07:00
|
|
|
isc__nm_incstats(isc_nmsocket_t *sock, isc__nm_statid_t id) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(id < STATID_MAX);
|
2020-01-05 01:02:12 -08:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
if (sock->statsindex != NULL && sock->worker->netmgr->stats != NULL) {
|
|
|
|
isc_stats_increment(sock->worker->netmgr->stats,
|
|
|
|
sock->statsindex[id]);
|
2020-01-05 01:02:12 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2021-10-02 16:26:43 -07:00
|
|
|
isc__nm_decstats(isc_nmsocket_t *sock, isc__nm_statid_t id) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(id < STATID_MAX);
|
2020-01-05 01:02:12 -08:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
if (sock->statsindex != NULL && sock->worker->netmgr->stats != NULL) {
|
|
|
|
isc_stats_decrement(sock->worker->netmgr->stats,
|
|
|
|
sock->statsindex[id]);
|
2020-01-05 01:02:12 -08:00
|
|
|
}
|
|
|
|
}
|
2020-07-21 13:29:14 +02:00
|
|
|
|
2021-07-26 13:14:41 +02:00
|
|
|
isc_result_t
|
|
|
|
isc_nm_checkaddr(const isc_sockaddr_t *addr, isc_socktype_t type) {
|
|
|
|
int proto, pf, addrlen, fd, r;
|
|
|
|
|
|
|
|
REQUIRE(addr != NULL);
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case isc_socktype_tcp:
|
|
|
|
proto = SOCK_STREAM;
|
|
|
|
break;
|
|
|
|
case isc_socktype_udp:
|
|
|
|
proto = SOCK_DGRAM;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return (ISC_R_NOTIMPLEMENTED);
|
|
|
|
}
|
|
|
|
|
|
|
|
pf = isc_sockaddr_pf(addr);
|
|
|
|
if (pf == AF_INET) {
|
|
|
|
addrlen = sizeof(struct sockaddr_in);
|
|
|
|
} else {
|
|
|
|
addrlen = sizeof(struct sockaddr_in6);
|
|
|
|
}
|
|
|
|
|
|
|
|
fd = socket(pf, proto, 0);
|
|
|
|
if (fd < 0) {
|
|
|
|
return (isc_errno_toresult(errno));
|
|
|
|
}
|
|
|
|
|
|
|
|
r = bind(fd, (const struct sockaddr *)&addr->type.sa, addrlen);
|
|
|
|
if (r < 0) {
|
|
|
|
close(fd);
|
|
|
|
return (isc_errno_toresult(errno));
|
|
|
|
}
|
|
|
|
|
|
|
|
close(fd);
|
|
|
|
return (ISC_R_SUCCESS);
|
|
|
|
}
|
|
|
|
|
2021-05-20 15:53:50 +02:00
|
|
|
#if defined(TCP_CONNECTIONTIMEOUT)
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
#define TIMEOUT_TYPE int
|
|
|
|
#define TIMEOUT_DIV 1000
|
|
|
|
#define TIMEOUT_OPTNAME TCP_CONNECTIONTIMEOUT
|
|
|
|
#elif defined(TCP_RXT_CONNDROPTIME)
|
|
|
|
#define TIMEOUT_TYPE int
|
|
|
|
#define TIMEOUT_DIV 1000
|
|
|
|
#define TIMEOUT_OPTNAME TCP_RXT_CONNDROPTIME
|
|
|
|
#elif defined(TCP_USER_TIMEOUT)
|
|
|
|
#define TIMEOUT_TYPE unsigned int
|
|
|
|
#define TIMEOUT_DIV 1
|
|
|
|
#define TIMEOUT_OPTNAME TCP_USER_TIMEOUT
|
2020-12-02 21:54:25 +01:00
|
|
|
#elif defined(TCP_KEEPINIT)
|
|
|
|
#define TIMEOUT_TYPE int
|
|
|
|
#define TIMEOUT_DIV 1000
|
|
|
|
#define TIMEOUT_OPTNAME TCP_KEEPINIT
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
#endif
|
|
|
|
|
2020-12-02 20:51:38 +01:00
|
|
|
void
|
|
|
|
isc__nm_set_network_buffers(isc_nm_t *nm, uv_handle_t *handle) {
|
|
|
|
int32_t recv_buffer_size = 0;
|
|
|
|
int32_t send_buffer_size = 0;
|
|
|
|
|
|
|
|
switch (handle->type) {
|
|
|
|
case UV_TCP:
|
|
|
|
recv_buffer_size =
|
|
|
|
atomic_load_relaxed(&nm->recv_tcp_buffer_size);
|
|
|
|
send_buffer_size =
|
|
|
|
atomic_load_relaxed(&nm->send_tcp_buffer_size);
|
|
|
|
break;
|
|
|
|
case UV_UDP:
|
|
|
|
recv_buffer_size =
|
|
|
|
atomic_load_relaxed(&nm->recv_udp_buffer_size);
|
|
|
|
send_buffer_size =
|
|
|
|
atomic_load_relaxed(&nm->send_udp_buffer_size);
|
|
|
|
break;
|
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-12-02 20:51:38 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (recv_buffer_size > 0) {
|
|
|
|
int r = uv_recv_buffer_size(handle, &recv_buffer_size);
|
2022-02-15 14:51:02 +01:00
|
|
|
UV_RUNTIME_CHECK(uv_recv_buffer_size, r);
|
2020-12-02 20:51:38 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (send_buffer_size > 0) {
|
|
|
|
int r = uv_send_buffer_size(handle, &send_buffer_size);
|
2022-02-15 14:51:02 +01:00
|
|
|
UV_RUNTIME_CHECK(uv_send_buffer_size, r);
|
2020-12-02 20:51:38 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-06 16:36:17 +03:00
|
|
|
void
|
|
|
|
isc_nm_bad_request(isc_nmhandle_t *handle) {
|
2022-02-15 14:41:15 +01:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2021-07-06 16:36:17 +03:00
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
sock = handle->sock;
|
|
|
|
|
2022-02-15 14:41:15 +01:00
|
|
|
switch (sock->type) {
|
2021-07-06 16:36:17 +03:00
|
|
|
case isc_nm_udpsocket:
|
2022-02-15 14:41:15 +01:00
|
|
|
return;
|
2021-07-06 16:36:17 +03:00
|
|
|
case isc_nm_tlsdnssocket:
|
2022-07-26 17:07:19 +03:00
|
|
|
case isc_nm_tcpsocket:
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
2022-07-26 17:07:19 +03:00
|
|
|
case isc_nm_tlssocket:
|
2022-02-15 14:41:15 +01:00
|
|
|
REQUIRE(sock->parent == NULL);
|
|
|
|
isc__nmsocket_reset(sock);
|
2021-07-06 16:36:17 +03:00
|
|
|
return;
|
2022-02-15 14:41:15 +01:00
|
|
|
#if HAVE_LIBNGHTTP2
|
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nm_http_bad_request(handle);
|
|
|
|
return;
|
2021-07-06 16:36:17 +03:00
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2021-07-06 16:36:17 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-26 15:07:20 +03:00
|
|
|
bool
|
|
|
|
isc_nm_xfr_allowed(isc_nmhandle_t *handle) {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2021-08-26 15:07:20 +03:00
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
sock = handle->sock;
|
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
return (isc__nm_tlsdns_xfr_allowed(sock));
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
return (isc__nm_streamdns_xfr_allowed(sock));
|
2021-08-26 15:07:20 +03:00
|
|
|
default:
|
|
|
|
return (false);
|
|
|
|
}
|
|
|
|
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2021-08-26 15:07:20 +03:00
|
|
|
|
|
|
|
return (false);
|
|
|
|
}
|
|
|
|
|
2021-11-08 12:44:55 -08:00
|
|
|
bool
|
|
|
|
isc_nm_is_http_handle(isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
return (handle->sock->type == isc_nm_httpsocket);
|
|
|
|
}
|
|
|
|
|
2021-10-06 14:09:53 +03:00
|
|
|
void
|
|
|
|
isc_nm_set_maxage(isc_nmhandle_t *handle, const uint32_t ttl) {
|
2021-11-08 12:44:55 -08:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2021-10-06 14:09:53 +03:00
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
REQUIRE(!atomic_load(&handle->sock->client));
|
|
|
|
|
2021-11-08 12:44:55 -08:00
|
|
|
#if !HAVE_LIBNGHTTP2
|
|
|
|
UNUSED(ttl);
|
|
|
|
#endif
|
|
|
|
|
2021-10-06 14:09:53 +03:00
|
|
|
sock = handle->sock;
|
|
|
|
switch (sock->type) {
|
|
|
|
#if HAVE_LIBNGHTTP2
|
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nm_http_set_maxage(handle, ttl);
|
|
|
|
break;
|
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
|
|
|
case isc_nm_udpsocket:
|
|
|
|
case isc_nm_tlsdnssocket:
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
2021-10-06 14:09:53 +03:00
|
|
|
return;
|
|
|
|
break;
|
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
case isc_nm_tlssocket:
|
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2021-10-06 14:09:53 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-15 17:42:15 +02:00
|
|
|
isc_nmsocket_type
|
|
|
|
isc_nm_socket_type(const isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
return (handle->sock->type);
|
|
|
|
}
|
|
|
|
|
2021-11-16 13:35:37 +02:00
|
|
|
bool
|
|
|
|
isc_nm_has_encryption(const isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
case isc_nm_tlssocket:
|
|
|
|
return (true);
|
|
|
|
#if HAVE_LIBNGHTTP2
|
|
|
|
case isc_nm_httpsocket:
|
|
|
|
return (isc__nm_http_has_encryption(handle));
|
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
return (isc__nm_streamdns_has_encryption(handle));
|
2021-11-16 13:35:37 +02:00
|
|
|
default:
|
|
|
|
return (false);
|
|
|
|
};
|
|
|
|
|
|
|
|
return (false);
|
|
|
|
}
|
|
|
|
|
2022-01-13 14:35:24 +02:00
|
|
|
const char *
|
|
|
|
isc_nm_verify_tls_peer_result_string(const isc_nmhandle_t *handle) {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2022-01-13 14:35:24 +02:00
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
sock = handle->sock;
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
return (isc__nm_tlsdns_verify_tls_peer_result_string(handle));
|
|
|
|
break;
|
|
|
|
case isc_nm_tlssocket:
|
|
|
|
return (isc__nm_tls_verify_tls_peer_result_string(handle));
|
|
|
|
break;
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
2022-01-13 14:35:24 +02:00
|
|
|
case isc_nm_httpsocket:
|
|
|
|
return (isc__nm_http_verify_tls_peer_result_string(handle));
|
|
|
|
break;
|
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
return (isc__nm_streamdns_verify_tls_peer_result_string(
|
|
|
|
handle));
|
|
|
|
break;
|
2022-01-13 14:35:24 +02:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2022-03-31 13:47:48 +03:00
|
|
|
void
|
|
|
|
isc__nm_async_settlsctx(isc__networker_t *worker, isc__netievent_t *ev0) {
|
|
|
|
isc__netievent__tlsctx_t *ev_tlsctx = (isc__netievent__tlsctx_t *)ev0;
|
2022-07-26 13:03:45 +02:00
|
|
|
const int tid = isc_tid();
|
2022-03-31 13:47:48 +03:00
|
|
|
isc_nmsocket_t *listener = ev_tlsctx->sock;
|
|
|
|
isc_tlsctx_t *tlsctx = ev_tlsctx->tlsctx;
|
|
|
|
|
|
|
|
UNUSED(worker);
|
|
|
|
|
|
|
|
switch (listener->type) {
|
|
|
|
case isc_nm_tlsdnslistener:
|
|
|
|
isc__nm_async_tlsdns_set_tlsctx(listener, tlsctx, tid);
|
|
|
|
break;
|
|
|
|
case isc_nm_tlslistener:
|
|
|
|
isc__nm_async_tls_set_tlsctx(listener, tlsctx, tid);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
set_tlsctx_workers(isc_nmsocket_t *listener, isc_tlsctx_t *tlsctx) {
|
2022-10-14 21:36:51 +03:00
|
|
|
const size_t nworkers =
|
|
|
|
(size_t)isc_loopmgr_nloops(listener->worker->netmgr->loopmgr);
|
2022-03-31 13:47:48 +03:00
|
|
|
/* Update the TLS context reference for every worker thread. */
|
2022-10-14 21:36:51 +03:00
|
|
|
for (size_t i = 0; i < nworkers; i++) {
|
|
|
|
isc__networker_t *worker =
|
|
|
|
&listener->worker->netmgr->workers[i];
|
2022-03-31 13:47:48 +03:00
|
|
|
isc__netievent__tlsctx_t *ievent =
|
2022-10-14 21:36:51 +03:00
|
|
|
isc__nm_get_netievent_settlsctx(worker, listener,
|
|
|
|
tlsctx);
|
|
|
|
isc__nm_enqueue_ievent(worker, (isc__netievent_t *)ievent);
|
2022-03-31 13:47:48 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc_nmsocket_set_tlsctx(isc_nmsocket_t *listener, isc_tlsctx_t *tlsctx) {
|
|
|
|
REQUIRE(VALID_NMSOCK(listener));
|
|
|
|
REQUIRE(tlsctx != NULL);
|
|
|
|
|
|
|
|
switch (listener->type) {
|
|
|
|
#if HAVE_LIBNGHTTP2
|
|
|
|
case isc_nm_httplistener:
|
|
|
|
/*
|
|
|
|
* We handle HTTP listener sockets differently, as they rely
|
|
|
|
* on underlying TLS sockets for networking. The TLS context
|
|
|
|
* will get passed to these underlying sockets via the call to
|
|
|
|
* isc__nm_http_set_tlsctx().
|
|
|
|
*/
|
|
|
|
isc__nm_http_set_tlsctx(listener, tlsctx);
|
|
|
|
break;
|
2022-10-18 15:36:00 +03:00
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
2022-03-31 13:47:48 +03:00
|
|
|
case isc_nm_tlslistener:
|
|
|
|
set_tlsctx_workers(listener, tlsctx);
|
|
|
|
break;
|
|
|
|
case isc_nm_tlsdnslistener:
|
|
|
|
set_tlsctx_workers(listener, tlsctx);
|
|
|
|
break;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnslistener:
|
|
|
|
isc__nm_streamdns_set_tlsctx(listener, tlsctx);
|
|
|
|
break;
|
2022-03-31 13:47:48 +03:00
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2022-06-22 16:45:28 +03:00
|
|
|
void
|
|
|
|
isc_nmsocket_set_max_streams(isc_nmsocket_t *listener,
|
|
|
|
const uint32_t max_streams) {
|
|
|
|
REQUIRE(VALID_NMSOCK(listener));
|
|
|
|
switch (listener->type) {
|
|
|
|
#if HAVE_LIBNGHTTP2
|
|
|
|
case isc_nm_httplistener:
|
|
|
|
isc__nm_http_set_max_streams(listener, max_streams);
|
|
|
|
break;
|
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
|
|
|
default:
|
|
|
|
UNUSED(max_streams);
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2022-04-22 15:59:11 +03:00
|
|
|
void
|
|
|
|
isc__nmsocket_log_tls_session_reuse(isc_nmsocket_t *sock, isc_tls_t *tls) {
|
|
|
|
const int log_level = ISC_LOG_DEBUG(1);
|
|
|
|
char client_sabuf[ISC_SOCKADDR_FORMATSIZE];
|
|
|
|
char local_sabuf[ISC_SOCKADDR_FORMATSIZE];
|
|
|
|
|
|
|
|
REQUIRE(tls != NULL);
|
|
|
|
|
|
|
|
if (!isc_log_wouldlog(isc_lctx, log_level)) {
|
|
|
|
return;
|
|
|
|
};
|
|
|
|
|
|
|
|
isc_sockaddr_format(&sock->peer, client_sabuf, sizeof(client_sabuf));
|
|
|
|
isc_sockaddr_format(&sock->iface, local_sabuf, sizeof(local_sabuf));
|
2022-12-07 09:45:34 +01:00
|
|
|
isc__nmsocket_log(sock, log_level, "TLS %s session %s for %s on %s",
|
|
|
|
SSL_is_server(tls) ? "server" : "client",
|
|
|
|
SSL_session_reused(tls) ? "resumed" : "created",
|
|
|
|
client_sabuf, local_sabuf);
|
2022-04-22 15:59:11 +03:00
|
|
|
}
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
static void
|
|
|
|
isc__networker_destroy(isc__networker_t *worker) {
|
|
|
|
isc_nm_t *netmgr = worker->netmgr;
|
|
|
|
worker->netmgr = NULL;
|
|
|
|
|
2022-12-07 09:45:34 +01:00
|
|
|
isc__netmgr_log(netmgr, ISC_LOG_DEBUG(1),
|
|
|
|
"Destroying down network manager worker on loop %p(%d)",
|
|
|
|
worker->loop, isc_tid());
|
2022-07-26 13:03:45 +02:00
|
|
|
|
|
|
|
isc_loop_detach(&worker->loop);
|
|
|
|
|
|
|
|
isc_mem_put(worker->mctx, worker->sendbuf, ISC_NETMGR_SENDBUF_SIZE);
|
|
|
|
isc_mem_putanddetach(&worker->mctx, worker->recvbuf,
|
|
|
|
ISC_NETMGR_RECVBUF_SIZE);
|
|
|
|
isc_nm_detach(&netmgr);
|
|
|
|
}
|
|
|
|
|
|
|
|
ISC_REFCOUNT_IMPL(isc__networker, isc__networker_destroy);
|
|
|
|
|
2022-12-07 09:45:34 +01:00
|
|
|
void
|
|
|
|
isc__netmgr_log(const isc_nm_t *netmgr, int level, const char *fmt, ...) {
|
|
|
|
char msgbuf[2048];
|
|
|
|
va_list ap;
|
|
|
|
|
|
|
|
if (!isc_log_wouldlog(isc_lctx, level)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
|
|
|
|
va_end(ap);
|
|
|
|
|
|
|
|
isc_log_write(isc_lctx, ISC_LOGCATEGORY_DEFAULT, ISC_LOGMODULE_NETMGR,
|
|
|
|
level, "netmgr %p: %s", netmgr, msgbuf);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nmsocket_log(const isc_nmsocket_t *sock, int level, const char *fmt, ...) {
|
|
|
|
char msgbuf[2048];
|
|
|
|
va_list ap;
|
|
|
|
|
|
|
|
if (!isc_log_wouldlog(isc_lctx, level)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
|
|
|
|
va_end(ap);
|
|
|
|
|
|
|
|
isc_log_write(isc_lctx, ISC_LOGCATEGORY_DEFAULT, ISC_LOGMODULE_NETMGR,
|
|
|
|
level, "socket %p: %s", sock, msgbuf);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nmhandle_log(const isc_nmhandle_t *handle, int level, const char *fmt,
|
|
|
|
...) {
|
|
|
|
char msgbuf[2048];
|
|
|
|
va_list ap;
|
|
|
|
|
|
|
|
if (!isc_log_wouldlog(isc_lctx, level)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
|
|
|
|
va_end(ap);
|
|
|
|
|
|
|
|
isc__nmsocket_log(handle->sock, level, "handle %p: %s", handle, msgbuf);
|
|
|
|
}
|
|
|
|
|
2022-10-18 15:21:10 +03:00
|
|
|
void
|
|
|
|
isc__nmhandle_set_manual_timer(isc_nmhandle_t *handle, const bool manual) {
|
|
|
|
isc_nmsocket_t *sock;
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
sock = handle->sock;
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
isc__nmhandle_tcp_set_manual_timer(handle, manual);
|
|
|
|
return;
|
2022-10-20 15:40:51 +03:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nmhandle_tls_set_manual_timer(handle, manual);
|
|
|
|
return;
|
2022-10-18 15:21:10 +03:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
|
2022-08-03 14:46:33 +03:00
|
|
|
void
|
|
|
|
isc__nmhandle_get_selected_alpn(isc_nmhandle_t *handle,
|
|
|
|
const unsigned char **alpn,
|
|
|
|
unsigned int *alpnlen) {
|
|
|
|
isc_nmsocket_t *sock;
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
sock = handle->sock;
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nmhandle_tls_get_selected_alpn(handle, alpn, alpnlen);
|
|
|
|
return;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2022-08-25 22:18:15 +03:00
|
|
|
isc_result_t
|
|
|
|
isc_nmhandle_set_tcp_nodelay(isc_nmhandle_t *handle, const bool value) {
|
|
|
|
isc_result_t result = ISC_R_FAILURE;
|
|
|
|
isc_nmsocket_t *sock;
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
sock = handle->sock;
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tcpsocket: {
|
|
|
|
uv_os_fd_t tcp_fd = (uv_os_fd_t)-1;
|
|
|
|
(void)uv_fileno((uv_handle_t *)&sock->uv_handle.tcp, &tcp_fd);
|
|
|
|
RUNTIME_CHECK(tcp_fd != (uv_os_fd_t)-1);
|
|
|
|
result = isc__nm_socket_tcp_nodelay((uv_os_sock_t)tcp_fd,
|
|
|
|
value);
|
|
|
|
} break;
|
2022-08-25 22:37:26 +03:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
result = isc__nmhandle_tls_set_tcp_nodelay(handle, value);
|
|
|
|
break;
|
2022-08-25 22:18:15 +03:00
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
|
|
|
|
return (result);
|
|
|
|
}
|
|
|
|
|
2020-09-02 17:57:44 +02:00
|
|
|
#ifdef NETMGR_TRACE
|
|
|
|
/*
|
|
|
|
* Dump all active sockets in netmgr. We output to stderr
|
|
|
|
* as the logger might be already shut down.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static const char *
|
|
|
|
nmsocket_type_totext(isc_nmsocket_type type) {
|
|
|
|
switch (type) {
|
|
|
|
case isc_nm_udpsocket:
|
|
|
|
return ("isc_nm_udpsocket");
|
|
|
|
case isc_nm_udplistener:
|
|
|
|
return ("isc_nm_udplistener");
|
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
return ("isc_nm_tcpsocket");
|
|
|
|
case isc_nm_tcplistener:
|
|
|
|
return ("isc_nm_tcplistener");
|
2021-01-25 17:44:39 +02:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
return ("isc_nm_tlssocket");
|
|
|
|
case isc_nm_tlslistener:
|
|
|
|
return ("isc_nm_tlslistener");
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnslistener:
|
|
|
|
return ("isc_nm_tlsdnslistener");
|
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
return ("isc_nm_tlsdnssocket");
|
2020-12-07 14:19:10 +02:00
|
|
|
case isc_nm_httplistener:
|
|
|
|
return ("isc_nm_httplistener");
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; remove it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address.
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
case isc_nm_httpsocket:
|
|
|
|
return ("isc_nm_httpsocket");
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnslistener:
|
|
|
|
return ("isc_nm_streamdnslistener");
|
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
return ("isc_nm_streamdnssocket");
|
2020-09-02 17:57:44 +02:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-09-02 17:57:44 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nmhandle_dump(isc_nmhandle_t *handle) {
|
2021-01-29 13:00:46 +01:00
|
|
|
fprintf(stderr, "Active handle %p, refs %" PRIuFAST32 "\n", handle,
|
2020-09-02 17:57:44 +02:00
|
|
|
isc_refcount_current(&handle->references));
|
|
|
|
fprintf(stderr, "Created by:\n");
|
2021-04-27 16:20:03 +02:00
|
|
|
isc_backtrace_symbols_fd(handle->backtrace, handle->backtrace_size,
|
|
|
|
STDERR_FILENO);
|
2020-09-02 17:57:44 +02:00
|
|
|
fprintf(stderr, "\n\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nmsocket_dump(isc_nmsocket_t *sock) {
|
2020-10-21 12:52:09 +02:00
|
|
|
isc_nmhandle_t *handle = NULL;
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
LOCK(&sock->tracelock);
|
2020-09-02 17:57:44 +02:00
|
|
|
fprintf(stderr, "\n=================\n");
|
2021-01-29 13:00:46 +01:00
|
|
|
fprintf(stderr, "Active %s socket %p, type %s, refs %" PRIuFAST32 "\n",
|
|
|
|
atomic_load(&sock->client) ? "client" : "server", sock,
|
2020-09-02 17:57:44 +02:00
|
|
|
nmsocket_type_totext(sock->type),
|
|
|
|
isc_refcount_current(&sock->references));
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
fprintf(stderr,
|
2021-05-27 09:45:07 +02:00
|
|
|
"Parent %p, listener %p, server %p, statichandle = "
|
|
|
|
"%p\n",
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
sock->parent, sock->listener, sock->server, sock->statichandle);
|
2021-01-29 13:00:46 +01:00
|
|
|
fprintf(stderr, "Flags:%s%s%s%s%s\n",
|
|
|
|
atomic_load(&sock->active) ? " active" : "",
|
|
|
|
atomic_load(&sock->closing) ? " closing" : "",
|
|
|
|
atomic_load(&sock->destroying) ? " destroying" : "",
|
|
|
|
atomic_load(&sock->connecting) ? " connecting" : "",
|
2021-07-26 13:14:41 +02:00
|
|
|
atomic_load(&sock->accepting) ? " accepting" : "");
|
2020-09-02 17:57:44 +02:00
|
|
|
fprintf(stderr, "Created by:\n");
|
2021-04-27 16:20:03 +02:00
|
|
|
isc_backtrace_symbols_fd(sock->backtrace, sock->backtrace_size,
|
|
|
|
STDERR_FILENO);
|
2020-09-02 17:57:44 +02:00
|
|
|
fprintf(stderr, "\n");
|
2020-10-21 12:52:09 +02:00
|
|
|
|
2020-09-02 17:57:44 +02:00
|
|
|
for (handle = ISC_LIST_HEAD(sock->active_handles); handle != NULL;
|
|
|
|
handle = ISC_LIST_NEXT(handle, active_link))
|
|
|
|
{
|
2020-10-21 12:52:09 +02:00
|
|
|
static bool first = true;
|
|
|
|
if (first) {
|
|
|
|
fprintf(stderr, "Active handles:\n");
|
|
|
|
first = false;
|
|
|
|
}
|
2020-09-02 17:57:44 +02:00
|
|
|
nmhandle_dump(handle);
|
|
|
|
}
|
2020-10-21 12:52:09 +02:00
|
|
|
|
2020-09-02 17:57:44 +02:00
|
|
|
fprintf(stderr, "\n");
|
2022-07-26 13:03:45 +02:00
|
|
|
UNLOCK(&sock->tracelock);
|
2020-09-02 17:57:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_dump_active(isc_nm_t *nm) {
|
2020-10-21 12:52:09 +02:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
|
|
|
|
2020-09-02 17:57:44 +02:00
|
|
|
REQUIRE(VALID_NM(nm));
|
2020-10-21 12:52:09 +02:00
|
|
|
|
2020-09-02 17:57:44 +02:00
|
|
|
LOCK(&nm->lock);
|
|
|
|
for (sock = ISC_LIST_HEAD(nm->active_sockets); sock != NULL;
|
|
|
|
sock = ISC_LIST_NEXT(sock, active_link))
|
|
|
|
{
|
2020-10-21 12:52:09 +02:00
|
|
|
static bool first = true;
|
|
|
|
if (first) {
|
|
|
|
fprintf(stderr, "Outstanding sockets\n");
|
|
|
|
first = false;
|
|
|
|
}
|
2020-09-02 17:57:44 +02:00
|
|
|
nmsocket_dump(sock);
|
|
|
|
}
|
|
|
|
UNLOCK(&nm->lock);
|
|
|
|
}
|
|
|
|
#endif
|