/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

#include <inttypes.h>
#include <unistd.h>
#include <uv.h>

#include <isc/atomic.h>
#include <isc/buffer.h>
#include <isc/condition.h>
#include <isc/errno.h>
#include <isc/log.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
#include <isc/print.h>
#include <isc/quota.h>
#include <isc/random.h>
#include <isc/refcount.h>
#include <isc/region.h>
#include <isc/result.h>
#include <isc/sockaddr.h>
#include <isc/stats.h>
#include <isc/strerr.h>
#include <isc/thread.h>
#include <isc/tls.h>
#include <isc/util.h>

#include "netmgr-int.h"
#include "openssl_shim.h"
#include "uv-compat.h"

#if NETMGR_TRACE
#if HAVE_BACKTRACE
#include <execinfo.h>
#else /* HAVE_BACKTRACE */
#define backtrace(buffer, size) 0
#define backtrace_symbols_fd(buffer, size, fd) \
	fprintf(stderr, "<not available>");
#endif /* HAVE_BACKTRACE */
#endif /* NETMGR_TRACE */
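/*
 * Note: on platforms without backtrace(), the two macros above are
 * no-op stand-ins that keep NETMGR_TRACE builds compiling; the trace
 * output simply reports "<not available>" instead of a stack dump.
 */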

/*%
 * How many isc_nmhandles and isc_nm_uvreqs will we be
 * caching for reuse in a socket.
 */
#define ISC_NM_HANDLES_STACK_SIZE 600
#define ISC_NM_REQS_STACK_SIZE	  600

/*%
 * Shortcut index arrays to get access to statistics counters.
 */
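/*
 * All four arrays use the same slot layout (open, openfail, close,
 * bindfail, connectfail, connect, acceptfail, accept, sendfail,
 * recvfail, active), so callers can select the right per-protocol
 * counter by socket family and type alone. The -1 entries in the UDP
 * arrays fill the accept/acceptfail slots, which have no meaningful
 * counter for a connectionless protocol.
 */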

static const isc_statscounter_t udp4statsindex[] = {
	isc_sockstatscounter_udp4open,
	isc_sockstatscounter_udp4openfail,
	isc_sockstatscounter_udp4close,
	isc_sockstatscounter_udp4bindfail,
	isc_sockstatscounter_udp4connectfail,
	isc_sockstatscounter_udp4connect,
	-1,
	-1,
	isc_sockstatscounter_udp4sendfail,
	isc_sockstatscounter_udp4recvfail,
	isc_sockstatscounter_udp4active
};

static const isc_statscounter_t udp6statsindex[] = {
	isc_sockstatscounter_udp6open,
	isc_sockstatscounter_udp6openfail,
	isc_sockstatscounter_udp6close,
	isc_sockstatscounter_udp6bindfail,
	isc_sockstatscounter_udp6connectfail,
	isc_sockstatscounter_udp6connect,
	-1,
	-1,
	isc_sockstatscounter_udp6sendfail,
	isc_sockstatscounter_udp6recvfail,
	isc_sockstatscounter_udp6active
};

static const isc_statscounter_t tcp4statsindex[] = {
	isc_sockstatscounter_tcp4open, isc_sockstatscounter_tcp4openfail,
	isc_sockstatscounter_tcp4close, isc_sockstatscounter_tcp4bindfail,
	isc_sockstatscounter_tcp4connectfail, isc_sockstatscounter_tcp4connect,
	isc_sockstatscounter_tcp4acceptfail, isc_sockstatscounter_tcp4accept,
	isc_sockstatscounter_tcp4sendfail, isc_sockstatscounter_tcp4recvfail,
	isc_sockstatscounter_tcp4active
};

static const isc_statscounter_t tcp6statsindex[] = {
	isc_sockstatscounter_tcp6open, isc_sockstatscounter_tcp6openfail,
	isc_sockstatscounter_tcp6close, isc_sockstatscounter_tcp6bindfail,
	isc_sockstatscounter_tcp6connectfail, isc_sockstatscounter_tcp6connect,
	isc_sockstatscounter_tcp6acceptfail, isc_sockstatscounter_tcp6accept,
	isc_sockstatscounter_tcp6sendfail, isc_sockstatscounter_tcp6recvfail,
	isc_sockstatscounter_tcp6active
};

#if 0
/* XXX: not currently used */
static const isc_statscounter_t unixstatsindex[] = {
	isc_sockstatscounter_unixopen,
	isc_sockstatscounter_unixopenfail,
	isc_sockstatscounter_unixclose,
	isc_sockstatscounter_unixbindfail,
	isc_sockstatscounter_unixconnectfail,
	isc_sockstatscounter_unixconnect,
	isc_sockstatscounter_unixacceptfail,
	isc_sockstatscounter_unixaccept,
	isc_sockstatscounter_unixsendfail,
	isc_sockstatscounter_unixrecvfail,
	isc_sockstatscounter_unixactive
};
#endif /* if 0 */

/*
 * libuv is not thread safe, but has mechanisms to pass messages
 * between threads. Each socket is owned by a thread. For UDP
 * sockets we have a set of sockets for each interface and we can
 * choose a sibling and send the message directly. For TCP, or if
 * we're calling from a non-networking thread, we need to pass the
 * request using async_cb.
 */

static thread_local int isc__nm_tid_v = ISC_NETMGR_TID_UNKNOWN;
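
/*
 * Each network thread records its own id in isc__nm_tid_v when it
 * starts; any other thread keeps the negative sentinel
 * ISC_NETMGR_TID_UNKNOWN. isc_nm_tid() and isc__nm_in_netthread()
 * below read this thread-local value to tell the two cases apart.
 */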

static void
nmsocket_maybe_destroy(isc_nmsocket_t *sock FLARG);
static void
nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle);
static isc_threadresult_t
nm_thread(isc_threadarg_t worker0);
static void
async_cb(uv_async_t *handle);
static bool
process_queue(isc__networker_t *worker, isc_queue_t *queue);
static bool
process_priority_queue(isc__networker_t *worker);
static bool
process_normal_queue(isc__networker_t *worker);
static void
process_queues(isc__networker_t *worker);

static void
isc__nm_async_stop(isc__networker_t *worker, isc__netievent_t *ev0);
static void
isc__nm_async_pause(isc__networker_t *worker, isc__netievent_t *ev0);
static void
isc__nm_async_resume(isc__networker_t *worker, isc__netievent_t *ev0);
static void
isc__nm_async_detach(isc__networker_t *worker, isc__netievent_t *ev0);
static void
isc__nm_async_close(isc__networker_t *worker, isc__netievent_t *ev0);
/*%<
 * Issue a 'handle closed' callback on the socket.
 */

static void
nmhandle_detach_cb(isc_nmhandle_t **handlep FLARG);

int
isc_nm_tid(void) {
	return (isc__nm_tid_v);
}

bool
isc__nm_in_netthread(void) {
	return (isc__nm_tid_v >= 0);
}

#ifdef WIN32
static void
isc__nm_winsock_initialize(void) {
	WORD wVersionRequested = MAKEWORD(2, 2);
	WSADATA wsaData;
	int result;

	result = WSAStartup(wVersionRequested, &wsaData);
	if (result != 0) {
		char strbuf[ISC_STRERRORSIZE];
		strerror_r(result, strbuf, sizeof(strbuf));
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "WSAStartup() failed with error code %d: %s",
				 result, strbuf);
	}

	/*
	 * Confirm that the WinSock DLL supports version 2.2. Note that if the
	 * DLL supports versions greater than 2.2 in addition to 2.2, it will
	 * still return 2.2 in wVersion since that is the version we requested.
	 */
	if (LOBYTE(wsaData.wVersion) != 2 || HIBYTE(wsaData.wVersion) != 2) {
		UNEXPECTED_ERROR(__FILE__, __LINE__,
				 "Unusable WinSock DLL version: %u.%u",
				 LOBYTE(wsaData.wVersion),
				 HIBYTE(wsaData.wVersion));
	}
}

static void
isc__nm_winsock_destroy(void) {
	WSACleanup();
}
#endif /* WIN32 */
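
/*
 * isc__nm_winsock_initialize() is paired with isc__nm_winsock_destroy():
 * the former runs at the top of isc_nm_start() and the latter at the end
 * of nm_destroy(), so WinSock is held for the manager's whole lifetime.
 */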

isc_nm_t *
isc_nm_start(isc_mem_t *mctx, uint32_t workers) {
	isc_nm_t *mgr = NULL;
	char name[32];

#ifdef WIN32
	isc__nm_winsock_initialize();
#endif /* WIN32 */

	mgr = isc_mem_get(mctx, sizeof(*mgr));
	*mgr = (isc_nm_t){ .nworkers = workers };

	isc_mem_attach(mctx, &mgr->mctx);
	isc_mutex_init(&mgr->lock);
	isc_condition_init(&mgr->wkstatecond);
	isc_refcount_init(&mgr->references, 1);
	atomic_init(&mgr->maxudp, 0);
	atomic_init(&mgr->interlocked, false);

#ifdef NETMGR_TRACE
	ISC_LIST_INIT(mgr->active_sockets);
#endif

	/*
	 * Default TCP timeout values.
	 * May be updated by isc_nm_tcptimeouts().
	 */
	atomic_init(&mgr->init, 30000);
	atomic_init(&mgr->idle, 30000);
	atomic_init(&mgr->keepalive, 30000);
	atomic_init(&mgr->advertised, 30000);

	isc_mutex_init(&mgr->reqlock);
	isc_mempool_create(mgr->mctx, sizeof(isc__nm_uvreq_t), &mgr->reqpool);
	isc_mempool_setname(mgr->reqpool, "nm_reqpool");
	isc_mempool_setfreemax(mgr->reqpool, 4096);
	isc_mempool_associatelock(mgr->reqpool, &mgr->reqlock);
	isc_mempool_setfillcount(mgr->reqpool, 32);

	isc_mutex_init(&mgr->evlock);
	isc_mempool_create(mgr->mctx, sizeof(isc__netievent_storage_t),
			   &mgr->evpool);
	isc_mempool_setname(mgr->evpool, "nm_evpool");
	isc_mempool_setfreemax(mgr->evpool, 4096);
	isc_mempool_associatelock(mgr->evpool, &mgr->evlock);
	isc_mempool_setfillcount(mgr->evpool, 32);

	mgr->workers = isc_mem_get(mctx, workers * sizeof(isc__networker_t));
	for (size_t i = 0; i < workers; i++) {
		int r;
		isc__networker_t *worker = &mgr->workers[i];
		*worker = (isc__networker_t){
			.mgr = mgr,
			.id = i,
		};

		r = uv_loop_init(&worker->loop);
		RUNTIME_CHECK(r == 0);

		worker->loop.data = &mgr->workers[i];

		r = uv_async_init(&worker->loop, &worker->async, async_cb);
		RUNTIME_CHECK(r == 0);

		isc_mutex_init(&worker->lock);
		isc_condition_init(&worker->cond);

		worker->ievents = isc_queue_new(mgr->mctx, 128);
		worker->ievents_prio = isc_queue_new(mgr->mctx, 128);
		worker->recvbuf = isc_mem_get(mctx, ISC_NETMGR_RECVBUF_SIZE);
		worker->sendbuf = isc_mem_get(mctx, ISC_NETMGR_SENDBUF_SIZE);

		/*
		 * We need to do this here and not in nm_thread to avoid a
		 * race - we could exit isc_nm_start, launch nm_destroy,
		 * and nm_thread would still not be up.
		 */
		mgr->workers_running++;
		isc_thread_create(nm_thread, &mgr->workers[i], &worker->thread);

		snprintf(name, sizeof(name), "isc-net-%04zu", i);
		isc_thread_setname(worker->thread, name);
	}

	mgr->magic = NM_MAGIC;
	return (mgr);
}
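
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller starts one manager per process and releases it through the
 * reference-counting API below; "ncpus" here is a hypothetical
 * caller-chosen worker count:
 *
 *	isc_nm_t *netmgr = isc_nm_start(mctx, ncpus);
 *	...
 *	isc_nm_closedown(netmgr);
 *	isc_nm_detach(&netmgr);
 */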

/*
 * Free the resources of the network manager.
 */
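/*
 * Teardown proceeds in stages: enqueue a stop event for every worker,
 * wait until all worker threads have finished, drain any events still
 * sitting in the per-worker queues, close each uv loop, and finally
 * release the manager's own resources.
 */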
static void
nm_destroy(isc_nm_t **mgr0) {
	REQUIRE(VALID_NM(*mgr0));
	REQUIRE(!isc__nm_in_netthread());

	isc_nm_t *mgr = *mgr0;
	*mgr0 = NULL;

	isc_refcount_destroy(&mgr->references);

	mgr->magic = 0;

	for (size_t i = 0; i < mgr->nworkers; i++) {
		isc__networker_t *worker = &mgr->workers[i];
		isc__netievent_t *event = isc__nm_get_netievent_stop(mgr);
		isc__nm_enqueue_ievent(worker, event);
	}

	LOCK(&mgr->lock);
	while (mgr->workers_running > 0) {
		WAIT(&mgr->wkstatecond, &mgr->lock);
	}
	UNLOCK(&mgr->lock);

	for (size_t i = 0; i < mgr->nworkers; i++) {
		isc__networker_t *worker = &mgr->workers[i];
		isc__netievent_t *ievent = NULL;
		int r;

		/* Empty the async event queues */
		while ((ievent = (isc__netievent_t *)isc_queue_dequeue(
				worker->ievents)) != NULL)
		{
			isc_mempool_put(mgr->evpool, ievent);
		}

		while ((ievent = (isc__netievent_t *)isc_queue_dequeue(
				worker->ievents_prio)) != NULL)
		{
			isc_mempool_put(mgr->evpool, ievent);
		}

		r = uv_loop_close(&worker->loop);
		INSIST(r == 0);

		isc_queue_destroy(worker->ievents);
		isc_queue_destroy(worker->ievents_prio);
		isc_mutex_destroy(&worker->lock);
		isc_condition_destroy(&worker->cond);

		isc_mem_put(mgr->mctx, worker->sendbuf,
			    ISC_NETMGR_SENDBUF_SIZE);
		isc_mem_put(mgr->mctx, worker->recvbuf,
			    ISC_NETMGR_RECVBUF_SIZE);
		isc_thread_join(worker->thread, NULL);
	}

	if (mgr->stats != NULL) {
		isc_stats_detach(&mgr->stats);
	}

	isc_condition_destroy(&mgr->wkstatecond);
	isc_mutex_destroy(&mgr->lock);

	isc_mempool_destroy(&mgr->evpool);
	isc_mutex_destroy(&mgr->evlock);

	isc_mempool_destroy(&mgr->reqpool);
	isc_mutex_destroy(&mgr->reqlock);

	isc_mem_put(mgr->mctx, mgr->workers,
		    mgr->nworkers * sizeof(isc__networker_t));
	isc_mem_putanddetach(&mgr->mctx, mgr, sizeof(*mgr));

#ifdef WIN32
	isc__nm_winsock_destroy();
#endif /* WIN32 */
}

void
isc_nm_pause(isc_nm_t *mgr) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(!isc__nm_in_netthread());

	isc__nm_acquire_interlocked_force(mgr);

	for (size_t i = 0; i < mgr->nworkers; i++) {
		isc__networker_t *worker = &mgr->workers[i];
		isc__netievent_pause_t *event =
			isc__nm_get_netievent_pause(mgr);
		isc__nm_enqueue_ievent(worker, (isc__netievent_t *)event);
	}

	LOCK(&mgr->lock);
	while (mgr->workers_paused != mgr->workers_running) {
		WAIT(&mgr->wkstatecond, &mgr->lock);
	}
	UNLOCK(&mgr->lock);
}
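
/*
 * isc_nm_pause() and isc_nm_resume() come in pairs: pause acquires the
 * manager interlock and blocks until every running worker has parked
 * itself; resume requeues the workers and drops the interlock.
 */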

void
isc_nm_resume(isc_nm_t *mgr) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(!isc__nm_in_netthread());

	for (size_t i = 0; i < mgr->nworkers; i++) {
		isc__networker_t *worker = &mgr->workers[i];
		isc__netievent_resume_t *event =
			isc__nm_get_netievent_resume(mgr);
		isc__nm_enqueue_ievent(worker, (isc__netievent_t *)event);
	}

	LOCK(&mgr->lock);
	while (mgr->workers_paused != 0) {
		WAIT(&mgr->wkstatecond, &mgr->lock);
	}
	UNLOCK(&mgr->lock);

	isc__nm_drop_interlocked(mgr);
}
|
|
|
|
|
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nm_attach(isc_nm_t *mgr, isc_nm_t **dst) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NM(mgr));
|
|
|
|
REQUIRE(dst != NULL && *dst == NULL);
|
|
|
|
|
2020-01-14 09:43:37 +01:00
|
|
|
isc_refcount_increment(&mgr->references);
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
*dst = mgr;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nm_detach(isc_nm_t **mgr0) {
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_nm_t *mgr = NULL;
|
|
|
|
|
|
|
|
REQUIRE(mgr0 != NULL);
|
|
|
|
REQUIRE(VALID_NM(*mgr0));
|
|
|
|
|
|
|
|
mgr = *mgr0;
|
|
|
|
*mgr0 = NULL;
|
|
|
|
|
2020-01-14 09:43:37 +01:00
|
|
|
if (isc_refcount_decrement(&mgr->references) == 1) {
|
2019-11-05 13:55:54 -08:00
|
|
|
nm_destroy(&mgr);
|
|
|
|
}
|
|
|
|
}
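A hypothetical caller-side sketch of the attach/detach pairing (the
struct and helper names are invented; only isc_nm_attach() and
isc_nm_detach() are real): every component that stores a pointer to
the manager takes its own reference and drops it when done, and the
final isc_nm_detach() triggers nm_destroy().

struct listener {
	isc_nm_t *mgr;
};

static void
listener_init(struct listener *l, isc_nm_t *netmgr) {
	l->mgr = NULL;
	isc_nm_attach(netmgr, &l->mgr); /* take our own reference */
}

static void
listener_shutdown(struct listener *l) {
	isc_nm_detach(&l->mgr); /* drop it; may destroy the manager */
}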
|
|
|
|
|
2019-11-22 15:57:42 -08:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nm_closedown(isc_nm_t *mgr) {
|
2019-11-22 15:57:42 -08:00
|
|
|
REQUIRE(VALID_NM(mgr));
|
|
|
|
|
|
|
|
atomic_store(&mgr->closing, true);
|
|
|
|
for (size_t i = 0; i < mgr->nworkers; i++) {
|
|
|
|
isc__netievent_t *event = NULL;
|
2020-11-12 10:32:18 +01:00
|
|
|
event = isc__nm_get_netievent_shutdown(mgr);
|
2019-11-22 15:57:42 -08:00
|
|
|
isc__nm_enqueue_ievent(&mgr->workers[i], event);
|
|
|
|
}
|
|
|
|
}
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nm_destroy(isc_nm_t **mgr0) {
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_nm_t *mgr = NULL;
|
2020-07-15 17:57:58 -07:00
|
|
|
int counter = 0;
|
2020-09-23 21:49:46 +02:00
|
|
|
uint_fast32_t references;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
REQUIRE(mgr0 != NULL);
|
|
|
|
REQUIRE(VALID_NM(*mgr0));
|
|
|
|
|
|
|
|
mgr = *mgr0;
|
|
|
|
|
2019-11-22 15:57:42 -08:00
|
|
|
/*
|
|
|
|
* Close active connections.
|
|
|
|
*/
|
|
|
|
isc_nm_closedown(mgr);
|
2019-11-22 14:13:19 +01:00
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
/*
|
2019-11-22 15:57:42 -08:00
|
|
|
* Wait for the manager to be dereferenced elsewhere.
|
2019-11-05 13:55:54 -08:00
|
|
|
*/
|
2020-09-23 21:49:46 +02:00
|
|
|
while ((references = isc_refcount_current(&mgr->references)) > 1 &&
|
|
|
|
counter++ < 1000)
|
|
|
|
{
|
2019-11-05 13:55:54 -08:00
|
|
|
#ifdef WIN32
|
2020-04-10 16:59:24 -07:00
|
|
|
_sleep(10);
|
2020-02-13 21:48:23 +01:00
|
|
|
#else /* ifdef WIN32 */
|
2020-04-10 16:59:24 -07:00
|
|
|
usleep(10000);
|
2020-02-13 21:48:23 +01:00
|
|
|
#endif /* ifdef WIN32 */
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
2020-09-23 21:49:46 +02:00
|
|
|
|
2020-10-21 12:52:09 +02:00
|
|
|
#ifdef NETMGR_TRACE
|
|
|
|
isc__nm_dump_active(mgr);
|
|
|
|
#endif
|
2020-11-12 10:32:18 +01:00
|
|
|
|
2020-09-23 21:49:46 +02:00
|
|
|
INSIST(references == 1);
|
2020-07-15 17:57:58 -07:00
|
|
|
|
2019-11-22 15:57:42 -08:00
|
|
|
/*
|
|
|
|
* Detach final reference.
|
|
|
|
*/
|
|
|
|
isc_nm_detach(mgr0);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nm_maxudp(isc_nm_t *mgr, uint32_t maxudp) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NM(mgr));
|
|
|
|
|
|
|
|
atomic_store(&mgr->maxudp, maxudp);
|
|
|
|
}
|
|
|
|
|
2019-11-20 22:33:35 +01:00
|
|
|
void
|
2020-11-12 10:32:18 +01:00
|
|
|
isc_nm_settimeouts(isc_nm_t *mgr, uint32_t init, uint32_t idle,
|
|
|
|
uint32_t keepalive, uint32_t advertised) {
|
2019-11-20 22:33:35 +01:00
|
|
|
REQUIRE(VALID_NM(mgr));
|
|
|
|
|
2021-03-18 11:16:45 +01:00
|
|
|
atomic_store(&mgr->init, init);
|
|
|
|
atomic_store(&mgr->idle, idle);
|
|
|
|
atomic_store(&mgr->keepalive, keepalive);
|
|
|
|
atomic_store(&mgr->advertised, advertised);
|
2019-11-20 22:33:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2020-11-12 10:32:18 +01:00
|
|
|
isc_nm_gettimeouts(isc_nm_t *mgr, uint32_t *initial, uint32_t *idle,
|
|
|
|
uint32_t *keepalive, uint32_t *advertised) {
|
2019-11-20 22:33:35 +01:00
|
|
|
REQUIRE(VALID_NM(mgr));
|
|
|
|
|
|
|
|
if (initial != NULL) {
|
2021-03-18 11:16:45 +01:00
|
|
|
*initial = atomic_load(&mgr->init);
|
2019-11-20 22:33:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (idle != NULL) {
|
2021-03-18 11:16:45 +01:00
|
|
|
*idle = atomic_load(&mgr->idle);
|
2019-11-20 22:33:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (keepalive != NULL) {
|
2021-03-18 11:16:45 +01:00
|
|
|
*keepalive = atomic_load(&mgr->keepalive);
|
2019-11-20 22:33:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (advertised != NULL) {
|
2021-03-18 11:16:45 +01:00
|
|
|
*advertised = atomic_load(&mgr->advertised);
|
2019-11-20 22:33:35 +01:00
|
|
|
}
|
|
|
|
}
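A hypothetical usage sketch for the two calls above (the numeric
values are invented placeholders, not recommendations):

uint32_t init, idle, keepalive, advertised;

isc_nm_settimeouts(mgr, 30000, 30000, 30000, 30000);
isc_nm_gettimeouts(mgr, &init, &idle, &keepalive, &advertised);
INSIST(init == 30000);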
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
/*
|
|
|
|
* nm_thread is a single worker thread that runs the uv event loop
|
|
|
|
* until it is asked to stop.
|
|
|
|
*/
|
2019-12-04 10:41:40 +01:00
|
|
|
static isc_threadresult_t
|
2020-02-13 14:44:37 -08:00
|
|
|
nm_thread(isc_threadarg_t worker0) {
|
2020-02-12 13:59:18 +01:00
|
|
|
isc__networker_t *worker = (isc__networker_t *)worker0;
|
2020-09-23 21:49:46 +02:00
|
|
|
isc_nm_t *mgr = worker->mgr;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
isc__nm_tid_v = worker->id;
|
|
|
|
isc_thread_setaffinity(isc__nm_tid_v);
|
|
|
|
|
|
|
|
while (true) {
|
2020-02-13 14:44:37 -08:00
|
|
|
int r = uv_run(&worker->loop, UV_RUN_DEFAULT);
|
2020-09-23 21:49:46 +02:00
|
|
|
/* The async handle is always open until we are done */
|
|
|
|
INSIST(r > 0 || worker->finished);
|
|
|
|
|
|
|
|
if (worker->paused) {
|
|
|
|
LOCK(&worker->lock);
|
|
|
|
/* We need to lock the worker first; otherwise
|
|
|
|
* isc_nm_resume() might slip in before the WAIT() in the
|
|
|
|
* while loop starts, the signal would never be delivered,
|
|
|
|
* and we would be stuck in the paused loop forever.
|
|
|
|
*/
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2020-09-23 21:49:46 +02:00
|
|
|
LOCK(&mgr->lock);
|
|
|
|
mgr->workers_paused++;
|
|
|
|
SIGNAL(&mgr->wkstatecond);
|
|
|
|
UNLOCK(&mgr->lock);
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2020-09-23 21:49:46 +02:00
|
|
|
while (worker->paused) {
|
|
|
|
WAIT(&worker->cond, &worker->lock);
|
|
|
|
(void)process_priority_queue(worker);
|
|
|
|
}
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2020-09-23 21:49:46 +02:00
|
|
|
LOCK(&mgr->lock);
|
|
|
|
mgr->workers_paused--;
|
|
|
|
SIGNAL(&mgr->wkstatecond);
|
|
|
|
UNLOCK(&mgr->lock);
|
2019-12-02 13:54:44 +01:00
|
|
|
|
2020-09-23 21:49:46 +02:00
|
|
|
UNLOCK(&worker->lock);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2020-09-23 21:49:46 +02:00
|
|
|
if (r == 0) {
|
|
|
|
INSIST(worker->finished);
|
2019-11-05 13:55:54 -08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-09-23 21:49:46 +02:00
|
|
|
INSIST(!worker->finished);
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Empty the async queue.
|
|
|
|
*/
|
2020-09-23 21:49:46 +02:00
|
|
|
process_queues(worker);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2020-09-23 21:49:46 +02:00
|
|
|
LOCK(&mgr->lock);
|
|
|
|
mgr->workers_running--;
|
|
|
|
SIGNAL(&mgr->wkstatecond);
|
|
|
|
UNLOCK(&mgr->lock);
|
2019-12-04 10:41:40 +01:00
|
|
|
|
|
|
|
return ((isc_threadresult_t)0);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2019-11-21 17:08:06 -08:00
|
|
|
* async_cb is a universal callback for 'async' events sent to event loop.
|
|
|
|
* It's the only way to safely pass data to the libuv event loop. We use a
|
|
|
|
* single async event and a lockless queue of 'isc__netievent_t' structures
|
|
|
|
* passed from other threads.
|
2019-11-05 13:55:54 -08:00
|
|
|
*/
|
|
|
|
static void
|
2020-02-13 14:44:37 -08:00
|
|
|
async_cb(uv_async_t *handle) {
|
2020-02-12 13:59:18 +01:00
|
|
|
isc__networker_t *worker = (isc__networker_t *)handle->loop->data;
|
2020-09-23 21:49:46 +02:00
|
|
|
process_queues(worker);
|
|
|
|
}
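A minimal sketch of the producer side of this pattern. The helper name
enqueue_event() is invented, and the isc_queue_enqueue() call shape is
an assumption mirroring the isc_queue_dequeue() used in process_queue()
below; uv_async_send() is the real libuv call and is safe to invoke
from any thread. libuv may coalesce several sends into a single
async_cb() invocation, which is why async_cb() drains the whole queue
instead of handling a single event.

static void
enqueue_event(isc__networker_t *worker, isc__netievent_t *event) {
	isc_queue_enqueue(worker->ievents, (uintptr_t)event);
	uv_async_send(&worker->async); /* wake the worker's uv loop */
}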
|
|
|
|
|
|
|
|
static void
|
2020-11-12 10:32:18 +01:00
|
|
|
isc__nm_async_stop(isc__networker_t *worker, isc__netievent_t *ev0) {
|
2020-09-23 21:49:46 +02:00
|
|
|
UNUSED(ev0);
|
|
|
|
worker->finished = true;
|
|
|
|
/* Close the async handle */
|
|
|
|
uv_close((uv_handle_t *)&worker->async, NULL);
|
|
|
|
/* uv_stop(&worker->loop); */
|
2019-12-02 13:54:44 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2020-11-12 10:32:18 +01:00
|
|
|
isc__nm_async_pause(isc__networker_t *worker, isc__netievent_t *ev0) {
|
2020-09-23 21:49:46 +02:00
|
|
|
UNUSED(ev0);
|
|
|
|
REQUIRE(worker->paused == false);
|
|
|
|
worker->paused = true;
|
|
|
|
uv_stop(&worker->loop);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2020-11-12 10:32:18 +01:00
|
|
|
isc__nm_async_resume(isc__networker_t *worker, isc__netievent_t *ev0) {
|
2020-09-23 21:49:46 +02:00
|
|
|
UNUSED(ev0);
|
|
|
|
REQUIRE(worker->paused == true);
|
|
|
|
worker->paused = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
process_priority_queue(isc__networker_t *worker) {
|
|
|
|
return (process_queue(worker, worker->ievents_prio));
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
process_normal_queue(isc__networker_t *worker) {
|
|
|
|
return (process_queue(worker, worker->ievents));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
process_queues(isc__networker_t *worker) {
|
|
|
|
if (!process_priority_queue(worker)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
(void)process_normal_queue(worker);
|
|
|
|
}
|
|
|
|
|
2020-11-12 10:32:18 +01:00
|
|
|
/*
|
|
|
|
* The two macros here generate the individual cases for the process_netievent()
|
|
|
|
* function. The NETIEVENT_CASE(type) macro is the common case, and
|
|
|
|
* NETIEVENT_CASE_NOMORE(type) generates a case that stops the loop in
|
|
|
|
* process_queue(); it is used only for the netievents that stop or
|
|
|
|
* pause processing of the enqueued netievents.
|
|
|
|
*/
|
|
|
|
#define NETIEVENT_CASE(type) \
|
|
|
|
case netievent_##type: { \
|
|
|
|
isc__nm_async_##type(worker, ievent); \
|
|
|
|
isc__nm_put_netievent_##type( \
|
|
|
|
worker->mgr, (isc__netievent_##type##_t *)ievent); \
|
|
|
|
return (true); \
|
|
|
|
}
|
|
|
|
|
|
|
|
#define NETIEVENT_CASE_NOMORE(type) \
|
|
|
|
case netievent_##type: { \
|
|
|
|
isc__nm_async_##type(worker, ievent); \
|
|
|
|
isc__nm_put_netievent_##type(worker->mgr, ievent); \
|
|
|
|
return (false); \
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
process_netievent(isc__networker_t *worker, isc__netievent_t *ievent) {
|
|
|
|
REQUIRE(worker->id == isc_nm_tid());
|
|
|
|
|
|
|
|
switch (ievent->type) {
|
|
|
|
/* Don't process more ievents when we are stopping */
|
|
|
|
NETIEVENT_CASE_NOMORE(stop);
|
|
|
|
|
|
|
|
NETIEVENT_CASE(udpconnect);
|
|
|
|
NETIEVENT_CASE(udplisten);
|
|
|
|
NETIEVENT_CASE(udpstop);
|
|
|
|
NETIEVENT_CASE(udpsend);
|
|
|
|
NETIEVENT_CASE(udpread);
|
|
|
|
NETIEVENT_CASE(udpcancel);
|
|
|
|
NETIEVENT_CASE(udpclose);
|
|
|
|
|
|
|
|
NETIEVENT_CASE(tcpaccept);
|
|
|
|
NETIEVENT_CASE(tcpconnect);
|
|
|
|
NETIEVENT_CASE(tcplisten);
|
|
|
|
NETIEVENT_CASE(tcpstartread);
|
|
|
|
NETIEVENT_CASE(tcppauseread);
|
|
|
|
NETIEVENT_CASE(tcpsend);
|
|
|
|
NETIEVENT_CASE(tcpstop);
|
|
|
|
NETIEVENT_CASE(tcpcancel);
|
|
|
|
NETIEVENT_CASE(tcpclose);
|
|
|
|
|
|
|
|
NETIEVENT_CASE(tcpdnsaccept);
|
|
|
|
NETIEVENT_CASE(tcpdnslisten);
|
|
|
|
NETIEVENT_CASE(tcpdnsconnect);
|
|
|
|
NETIEVENT_CASE(tcpdnssend);
|
|
|
|
NETIEVENT_CASE(tcpdnscancel);
|
|
|
|
NETIEVENT_CASE(tcpdnsclose);
|
|
|
|
NETIEVENT_CASE(tcpdnsread);
|
|
|
|
NETIEVENT_CASE(tcpdnsstop);
|
|
|
|
|
2021-01-25 17:44:39 +02:00
|
|
|
NETIEVENT_CASE(tlsstartread);
|
|
|
|
NETIEVENT_CASE(tlssend);
|
|
|
|
NETIEVENT_CASE(tlsclose);
|
|
|
|
NETIEVENT_CASE(tlsconnect);
|
|
|
|
NETIEVENT_CASE(tlsdobio);
|
|
|
|
NETIEVENT_CASE(tlscancel);
|
|
|
|
|
2020-12-17 11:40:29 +01:00
|
|
|
NETIEVENT_CASE(tlsdnscycle);
|
|
|
|
NETIEVENT_CASE(tlsdnsaccept);
|
|
|
|
NETIEVENT_CASE(tlsdnslisten);
|
|
|
|
NETIEVENT_CASE(tlsdnsconnect);
|
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_CASE(tlsdnssend);
|
|
|
|
NETIEVENT_CASE(tlsdnscancel);
|
|
|
|
NETIEVENT_CASE(tlsdnsclose);
|
|
|
|
NETIEVENT_CASE(tlsdnsread);
|
|
|
|
NETIEVENT_CASE(tlsdnsstop);
|
2020-12-17 11:40:29 +01:00
|
|
|
NETIEVENT_CASE(tlsdnsshutdown);
|
2020-11-12 10:32:18 +01:00
|
|
|
|
2020-12-07 14:19:10 +02:00
|
|
|
NETIEVENT_CASE(httpstop);
|
|
|
|
NETIEVENT_CASE(httpsend);
|
|
|
|
NETIEVENT_CASE(httpclose);
|
|
|
|
|
2020-11-12 10:32:18 +01:00
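The two OS-level mechanisms referenced above come down to a pair of
setsockopt() calls. Below is a minimal sketch, not the code from this
file: the option names and value units are platform facts, but the
function names, exact platform branches and fallback behaviour are
assumptions, and the real implementation covers more variants and
error handling.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Cap how long a TCP connect() may take, replacing the uv_timer. */
static int
set_connect_timeout(int fd, int timeout_ms) {
#if defined(TCP_CONNECTIONTIMEOUT) /* macOS; value in seconds */
        int timeout = (timeout_ms + 999) / 1000;
        return (setsockopt(fd, IPPROTO_TCP, TCP_CONNECTIONTIMEOUT,
                           &timeout, sizeof(timeout)));
#elif defined(TCP_USER_TIMEOUT) /* Linux; value in milliseconds */
        unsigned int timeout = timeout_ms;
        return (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
                           &timeout, sizeof(timeout)));
#else
        (void)fd;
        (void)timeout_ms;
        return (0); /* no suitable option on this platform */
#endif
}

/* Ask the kernel to load-balance incoming traffic across the
 * per-netthread listener sockets bound to the same address. */
static int
set_load_balancing(int fd) {
        int on = 1;
#if defined(SO_REUSEPORT_LB) /* FreeBSD 12+ */
        return (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT_LB,
                           &on, sizeof(on)));
#elif defined(SO_REUSEPORT) /* Linux */
        return (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT,
                           &on, sizeof(on)));
#else
        (void)fd;
        return (-1); /* unsupported: limit the netmgr to one netthread */
#endif
}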
|
|
|
NETIEVENT_CASE(connectcb);
|
|
|
|
NETIEVENT_CASE(readcb);
|
|
|
|
NETIEVENT_CASE(sendcb);
|
|
|
|
|
|
|
|
NETIEVENT_CASE(close);
|
|
|
|
NETIEVENT_CASE(detach);
|
|
|
|
|
|
|
|
NETIEVENT_CASE(shutdown);
|
|
|
|
NETIEVENT_CASE(resume);
|
|
|
|
NETIEVENT_CASE_NOMORE(pause);
|
|
|
|
|
|
|
|
default:
|
|
|
|
INSIST(0);
|
|
|
|
ISC_UNREACHABLE();
|
|
|
|
}
|
|
|
|
return (true);
|
|
|
|
}
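For reference, the NETIEVENT_CASE() entries in the switch above
dispatch each event type to its matching async handler and then
release the event. This is an assumed, simplified shape of the macro,
not its verbatim definition:

#define NETIEVENT_CASE(type)                                               \
        case netievent_##type: {                                           \
                isc__nm_async_##type(worker, ievent);                      \
                isc__nm_put_netievent_##type(                              \
                        worker->mgr, (isc__netievent_##type##_t *)ievent); \
                return (true);                                             \
        }

NETIEVENT_CASE_NOMORE() would be the same dispatch but returning
false, so that the caller stops draining the queue (as used for
"pause" above).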
|
|
|
|
|
2020-09-23 21:49:46 +02:00
|
|
|
static bool
|
2020-02-13 14:44:37 -08:00
|
|
|
process_queue(isc__networker_t *worker, isc_queue_t *queue) {
|
2019-12-03 00:07:59 -08:00
|
|
|
isc__netievent_t *ievent = NULL;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2020-02-13 14:44:37 -08:00
|
|
|
while ((ievent = (isc__netievent_t *)isc_queue_dequeue(queue)) != NULL)
|
|
|
|
{
|
2020-11-12 10:32:18 +01:00
|
|
|
if (!process_netievent(worker, ievent)) {
|
|
|
|
return (false);
|
2020-09-23 21:49:46 +02:00
|
|
|
}
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
2020-11-12 10:32:18 +01:00
|
|
|
return (true);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
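process_queue() itself is driven from the worker's uv_async_t
callback. A minimal sketch of such a driver (the loop->data
convention and the async_cb name are assumptions here, not taken from
this file) could look like:

static void
async_cb(uv_async_t *handle) {
        isc__networker_t *worker = (isc__networker_t *)handle->loop->data;

        /* Drain priority events first; stop entirely if one of them
         * (e.g. pause/stop) reports that no more work may be done. */
        if (!process_queue(worker, worker->ievents_prio)) {
                return;
        }
        (void)process_queue(worker, worker->ievents);
}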
|
|
|
|
|
|
|
|
void *
|
2020-11-12 10:32:18 +01:00
|
|
|
isc__nm_get_netievent(isc_nm_t *mgr, isc__netievent_type type) {
|
2019-11-21 17:08:06 -08:00
|
|
|
isc__netievent_storage_t *event = isc_mempool_get(mgr->evpool);
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2020-02-12 13:59:18 +01:00
|
|
|
*event = (isc__netievent_storage_t){ .ni.type = type };
|
2019-11-05 13:55:54 -08:00
|
|
|
return (event);
|
|
|
|
}
|
|
|
|
|
2019-11-19 11:56:00 +01:00
|
|
|
void
|
2020-11-12 10:32:18 +01:00
|
|
|
isc__nm_put_netievent(isc_nm_t *mgr, void *ievent) {
|
2019-11-21 17:08:06 -08:00
|
|
|
isc_mempool_put(mgr->evpool, ievent);
|
2019-11-19 11:56:00 +01:00
|
|
|
}
|
|
|
|
|
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_SOCKET_DEF(tcpclose);
|
|
|
|
NETIEVENT_SOCKET_DEF(tcplisten);
|
|
|
|
NETIEVENT_SOCKET_DEF(tcppauseread);
|
|
|
|
NETIEVENT_SOCKET_DEF(tcpstartread);
|
|
|
|
NETIEVENT_SOCKET_DEF(tcpstop);
|
2021-01-25 17:44:39 +02:00
|
|
|
NETIEVENT_SOCKET_DEF(tlsclose);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsconnect);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdobio);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsstartread);
|
|
|
|
NETIEVENT_SOCKET_HANDLE_DEF(tlscancel);
|
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_SOCKET_DEF(udpclose);
|
|
|
|
NETIEVENT_SOCKET_DEF(udplisten);
|
|
|
|
NETIEVENT_SOCKET_DEF(udpread);
|
|
|
|
NETIEVENT_SOCKET_DEF(udpsend);
|
|
|
|
NETIEVENT_SOCKET_DEF(udpstop);
|
|
|
|
|
|
|
|
NETIEVENT_SOCKET_DEF(tcpdnsclose);
|
|
|
|
NETIEVENT_SOCKET_DEF(tcpdnsread);
|
|
|
|
NETIEVENT_SOCKET_DEF(tcpdnsstop);
|
|
|
|
NETIEVENT_SOCKET_DEF(tcpdnslisten);
|
|
|
|
NETIEVENT_SOCKET_REQ_DEF(tcpdnsconnect);
|
|
|
|
NETIEVENT_SOCKET_REQ_DEF(tcpdnssend);
|
|
|
|
NETIEVENT_SOCKET_HANDLE_DEF(tcpdnscancel);
|
|
|
|
NETIEVENT_SOCKET_QUOTA_DEF(tcpdnsaccept);
|
|
|
|
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnsclose);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnsread);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnsstop);
|
2020-12-17 11:40:29 +01:00
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnslisten);
|
|
|
|
NETIEVENT_SOCKET_REQ_DEF(tlsdnsconnect);
|
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_SOCKET_REQ_DEF(tlsdnssend);
|
|
|
|
NETIEVENT_SOCKET_HANDLE_DEF(tlsdnscancel);
|
2020-12-17 11:40:29 +01:00
|
|
|
NETIEVENT_SOCKET_QUOTA_DEF(tlsdnsaccept);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnscycle);
|
|
|
|
NETIEVENT_SOCKET_DEF(tlsdnsshutdown);
|
2020-11-12 10:32:18 +01:00
|
|
|
|
2020-12-07 14:19:10 +02:00
|
|
|
NETIEVENT_SOCKET_DEF(httpstop);
|
|
|
|
NETIEVENT_SOCKET_REQ_DEF(httpsend);
|
|
|
|
NETIEVENT_SOCKET_DEF(httpclose);
|
|
|
|
|
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_SOCKET_REQ_DEF(tcpconnect);
|
|
|
|
NETIEVENT_SOCKET_REQ_DEF(tcpsend);
|
2021-01-25 17:44:39 +02:00
|
|
|
NETIEVENT_SOCKET_REQ_DEF(tlssend);
|
2020-11-12 10:32:18 +01:00
|
|
|
NETIEVENT_SOCKET_REQ_DEF(udpconnect);
|
|
|
|
NETIEVENT_SOCKET_REQ_RESULT_DEF(connectcb);
|
|
|
|
NETIEVENT_SOCKET_REQ_RESULT_DEF(readcb);
|
|
|
|
NETIEVENT_SOCKET_REQ_RESULT_DEF(sendcb);
|
|
|
|
|
|
|
|
NETIEVENT_SOCKET_DEF(detach);
|
|
|
|
NETIEVENT_SOCKET_HANDLE_DEF(tcpcancel);
|
|
|
|
NETIEVENT_SOCKET_HANDLE_DEF(udpcancel);
|
|
|
|
|
|
|
|
NETIEVENT_SOCKET_QUOTA_DEF(tcpaccept);
|
|
|
|
|
|
|
|
NETIEVENT_SOCKET_DEF(close);
|
|
|
|
NETIEVENT_DEF(pause);
|
|
|
|
NETIEVENT_DEF(resume);
|
|
|
|
NETIEVENT_DEF(shutdown);
|
|
|
|
NETIEVENT_DEF(stop);
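Each NETIEVENT_SOCKET_DEF(foo) above generates a typed get/put pair
for that event type. As a rough, assumed expansion (simplified; the
_REQ/_HANDLE/_QUOTA variants add and manage extra members), it is
what enforces the attach-before-enqueue / detach-after-async rule
from the commit message:

isc__netievent_foo_t *
isc__nm_get_netievent_foo(isc_nm_t *mgr, isc_nmsocket_t *sock) {
        isc__netievent_foo_t *ievent =
                (isc__netievent_foo_t *)isc__nm_get_netievent(
                        mgr, netievent_foo);
        /* Hold the socket so it cannot vanish before the event runs. */
        isc__nmsocket_attach(sock, &ievent->sock);

        return (ievent);
}

void
isc__nm_put_netievent_foo(isc_nm_t *mgr, isc__netievent_foo_t *ievent) {
        /* Drop the reference taken at get time... */
        isc__nmsocket_detach(&ievent->sock);
        /* ...and return the storage to the manager's event pool. */
        isc__nm_put_netievent(mgr, ievent);
}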
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_maybe_enqueue_ievent(isc__networker_t *worker,
|
|
|
|
isc__netievent_t *event) {
|
|
|
|
/*
|
|
|
|
* If we are already in the matching nmthread, process the ievent
|
|
|
|
* directly.
|
|
|
|
*/
|
|
|
|
if (worker->id == isc_nm_tid()) {
|
|
|
|
process_netievent(worker, event);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
isc__nm_enqueue_ievent(worker, event);
|
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc__nm_enqueue_ievent(isc__networker_t *worker, isc__netievent_t *event) {
|
2019-12-02 13:54:44 +01:00
|
|
|
if (event->type > netievent_prio) {
|
|
|
|
/*
|
|
|
|
* We need to make sure this signal will be delivered and
|
|
|
|
* the queue will be processed.
|
|
|
|
*/
|
|
|
|
LOCK(&worker->lock);
|
|
|
|
isc_queue_enqueue(worker->ievents_prio, (uintptr_t)event);
|
|
|
|
SIGNAL(&worker->cond);
|
|
|
|
UNLOCK(&worker->lock);
|
|
|
|
} else {
|
|
|
|
isc_queue_enqueue(worker->ievents, (uintptr_t)event);
|
|
|
|
}
|
2019-11-05 13:55:54 -08:00
|
|
|
uv_async_send(&worker->async);
|
|
|
|
}
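The SIGNAL() above is the notification side of the listen/connect
synchronization fix described in the commit message. The waiting
side, sketched here with illustrative names (enqueue_listen_ievent(),
sock->done and sock->result are stand-ins, not actual members), takes
the lock before the event can fire and re-checks its predicate in a
loop, so neither an early SIGNAL() nor a quickly reverted state
change can be lost:

LOCK(&sock->lock);
enqueue_listen_ievent(sock);    /* hypothetical enqueue helper */
while (!sock->done) {           /* set under sock->lock by the callback */
        WAIT(&sock->cond, &sock->lock);
}
result = sock->result;          /* success or error recorded by the callback */
UNLOCK(&sock->lock);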
|
|
|
|
|
2020-01-16 11:52:58 +01:00
|
|
|
bool
|
2020-02-13 14:44:37 -08:00
|
|
|
isc__nmsocket_active(isc_nmsocket_t *sock) {
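	/*
	 * A child socket (sock->parent != NULL) shares its parent's
	 * "active" flag, so the parent's state is authoritative.
	 */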
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
if (sock->parent != NULL) {
|
|
|
|
return (atomic_load(&sock->parent->active));
|
|
|
|
}
|
|
|
|
|
|
|
|
return (atomic_load(&sock->active));
|
|
|
|
}
|
|
|
|
|
2020-10-22 10:07:56 +02:00
|
|
|
bool
|
|
|
|
isc__nmsocket_deactivate(isc_nmsocket_t *sock) {
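	/*
	 * Flip "active" from true to false atomically; the
	 * compare-exchange guarantees that only the one caller that
	 * wins the transition gets true back, so teardown runs once.
	 */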
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
|
|
|
if (sock->parent != NULL) {
|
|
|
|
return (atomic_compare_exchange_strong(&sock->parent->active,
|
|
|
|
&(bool){ true }, false));
|
|
|
|
}
|
|
|
|
|
|
|
|
return (atomic_compare_exchange_strong(&sock->active, &(bool){ true },
|
|
|
|
false));
|
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
void
|
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_attach(isc_nmsocket_t *sock, isc_nmsocket_t **target FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(target != NULL && *target == NULL);
|
|
|
|
|
2020-11-12 10:32:18 +01:00
|
|
|
	isc_nmsocket_t *rsock = NULL;

	if (sock->parent != NULL) {
		rsock = sock->parent;
		INSIST(rsock->parent == NULL); /* sanity check */
	} else {
		rsock = sock;
	}

	NETMGR_TRACE_LOG("isc__nmsocket_attach():%p->references = %" PRIuFAST32
			 "\n",
			 rsock, isc_refcount_current(&rsock->references) + 1);
	isc_refcount_increment0(&rsock->references);

	*target = sock;
}
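For context, a brief sketch of how the attach/detach pair is used by
callers: every holder of a socket pointer takes its own reference and
drops it when done, so the socket can only become eligible for
destruction once all holders are gone. The calling convention matches
the function above; the surrounding scaffolding is illustrative, and
it assumes the FLARG-decorated names resolve to the public
isc__nmsocket_attach()/isc__nmsocket_detach() pair:

/* Illustrative usage, not part of netmgr.c. */
static void
hold_socket_example(isc_nmsocket_t *listener) {
	isc_nmsocket_t *mine = NULL;

	/* Take a reference; *mine is required to start out NULL. */
	isc__nmsocket_attach(listener, &mine);

	/* ... use the socket; it cannot be destroyed under us ... */

	/* Drop the reference and NULL out the local pointer. */
	isc__nmsocket_detach(&mine);
}

Note the asymmetry in the function above: for a child socket the
reference is counted on the parent (rsock), while *target still points
at the child, so teardown is always driven from the parent socket.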
/*
 * Free all resources inside a socket (including its children if any).
 */
static void
nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree FLARG) {
	isc_nmhandle_t *handle = NULL;
	isc__nm_uvreq_t *uvreq = NULL;

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(!isc__nmsocket_active(sock));

	NETMGR_TRACE_LOG("nmsocket_cleanup():%p->references = %" PRIuFAST32
			 "\n",
			 sock, isc_refcount_current(&sock->references));
	atomic_store(&sock->destroying, true);

	if (sock->parent == NULL && sock->children != NULL) {
		/*
		 * We shouldn't be here unless there are no active handles,
		 * so we can clean up and free the children.
		 */
		for (size_t i = 0; i < sock->nchildren; i++) {
			if (!atomic_load(&sock->children[i].destroying)) {
				nmsocket_cleanup(&sock->children[i],
						 false FLARG_PASS);
			}
		}

		/*
		 * This was a parent socket; free the children.
		 */
		isc_mem_put(sock->mgr->mctx, sock->children,
			    sock->nchildren * sizeof(*sock));
		sock->children = NULL;
		sock->nchildren = 0;
	}

	if (sock->statsindex != NULL) {
		isc__nm_decstats(sock->mgr, sock->statsindex[STATID_ACTIVE]);
	}

	sock->statichandle = NULL;

	if (sock->outerhandle != NULL) {
		isc__nmhandle_detach(&sock->outerhandle FLARG_PASS);
	}

	if (sock->outer != NULL) {
		isc___nmsocket_detach(&sock->outer FLARG_PASS);
	}

	while ((handle = isc_astack_pop(sock->inactivehandles)) != NULL) {
		nmhandle_free(sock, handle);
	}

	if (sock->buf != NULL) {
		isc_mem_free(sock->mgr->mctx, sock->buf);
	}

	if (sock->quota != NULL) {
		isc_quota_detach(&sock->quota);
	}

	sock->pquota = NULL;

	isc_astack_destroy(sock->inactivehandles);

	while ((uvreq = isc_astack_pop(sock->inactivereqs)) != NULL) {
		isc_mempool_put(sock->mgr->reqpool, uvreq);
	}

	isc_astack_destroy(sock->inactivereqs);
	sock->magic = 0;

	isc_mem_free(sock->mgr->mctx, sock->ah_frees);
	isc_mem_free(sock->mgr->mctx, sock->ah_handles);
	isc_mutex_destroy(&sock->lock);
	isc_condition_destroy(&sock->cond);
	isc_condition_destroy(&sock->scond);
	isc__nm_tls_cleanup_data(sock);
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function and removed it from the public API.
- renamed the isc_http2 and isc_nm_http2 types and functions to just
isc_http and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address in
isc_nm_httpconnect(); this eliminates the need to parse the URI and
the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client
context, for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN: instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect() (see the ALPN sketch after nmsocket_cleanup()
below).
- refactored isc_nm_httprequest() into separate read and send
functions. When isc_nm_send() or isc_nm_read() is called on an HTTP
socket, the call is stored until the corresponding isc_nm_read() or
_send() arrives; once we have both halves of the pair, the HTTP
request is initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (Eventually doh_test
should be rewritten to use read and send, and this function should be
removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased the NGHTTP2 header block length for client connections to
128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send a "Cache-Control" header in requests and responses. (Note: we
currently try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
	isc__nm_http_cleanup_data(sock);
#ifdef NETMGR_TRACE
	LOCK(&sock->mgr->lock);
	ISC_LIST_UNLINK(sock->mgr->active_sockets, sock, active_link);
	UNLOCK(&sock->mgr->lock);
#endif
	if (dofree) {
		isc_nm_t *mgr = sock->mgr;
		isc_mem_put(mgr->mctx, sock, sizeof(*sock));
		isc_nm_detach(&mgr);
	} else {
		isc_nm_detach(&sock->mgr);
	}
}
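Relating to the ALPN setter mentioned in the HTTP refactor note above:
enabling the HTTP/2 "h2" ALPN token on a client TLS context comes down
to a single OpenSSL call. A minimal sketch using plain OpenSSL; the
isc_tlsctx_enable_http2client_alpn() wrapper presumably does the
equivalent, but this is not its actual implementation:

#include <openssl/ssl.h>

/* Illustrative, not the actual isc_tlsctx_enable_http2client_alpn(). */
static int
enable_h2_alpn(SSL_CTX *ctx) {
	/* ALPN wire format: length-prefixed tokens; here just "h2". */
	static const unsigned char alpn[] = { 2, 'h', '2' };

	/* SSL_CTX_set_alpn_protos() returns 0 on success. */
	return (SSL_CTX_set_alpn_protos(ctx, alpn, sizeof(alpn)));
}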
static void
nmsocket_maybe_destroy(isc_nmsocket_t *sock FLARG) {
	int active_handles;
	bool destroy = false;

	NETMGR_TRACE_LOG("%s():%p->references = %" PRIuFAST32 "\n", __func__,
			 sock, isc_refcount_current(&sock->references));

	if (sock->parent != NULL) {
		/*
		 * This is a child socket and cannot be destroyed except
		 * as a side effect of destroying the parent, so let's go
		 * see if the parent is ready to be destroyed.
		 */
		nmsocket_maybe_destroy(sock->parent FLARG_PASS);
		return;
	}

	/*
	 * This is a parent socket (or a standalone). See whether the
	 * children have active handles before deciding whether to
	 * accept destruction.
	 */
	LOCK(&sock->lock);
	if (atomic_load(&sock->active) || atomic_load(&sock->destroying) ||
	    !atomic_load(&sock->closed) || atomic_load(&sock->references) != 0)
	{
		UNLOCK(&sock->lock);
		return;
	}

	active_handles = atomic_load(&sock->ah);
	if (sock->children != NULL) {
		for (size_t i = 0; i < sock->nchildren; i++) {
			LOCK(&sock->children[i].lock);
			active_handles += atomic_load(&sock->children[i].ah);
			UNLOCK(&sock->children[i].lock);
		}
	}

	if (active_handles == 0 || sock->statichandle != NULL) {
		destroy = true;
	}
	NETMGR_TRACE_LOG("%s:%p->active_handles = %d, .statichandle = %p\n",
			 __func__, sock, active_handles, sock->statichandle);

	if (destroy) {
		atomic_store(&sock->destroying, true);
		UNLOCK(&sock->lock);
		nmsocket_cleanup(sock, true FLARG_PASS);
	} else {
		UNLOCK(&sock->lock);
	}
}

void
isc___nmsocket_prep_destroy(isc_nmsocket_t *sock FLARG) {
	REQUIRE(sock->parent == NULL);

	NETMGR_TRACE_LOG("isc___nmsocket_prep_destroy():%p->references = "
			 "%" PRIuFAST32 "\n",
			 sock, isc_refcount_current(&sock->references));

	/*
	 * The final external reference to the socket is gone. We can try
	 * destroying the socket, but we have to wait for all the inflight
	 * handles to finish first.
	 */
	atomic_store(&sock->active, false);

	/*
	 * If the socket has children, they'll need to be marked inactive
	 * so they can be cleaned up too.
	 */
	if (sock->children != NULL) {
		for (size_t i = 0; i < sock->nchildren; i++) {
			atomic_store(&sock->children[i].active, false);
		}
	}

	/*
	 * If we're here then we already stopped listening; otherwise
	 * we'd have a hanging reference from the listening process.
	 *
	 * If it's a regular socket we may need to close it.
	 */
	if (!atomic_load(&sock->closed)) {
		switch (sock->type) {
		case isc_nm_udpsocket:
			isc__nm_udp_close(sock);
			return;
		case isc_nm_tcpsocket:
			isc__nm_tcp_close(sock);
			return;
		case isc_nm_tcpdnssocket:
			isc__nm_tcpdns_close(sock);
			return;
		case isc_nm_tlssocket:
			isc__nm_tls_close(sock);
			break;
		case isc_nm_tlsdnssocket:
			isc__nm_tlsdns_close(sock);
			return;
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; removed it from the public API.
- renamed the isc_http2 and isc_nm_http2 types and functions to just
isc_http and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address in
isc_nm_httpconnect(); this eliminates the need to parse the URI and
the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client
context, for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. Instead of setting up ALPN
in isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send
functions. When isc_nm_send() or isc_nm_read() is called on an HTTP
socket, the call is stored until the corresponding isc_nm_read() or
_send() arrives; once we have both halves of the pair, the HTTP
request is initiated (see the sketch below).
- isc_nm_httprequest() was renamed isc__nm_http_request() for use as
an internal helper function by the DoH unit test. (Eventually
doh_test should be rewritten to use read and send, and this function
should be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased the NGHTTP2 header block length for client connections to
128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send a "Cache-Control" header in requests and responses. (Note:
currently we try to bypass HTTP caching proxies, but ideally we
should interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
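The read/send pairing described above amounts to latching whichever
half arrives first and firing once both are present. A minimal generic
sketch of that idea; the names are illustrative, not the actual
isc_nm_http code:

#include <stdbool.h>

/* Hypothetical request pairing: whichever half (read or send) arrives
 * first is parked; the request fires once both halves are present. */
typedef struct http_req {
	bool have_read;
	bool have_send;
} http_req_t;

static void
start_request(http_req_t *req) {
	/* ...initiate the HTTP exchange... */
	(void)req;
}

static void
on_read(http_req_t *req) {
	req->have_read = true;
	if (req->have_send) {
		start_request(req);
	}
}

static void
on_send(http_req_t *req) {
	req->have_send = true;
	if (req->have_read) {
		start_request(req);
	}
}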
		case isc_nm_httpsocket:
			isc__nm_http_close(sock);
			return;
		default:
			break;
		}
	}

	nmsocket_maybe_destroy(sock FLARG_PASS);
}

void
isc___nmsocket_detach(isc_nmsocket_t **sockp FLARG) {
	REQUIRE(sockp != NULL && *sockp != NULL);
	REQUIRE(VALID_NMSOCK(*sockp));

	isc_nmsocket_t *sock = *sockp, *rsock = NULL;
	*sockp = NULL;

	/*
	 * If the socket is a part of a set (a child socket) we are
	 * counting references for the whole set at the parent.
	 */
	if (sock->parent != NULL) {
		rsock = sock->parent;
		INSIST(rsock->parent == NULL); /* Sanity check */
	} else {
		rsock = sock;
	}

	NETMGR_TRACE_LOG("isc__nmsocket_detach():%p->references = %" PRIuFAST32
			 "\n",
			 rsock, isc_refcount_current(&rsock->references) - 1);

	if (isc_refcount_decrement(&rsock->references) == 1) {
		isc___nmsocket_prep_destroy(rsock FLARG_PASS);
	}
}

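Taken together, prep_destroy and detach implement the usual
last-reference pattern: isc_refcount_decrement() returns the value
before the decrement, so comparing against 1 detects the final detach.
A minimal standalone sketch of the same idiom with C11 atomics; the
names are illustrative, not the netmgr API:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct object {
	atomic_uint_fast32_t references;
	/* ... payload ... */
} object_t;

static void
object_attach(object_t *obj) {
	atomic_fetch_add(&obj->references, 1);
}

static void
object_detach(object_t *obj) {
	/* fetch_sub returns the value before the decrement, so 1 means
	 * we just dropped the last reference and may destroy. */
	if (atomic_fetch_sub(&obj->references, 1) == 1) {
		free(obj);
	}
}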
void
isc_nmsocket_close(isc_nmsocket_t **sockp) {
	REQUIRE(sockp != NULL);
	REQUIRE(VALID_NMSOCK(*sockp));
	REQUIRE((*sockp)->type == isc_nm_udplistener ||
		(*sockp)->type == isc_nm_tcplistener ||
		(*sockp)->type == isc_nm_tcpdnslistener ||
		(*sockp)->type == isc_nm_tlsdnslistener ||
		(*sockp)->type == isc_nm_tlslistener ||
		(*sockp)->type == isc_nm_httplistener);

	isc__nmsocket_detach(sockp);
}

void
isc___nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type,
		    isc_nmiface_t *iface FLARG) {
	uint16_t family;

	REQUIRE(sock != NULL);
	REQUIRE(mgr != NULL);
	REQUIRE(iface != NULL);

	family = iface->addr.type.sa.sa_family;

	*sock = (isc_nmsocket_t){ .type = type,
				  .iface = iface,
				  .fd = -1,
				  .ah_size = 32,
				  .inactivehandles = isc_astack_new(
					  mgr->mctx, ISC_NM_HANDLES_STACK_SIZE),
				  .inactivereqs = isc_astack_new(
					  mgr->mctx, ISC_NM_REQS_STACK_SIZE) };

#if NETMGR_TRACE
	sock->backtrace_size = backtrace(sock->backtrace, TRACE_SIZE);
	ISC_LINK_INIT(sock, active_link);
	ISC_LIST_INIT(sock->active_handles);
	LOCK(&mgr->lock);
	ISC_LIST_APPEND(mgr->active_sockets, sock, active_link);
	UNLOCK(&mgr->lock);
#endif

	isc_nm_attach(mgr, &sock->mgr);
	sock->uv_handle.handle.data = sock;

	sock->ah_frees = isc_mem_allocate(mgr->mctx,
					  sock->ah_size * sizeof(size_t));
	sock->ah_handles = isc_mem_allocate(
		mgr->mctx, sock->ah_size * sizeof(isc_nmhandle_t *));
	ISC_LINK_INIT(&sock->quotacb, link);
	for (size_t i = 0; i < 32; i++) {
		sock->ah_frees[i] = i;
		sock->ah_handles[i] = NULL;
	}
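Aside: the ah_frees/ah_handles pair above looks like a free-index
list, where slot i of ah_handles is available while index i is parked
in ah_frees. A standalone sketch of that idiom under this assumption,
without the netmgr's locking or growth logic:

#include <stddef.h>

#define TABLE_SIZE 32

static void  *table[TABLE_SIZE];
static size_t frees[TABLE_SIZE]; /* frees[0..nfree-1] hold spare slots */
static size_t nfree;

static void
table_init(void) {
	for (size_t i = 0; i < TABLE_SIZE; i++) {
		frees[i] = i;
		table[i] = NULL;
	}
	nfree = TABLE_SIZE;
}

/* Pop a spare slot index; returns TABLE_SIZE when the table is full. */
static size_t
slot_get(void *item) {
	if (nfree == 0) {
		return (TABLE_SIZE);
	}
	size_t slot = frees[--nfree];
	table[slot] = item;
	return (slot);
}

/* Push the slot index back so it can be reused. */
static void
slot_put(size_t slot) {
	table[slot] = NULL;
	frees[nfree++] = slot;
}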

	switch (type) {
	case isc_nm_udpsocket:
	case isc_nm_udplistener:
		if (family == AF_INET) {
			sock->statsindex = udp4statsindex;
		} else {
			sock->statsindex = udp6statsindex;
		}
		isc__nm_incstats(sock->mgr, sock->statsindex[STATID_ACTIVE]);
		break;
	case isc_nm_tcpsocket:
	case isc_nm_tcplistener:
Refactor netmgr and add more unit tests
This is part of the work intended to make the netmgr stable, testable,
maintainable, and tested. It contains numerous changes to the netmgr
code; unfortunately, it was not possible to split them into smaller
chunks, as the work needs to be committed as a complete unit.
NOTE: There is quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c, and it should be a subject of future refactoring.
The changes included in this commit are listed here (extensively, but
not exhaustively):
* The netmgr_test unit test was split into individual tests (udp_test,
  tcp_test, tcpdns_test, and the newly added tcp_quota_test).
* udp_test and tcp_test have been extended to allow programmatic
  failures from the libuv API. Unfortunately, we can't use cmocka
  mock() and will_return(), so we emulate the behaviour with #define
  and by including the netmgr/{udp,tcp}.c source files directly.
* The netievents that we put on the nm queue have a variable number of
  members; of these, the isc_nmsocket_t and isc_nmhandle_t always need
  to be attached before enqueueing the netievent_<foo> and detached
  after we have called isc_nm_async_<foo>, to ensure that the socket
  (handle) doesn't disappear between scheduling the event and actually
  executing it.
* Cancelling an in-flight TCP connection using libuv requires calling
  uv_close() on the original uv_tcp_t handle, which breaks too many
  assumptions in the netmgr code. Instead of using a uv_timer for TCP
  connection timeouts, we use a platform-specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}.
  When isc_nm_listentcp() or isc_nm_tcpconnect() was called, it used a
  condition variable and mutex to wait for the socket either to end up
  with an error (that path was fine) or to become listening or
  connected. Several things could happen:
  0. everything is ok;
  1. the waiting thread misses the SIGNAL() because the enqueued event
     is processed before we can even start WAIT()ing (if the operation
     ended with an error this was still ok, since the error variable
     would be unchanged);
  2. the waiting thread misses sock->{connected,listening} = `true`
     because it is reset to `false` in tcp_{listen,connect}close_cb()
     when the connection is so short-lived that the socket is closed
     before we can even start WAIT()ing.
  (A race-free wait loop is sketched after this message.)
* The tcpdns protocol has been converted to use libuv directly.
  Previously, tcpdns was layered on the netmgr tcp protocol, which
  proved very complicated to understand, fix, and change. The new
  tcpdns protocol is modeled on the tcp netmgr protocol.
  Closes: #2194, #2283, #2318, #2266, #2034, #1920
* tcp and tcpdns no longer use isc_uv_import/isc_uv_export to pass
  accepted TCP sockets between netthreads; instead (as with UDP) each
  netthread runs its own uv_loop listener. This greatly reduces
  complexity, as a socket always runs in its associated nm and uv
  loops, and we no longer touch the libuv internals.
  There is an unfortunate side effect, though: the new code requires
  operating-system support for load-balanced sockets for both UDP and
  TCP (see #2137). If the operating system doesn't support
  load-balanced sockets (SO_REUSEPORT on Linux or SO_REUSEPORT_LB on
  FreeBSD 12+), the number of netthreads is limited to 1. (Enabling
  the option is sketched after this message.)
* The netmgr now has two debugging #ifdefs:
  1. The already existing NETMGR_TRACE prints any dangling nmsockets
     and nmhandles before triggering an assertion failure. This option
     reduces performance when enabled, but in theory it could be
     enabled even on low-performance systems.
  2. A new NETMGR_TRACE_VERBOSE option has been added that enables
     extensive netmgr logging, allowing the software engineer to
     precisely track attach/detach operations on the nmsockets and
     nmhandles. It is not suitable for any kind of production machine,
     only for debugging.
* The tlsdns netmgr protocol has been split from tcpdns; it still uses
  the old method of stacking the netmgr boxes on top of each other. We
  will have to refactor the tlsdns netmgr protocol to use the same
  approach: build the stack using only libuv and OpenSSL.
* Limit, but do not assert, the TCP buffer size in tcp_alloc_cb.
Closes: #2061
2020-11-12 10:32:18 +01:00
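Two short sketches expand on the items above. Both are minimal models
under assumed names and types; neither is the netmgr's own API.

First, the race-free wait referenced in the synchronization item: the
waiter loops on a predicate under the mutex, so a signal that fires
before WAIT() begins (case 1) or a flag that has already been flipped
back (case 2) cannot be lost. Here waiter_t, done, and result are
hypothetical, and plain pthreads stands in for isc_mutex/isc_condition:

#include <pthread.h>
#include <stdbool.h>

typedef struct waiter {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;  /* set by the event thread before signalling */
	int result; /* outcome recorded by the event thread */
} waiter_t;

static int
wait_for_completion(waiter_t *w) {
	pthread_mutex_lock(&w->lock);
	while (!w->done) {
		/* The predicate, not the signal, carries the state,
		 * so a signal delivered before we started waiting is
		 * not lost. */
		pthread_cond_wait(&w->cond, &w->lock);
	}
	pthread_mutex_unlock(&w->lock);
	return (w->result);
}

static void
complete(waiter_t *w, int result) {
	pthread_mutex_lock(&w->lock);
	w->result = result;
	w->done = true;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

Second, enabling the load-balanced socket option referenced above;
bind_loadbalanced() is a hypothetical helper, and the option name
differs per platform:

#include <sys/socket.h>

static int
bind_loadbalanced(int fd, const struct sockaddr *sa, socklen_t salen) {
	int on = 1;
	int opt;
#if defined(SO_REUSEPORT_LB)
	opt = SO_REUSEPORT_LB; /* FreeBSD 12+ */
#elif defined(SO_REUSEPORT)
	opt = SO_REUSEPORT; /* Linux */
#else
	return (-1); /* unsupported: caller must limit netthreads to 1 */
#endif
	if (setsockopt(fd, SOL_SOCKET, opt, &on, sizeof(on)) != 0) {
		return (-1);
	}
	return (bind(fd, sa, salen));
}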
|
|
|
case isc_nm_tcpdnssocket:
|
|
|
|
case isc_nm_tcpdnslistener:
|
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
case isc_nm_tlsdnslistener:
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
  into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
  function; removed it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just
  isc_http and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address in
  isc_nm_httpconnect(); this eliminates the need to parse the URI and
  the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client
  context, for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN: instead of setting up ALPN in
  isc_tlsctx_createclient(), we now have a function
  isc_tlsctx_enable_http2client_alpn() that can be run from
  isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send
  functions. When isc_nm_send() or isc_nm_read() is called on an HTTP
  socket, the call is stored until the corresponding isc_nm_read() or
  _send() arrives; once we have both halves of the pair, the HTTP
  request is initiated. (A sketch of this pairing follows this note.)
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
  internal helper function by the DoH unit test. (Eventually doh_test
  should be rewritten to use read and send, and this function should be
  removed.)
- added implementations of isc__nm_tls_settimeout() and
  isc__nm_http_settimeout().
- increased the NGHTTP2 header block length for client connections to
  128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to help
  track memory leaks.
- send a "Cache-Control" header in requests and responses. (Note:
  currently we try to bypass HTTP caching proxies, but ideally we
  should interact with them:
  https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
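A minimal sketch of the read/send pairing described above, with
hypothetical types and names (this is not the isc_nm_http API):
whichever half arrives first is parked on the socket, and the request
is initiated once both halves are present.

#include <stdbool.h>

typedef void (*http_cb_t)(void *arg);

typedef struct http_sock {
	bool have_send;
	bool have_read;
	http_cb_t send_cb; /* completion callbacks, fired later */
	http_cb_t read_cb;
	void *send_arg;
	void *read_arg;
} http_sock_t;

static void
maybe_start_request(http_sock_t *sock) {
	if (!(sock->have_send && sock->have_read)) {
		return; /* park until the other half arrives */
	}
	/* Both halves present: submit the request here; the stored
	 * callbacks fire as the transfer completes. */
}

static void
http_send(http_sock_t *sock, http_cb_t cb, void *arg) {
	sock->send_cb = cb;
	sock->send_arg = arg;
	sock->have_send = true;
	maybe_start_request(sock);
}

static void
http_read(http_sock_t *sock, http_cb_t cb, void *arg) {
	sock->read_cb = cb;
	sock->read_arg = arg;
	sock->have_read = true;
	maybe_start_request(sock);
}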
|
|
|
case isc_nm_httpsocket:
|
2020-12-07 14:19:10 +02:00
|
|
|
case isc_nm_httplistener:
|
2020-01-05 01:02:12 -08:00
|
|
|
if (family == AF_INET) {
|
|
|
|
sock->statsindex = tcp4statsindex;
|
|
|
|
} else {
|
|
|
|
sock->statsindex = tcp6statsindex;
|
|
|
|
}
|
2020-01-06 20:26:47 -08:00
|
|
|
isc__nm_incstats(sock->mgr, sock->statsindex[STATID_ACTIVE]);
|
2020-02-05 15:50:29 +11:00
|
|
|
break;
|
2020-01-05 01:02:12 -08:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_mutex_init(&sock->lock);
|
|
|
|
isc_condition_init(&sock->cond);
|
2020-11-12 10:32:18 +01:00
|
|
|
isc_condition_init(&sock->scond);
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_refcount_init(&sock->references, 1);
|
2019-11-08 10:52:49 -08:00
|
|
|
|
2021-01-25 17:44:39 +02:00
|
|
|
memset(&sock->tlsstream, 0, sizeof(sock->tlsstream));
|
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
NETMGR_TRACE_LOG("isc__nmsocket_init():%p->references = %" PRIuFAST32
|
|
|
|
"\n",
|
|
|
|
sock, isc_refcount_current(&sock->references));
|
2020-11-12 10:32:18 +01:00
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
atomic_init(&sock->active, true);
|
2019-11-08 10:52:49 -08:00
|
|
|
atomic_init(&sock->sequential, false);
|
|
|
|
atomic_init(&sock->readpaused, false);
|
2020-10-29 12:04:00 +01:00
|
|
|
atomic_init(&sock->closing, false);
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2020-11-12 10:32:18 +01:00
|
|
|
atomic_store(&sock->active_child_connections, 0);
|
|
|
|
|
2021-02-03 16:59:49 -08:00
|
|
|
isc__nm_http_initsocket(sock);
|
2020-12-07 14:19:10 +02:00
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
sock->magic = NMSOCK_MAGIC;
|
|
|
|
}
|
|
|
|
|
2020-06-20 15:03:05 -07:00
|
|
|
void
|
|
|
|
isc__nmsocket_clearcb(isc_nmsocket_t *sock) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
2020-09-18 12:27:40 +02:00
|
|
|
REQUIRE(!isc__nm_in_netthread() || sock->tid == isc_nm_tid());
|
2020-06-20 15:03:05 -07:00
|
|
|
|
2020-09-11 10:53:31 +02:00
|
|
|
sock->recv_cb = NULL;
|
|
|
|
sock->recv_cbarg = NULL;
|
|
|
|
sock->accept_cb = NULL;
|
2020-06-20 15:03:05 -07:00
|
|
|
sock->accept_cbarg = NULL;
|
2020-10-29 12:04:00 +01:00
|
|
|
sock->connect_cb = NULL;
|
|
|
|
sock->connect_cbarg = NULL;
|
2020-06-20 15:03:05 -07:00
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc__nm_free_uvbuf(isc_nmsocket_t *sock, const uv_buf_t *buf) {
|
2019-11-05 13:55:54 -08:00
|
|
|
isc__networker_t *worker = NULL;
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
2019-11-08 10:52:49 -08:00
|
|
|
if (buf->base == NULL) {
|
|
|
|
/* Empty buffer: might happen in case of error. */
|
|
|
|
return;
|
|
|
|
}
|
2019-11-05 13:55:54 -08:00
|
|
|
worker = &sock->mgr->workers[sock->tid];
|
|
|
|
|
2019-11-15 13:22:13 -08:00
|
|
|
REQUIRE(worker->recvbuf_inuse);
|
2020-07-02 16:27:38 +02:00
|
|
|
if (sock->type == isc_nm_udpsocket && buf->base > worker->recvbuf &&
|
2020-01-29 13:16:04 +01:00
|
|
|
buf->base <= worker->recvbuf + ISC_NETMGR_RECVBUF_SIZE)
|
|
|
|
{
|
2020-04-29 15:19:32 +02:00
|
|
|
/* Can happen in case of out-of-order recvmmsg in libuv1.36 */
|
2020-01-29 13:16:04 +01:00
|
|
|
return;
|
|
|
|
}
|
2019-11-15 13:22:13 -08:00
|
|
|
REQUIRE(buf->base == worker->recvbuf);
|
|
|
|
worker->recvbuf_inuse = false;
|
2019-11-05 13:55:54 -08:00
|
|
|
}
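For context, here is a minimal model of the buffer-lending scheme that
isc__nm_free_uvbuf() unwinds. The libuv callback signature is real, but
worker_t and RECVBUF_SIZE are stand-ins: each worker owns one receive
buffer, lends it out in the allocation callback, and the read callback
must return it before the next allocation.

#include <stdbool.h>
#include <uv.h>

#define RECVBUF_SIZE 65536 /* stand-in for ISC_NETMGR_RECVBUF_SIZE */

typedef struct worker {
	char recvbuf[RECVBUF_SIZE];
	bool recvbuf_inuse;
} worker_t;

static void
alloc_cb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
	worker_t *worker = handle->data;

	(void)suggested_size;
	if (worker->recvbuf_inuse) {
		/* Buffer not returned yet: give libuv nothing. */
		buf->base = NULL;
		buf->len = 0;
		return;
	}
	buf->base = worker->recvbuf;
	buf->len = RECVBUF_SIZE;
	worker->recvbuf_inuse = true;
}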
|
|
|
|
|
|
|
|
static isc_nmhandle_t *
|
2020-02-13 14:44:37 -08:00
|
|
|
alloc_handle(isc_nmsocket_t *sock) {
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_nmhandle_t *handle =
|
|
|
|
isc_mem_get(sock->mgr->mctx,
|
|
|
|
sizeof(isc_nmhandle_t) + sock->extrahandlesize);
|
|
|
|
|
2020-02-12 13:59:18 +01:00
|
|
|
*handle = (isc_nmhandle_t){ .magic = NMHANDLE_MAGIC };
|
2020-09-02 17:57:44 +02:00
|
|
|
#if NETMGR_TRACE
|
|
|
|
ISC_LINK_INIT(handle, active_link);
|
|
|
|
#endif
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_refcount_init(&handle->references, 1);
|
|
|
|
|
|
|
|
return (handle);
|
|
|
|
}
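alloc_handle() over-allocates so that the handle and the caller-defined
opaque data (extrahandlesize bytes) live in one allocation, and a
single isc_mem_put() releases both. The same pattern with plain
malloc() and hypothetical names:

#include <stdlib.h>
#include <string.h>

typedef struct handle {
	unsigned int magic;
	/* extrasize bytes of opaque caller data follow in memory */
} handle_t;

static handle_t *
handle_new(size_t extrasize) {
	handle_t *h = malloc(sizeof(*h) + extrasize);
	if (h == NULL) {
		return (NULL);
	}
	memset(h, 0, sizeof(*h) + extrasize);
	return (h);
}

static void *
handle_opaque(handle_t *h) {
	/* The opaque region starts right past the fixed struct. */
	return ((char *)h + sizeof(*h));
}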
|
|
|
|
|
|
|
|
isc_nmhandle_t *
|
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmhandle_get(isc_nmsocket_t *sock, isc_sockaddr_t *peer,
|
|
|
|
isc_sockaddr_t *local FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
isc_nmhandle_t *handle = NULL;
|
2020-02-13 14:44:37 -08:00
|
|
|
size_t handlenum;
|
|
|
|
int pos;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
|
|
|
handle = isc_astack_pop(sock->inactivehandles);
|
|
|
|
|
|
|
|
if (handle == NULL) {
|
|
|
|
handle = alloc_handle(sock);
|
|
|
|
} else {
|
2020-06-10 11:32:39 +02:00
|
|
|
isc_refcount_init(&handle->references, 1);
|
2020-07-01 00:49:12 -07:00
|
|
|
INSIST(VALID_NMHANDLE(handle));
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
NETMGR_TRACE_LOG(
|
|
|
|
"isc__nmhandle_get():handle %p->references = %" PRIuFAST32 "\n",
|
|
|
|
handle, isc_refcount_current(&handle->references));
|
2020-11-12 10:32:18 +01:00
|
|
|
|
|
|
|
isc___nmsocket_attach(sock, &handle->sock FLARG_PASS);
|
2020-06-04 23:13:54 -07:00
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
#if NETMGR_TRACE
|
2020-09-02 17:57:44 +02:00
|
|
|
handle->backtrace_size = backtrace(handle->backtrace, TRACE_SIZE);
|
|
|
|
#endif
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
if (peer != NULL) {
|
2020-05-13 17:37:51 +02:00
|
|
|
memmove(&handle->peer, peer, sizeof(isc_sockaddr_t));
|
2019-11-05 13:55:54 -08:00
|
|
|
} else {
|
2020-05-13 17:37:51 +02:00
|
|
|
memmove(&handle->peer, &sock->peer, sizeof(isc_sockaddr_t));
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (local != NULL) {
|
2020-05-13 17:37:51 +02:00
|
|
|
memmove(&handle->local, local, sizeof(isc_sockaddr_t));
|
2019-11-05 13:55:54 -08:00
|
|
|
} else if (sock->iface != NULL) {
|
2020-05-13 17:37:51 +02:00
|
|
|
memmove(&handle->local, &sock->iface->addr,
|
|
|
|
sizeof(isc_sockaddr_t));
|
2019-11-05 13:55:54 -08:00
|
|
|
} else {
|
|
|
|
INSIST(0);
|
|
|
|
ISC_UNREACHABLE();
|
|
|
|
}
|
|
|
|
|
|
|
|
LOCK(&sock->lock);
|
|
|
|
/* We need to add this handle to the list of active handles */
|
2020-02-12 13:59:18 +01:00
|
|
|
if ((size_t)atomic_load(&sock->ah) == sock->ah_size) {
|
2019-11-05 13:55:54 -08:00
|
|
|
sock->ah_frees =
|
|
|
|
isc_mem_reallocate(sock->mgr->mctx, sock->ah_frees,
|
2020-02-12 13:59:18 +01:00
|
|
|
sock->ah_size * 2 * sizeof(size_t));
|
|
|
|
sock->ah_handles = isc_mem_reallocate(
|
|
|
|
sock->mgr->mctx, sock->ah_handles,
|
|
|
|
sock->ah_size * 2 * sizeof(isc_nmhandle_t *));
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
for (size_t i = sock->ah_size; i < sock->ah_size * 2; i++) {
|
|
|
|
sock->ah_frees[i] = i;
|
|
|
|
sock->ah_handles[i] = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
sock->ah_size *= 2;
|
|
|
|
}
|
|
|
|
|
2019-11-19 11:56:00 +01:00
|
|
|
handlenum = atomic_fetch_add(&sock->ah, 1);
|
|
|
|
pos = sock->ah_frees[handlenum];
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
INSIST(sock->ah_handles[pos] == NULL);
|
|
|
|
sock->ah_handles[pos] = handle;
|
|
|
|
handle->ah_pos = pos;
|
2020-09-02 17:57:44 +02:00
|
|
|
#if NETMGR_TRACE
|
|
|
|
ISC_LIST_APPEND(sock->active_handles, handle, active_link);
|
|
|
|
#endif
|
2019-11-05 13:55:54 -08:00
|
|
|
UNLOCK(&sock->lock);
|
|
|
|
|
2021-03-29 10:52:05 +02:00
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_udpsocket:
|
|
|
|
case isc_nm_tcpdnssocket:
|
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
if (!atomic_load(&sock->client)) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* fallthrough */
|
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
case isc_nm_tlssocket:
|
2020-06-10 11:32:39 +02:00
|
|
|
INSIST(sock->statichandle == NULL);
|
2020-09-03 13:31:27 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* statichandle must be assigned, not attached;
|
|
|
|
* otherwise, if a handle was detached elsewhere
|
|
|
|
* it could never reach 0 references, and the
|
|
|
|
* handle and socket would never be freed.
|
|
|
|
*/
|
2020-06-10 11:32:39 +02:00
|
|
|
sock->statichandle = handle;
|
2021-03-29 10:52:05 +02:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2021-02-03 16:59:49 -08:00
|
|
|
if (sock->type == isc_nm_httpsocket && sock->h2.session) {
|
|
|
|
isc__nm_httpsession_attach(sock->h2.session,
|
|
|
|
&handle->httpsession);
|
2020-12-07 14:19:10 +02:00
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
return (handle);
|
|
|
|
}
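The ah_handles/ah_frees bookkeeping above is a free-slot stack: ah
counts active handles, and ah_frees[ah..ah_size-1] always holds the
indices of the unused slots in ah_handles, so taking and releasing a
slot are both O(1). A single-threaded sketch with hypothetical names
(the real code also holds sock->lock and doubles the arrays on demand,
as shown above):

#include <assert.h>
#include <stddef.h>

#define SLOTS 32 /* mirrors the initial ah_size */

static void *handles[SLOTS]; /* handles[pos] is the stored pointer */
static size_t frees[SLOTS];  /* frees[count..] are free slot indices */
static size_t count = 0;     /* number of active slots (like sock->ah) */

static void
slots_init(void) {
	for (size_t i = 0; i < SLOTS; i++) {
		frees[i] = i;
		handles[i] = NULL;
	}
}

static size_t
slot_take(void *h) {
	assert(count < SLOTS);
	size_t pos = frees[count++]; /* next free index */
	assert(handles[pos] == NULL);
	handles[pos] = h;
	return (pos);
}

static void
slot_release(size_t pos) {
	assert(handles[pos] != NULL);
	handles[pos] = NULL;
	frees[--count] = pos; /* push the index back on the free stack */
}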
|
|
|
|
|
|
|
|
void
|
2020-11-12 10:32:18 +01:00
|
|
|
isc__nmhandle_attach(isc_nmhandle_t *handle, isc_nmhandle_t **handlep FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
2020-09-03 13:31:27 -07:00
|
|
|
REQUIRE(handlep != NULL && *handlep == NULL);
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
NETMGR_TRACE_LOG("isc__nmhandle_attach():handle %p->references = "
|
|
|
|
"%" PRIuFAST32 "\n",
|
2020-11-12 10:32:18 +01:00
|
|
|
handle, isc_refcount_current(&handle->references) + 1);
|
|
|
|
|
2020-01-14 09:43:37 +01:00
|
|
|
isc_refcount_increment(&handle->references);
|
2020-09-03 13:31:27 -07:00
|
|
|
*handlep = handle;
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_is_stream(isc_nmhandle_t *handle) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
return (handle->sock->type == isc_nm_tcpsocket ||
|
2020-11-12 10:32:18 +01:00
|
|
|
handle->sock->type == isc_nm_tcpdnssocket ||
|
2021-01-25 17:44:39 +02:00
|
|
|
handle->sock->type == isc_nm_tlssocket ||
|
2020-11-12 10:32:18 +01:00
|
|
|
		handle->sock->type == isc_nm_tlsdnssocket);
}

static void
nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
	size_t extra = sock->extrahandlesize;

	isc_refcount_destroy(&handle->references);

	if (handle->dofree != NULL) {
		handle->dofree(handle->opaque);
	}

	*handle = (isc_nmhandle_t){ .magic = 0 };

	isc_mem_put(sock->mgr->mctx, handle, sizeof(isc_nmhandle_t) + extra);
}

static void
nmhandle_deactivate(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
	size_t handlenum;
	bool reuse = false;

	/*
	 * We do all of this under lock to avoid races with socket
	 * destruction.  We have to do this now, because at this point the
	 * socket is either unused or still attached to event->sock.
	 */
	LOCK(&sock->lock);

	INSIST(sock->ah_handles[handle->ah_pos] == handle);
	INSIST(sock->ah_size > handle->ah_pos);
	INSIST(atomic_load(&sock->ah) > 0);

#ifdef NETMGR_TRACE
	ISC_LIST_UNLINK(sock->active_handles, handle, active_link);
#endif

	sock->ah_handles[handle->ah_pos] = NULL;
	handlenum = atomic_fetch_sub(&sock->ah, 1) - 1;
	sock->ah_frees[handlenum] = handle->ah_pos;
	handle->ah_pos = 0;

	if (atomic_load(&sock->active)) {
		reuse = isc_astack_trypush(sock->inactivehandles, handle);
	}
	if (!reuse) {
		nmhandle_free(sock, handle);
	}
	UNLOCK(&sock->lock);
}
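
/*
 * Note on the bookkeeping above: sock->ah counts the active handles,
 * sock->ah_handles[] maps slot positions to handles, and
 * sock->ah_frees[] records freed slot indices for reuse.  For example,
 * with three handles in slots 0..2, deactivating the handle in slot 1
 * drops ah to 2 and stores ah_frees[2] = 1, so slot 1 can be handed
 * out again when the next handle is activated.
 */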

void
isc__nmhandle_detach(isc_nmhandle_t **handlep FLARG) {
	isc_nmsocket_t *sock = NULL;
	isc_nmhandle_t *handle = NULL;

	REQUIRE(handlep != NULL);
	REQUIRE(VALID_NMHANDLE(*handlep));

	handle = *handlep;
	*handlep = NULL;

	sock = handle->sock;
	if (sock->tid == isc_nm_tid()) {
		nmhandle_detach_cb(&handle FLARG_PASS);
	} else {
		isc__netievent_detach_t *event =
			isc__nm_get_netievent_detach(sock->mgr, sock);
		/*
		 * The handle is attached to the event implicitly; because
		 * this may be the last reference, it needs to be destroyed
		 * explicitly in the async callback.
		 */
		event->handle = handle;
		FLARG_IEVENT_PASS(event);
		isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
				       (isc__netievent_t *)event);
	}
}

void
isc__nmsocket_shutdown(isc_nmsocket_t *sock);

static void
nmhandle_detach_cb(isc_nmhandle_t **handlep FLARG) {
	isc_nmsocket_t *sock = NULL;
	isc_nmhandle_t *handle = NULL;

	REQUIRE(handlep != NULL);
	REQUIRE(VALID_NMHANDLE(*handlep));

	handle = *handlep;
	*handlep = NULL;

	NETMGR_TRACE_LOG("isc__nmhandle_detach():%p->references = %" PRIuFAST32
			 "\n",
			 handle, isc_refcount_current(&handle->references) - 1);

	if (isc_refcount_decrement(&handle->references) > 1) {
		return;
	}

	/* We need an acquire memory barrier here */
	(void)isc_refcount_current(&handle->references);

	sock = handle->sock;
	handle->sock = NULL;

	if (handle->doreset != NULL) {
		handle->doreset(handle->opaque);
	}

	if (sock->type == isc_nm_httpsocket && handle->httpsession != NULL) {
		isc__nm_httpsession_detach(&handle->httpsession);
	}

	nmhandle_deactivate(sock, handle);

	/*
	 * The handle is gone now.  If the socket has a callback configured
	 * for that (e.g., to perform cleanup after request processing),
	 * call it now, or schedule it to run asynchronously.
	 */
	if (sock->closehandle_cb != NULL) {
		if (sock->tid == isc_nm_tid()) {
			sock->closehandle_cb(sock);
		} else {
			isc__netievent_close_t *event =
				isc__nm_get_netievent_close(sock->mgr, sock);
			isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
					       (isc__netievent_t *)event);
		}
	}

	if (handle == sock->statichandle) {
		/* statichandle is assigned, not attached. */
		sock->statichandle = NULL;
	}

	isc___nmsocket_detach(&sock FLARG_PASS);
}

void *
isc_nmhandle_getdata(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	return (handle->opaque);
}

void
isc_nmhandle_setdata(isc_nmhandle_t *handle, void *arg,
		     isc_nm_opaquecb_t doreset, isc_nm_opaquecb_t dofree) {
	REQUIRE(VALID_NMHANDLE(handle));

	handle->opaque = arg;
	handle->doreset = doreset;
	handle->dofree = dofree;
}
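
/*
 * Usage sketch (illustrative only; "echo_state_t", "echo_reset",
 * "echo_free" and "echo_read_cb" are invented names): attaching
 * per-handle state so that doreset runs when the handle is released
 * for possible reuse and dofree runs when the handle memory is
 * destroyed.
 */
#if 0
typedef struct echo_state {
	isc_mem_t *mctx;
	size_t	   nbytes;
} echo_state_t;

static void
echo_reset(void *arg) {
	echo_state_t *st = arg;
	st->nbytes = 0; /* the handle may be recycled; start clean */
}

static void
echo_free(void *arg) {
	echo_state_t *st = arg;
	isc_mem_putanddetach(&st->mctx, st, sizeof(*st));
}

static void
echo_read_cb(isc_nmhandle_t *handle, isc_result_t result,
	     isc_region_t *region, void *cbarg) {
	echo_state_t *st = isc_nmhandle_getdata(handle);
	if (st == NULL) {
		st = cbarg;
		isc_nmhandle_setdata(handle, st, echo_reset, echo_free);
	}
	if (result == ISC_R_SUCCESS) {
		st->nbytes += region->length;
	}
}
#endif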

void
isc__nm_alloc_dnsbuf(isc_nmsocket_t *sock, size_t len) {
	REQUIRE(len <= NM_BIG_BUF);

	if (sock->buf == NULL) {
		/* We don't have the buffer at all */
		size_t alloc_len = len < NM_REG_BUF ? NM_REG_BUF : NM_BIG_BUF;
		sock->buf = isc_mem_allocate(sock->mgr->mctx, alloc_len);
		sock->buf_size = alloc_len;
	} else {
		/* We have the buffer but it's too small */
		sock->buf = isc_mem_reallocate(sock->mgr->mctx, sock->buf,
					       NM_BIG_BUF);
		sock->buf_size = NM_BIG_BUF;
	}
}
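
/*
 * Sizing policy above, spelled out: an initial message shorter than
 * NM_REG_BUF gets the regular-sized buffer; anything larger, or any
 * later regrow, goes straight to NM_BIG_BUF.  Since the REQUIRE caps
 * len at NM_BIG_BUF, a second growth step can never be needed.
 */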

void
isc__nm_failed_send_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
		       isc_result_t eresult) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(req));

	if (req->cb.send != NULL) {
		isc__nm_sendcb(sock, req, eresult, true);
	} else {
		isc__nm_uvreq_put(&req, sock);
	}
}

void
isc__nm_failed_accept_cb(isc_nmsocket_t *sock, isc_result_t eresult) {
	REQUIRE(sock->accepting);
	REQUIRE(sock->server);

	/*
	 * Detach the quota early to make room for other connections;
	 * otherwise it'd be detached later asynchronously, and clog
	 * the quota unnecessarily.
	 */
	if (sock->quota != NULL) {
		isc_quota_detach(&sock->quota);
	}

	isc__nmsocket_detach(&sock->server);

	sock->accepting = false;

	switch (eresult) {
	case ISC_R_NOTCONNECTED:
		/* IGNORE: The client disconnected before we could accept */
		break;
	default:
		isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
			      ISC_LOGMODULE_NETMGR, ISC_LOG_ERROR,
			      "Accepting TCP connection failed: %s",
			      isc_result_totext(eresult));
	}
}

void
isc__nm_failed_connect_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
			  isc_result_t eresult) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(req));
	REQUIRE(sock->tid == isc_nm_tid());
	REQUIRE(atomic_load(&sock->connecting));
	REQUIRE(req->cb.connect != NULL);

	isc__nmsocket_timer_stop(sock);
	uv_handle_set_data((uv_handle_t *)&sock->timer, sock);

	atomic_store(&sock->connecting, false);

	isc__nmsocket_clearcb(sock);
	isc__nm_connectcb(sock, req, eresult, true);

	isc__nmsocket_prep_destroy(sock);
}

void
isc__nm_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result) {
	REQUIRE(VALID_NMSOCK(sock));
	switch (sock->type) {
	case isc_nm_udpsocket:
		isc__nm_udp_failed_read_cb(sock, result);
		return;
	case isc_nm_tcpsocket:
		isc__nm_tcp_failed_read_cb(sock, result);
		return;
	case isc_nm_tcpdnssocket:
		isc__nm_tcpdns_failed_read_cb(sock, result);
		return;
	case isc_nm_tlsdnssocket:
		isc__nm_tlsdns_failed_read_cb(sock, result);
		return;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}

void
isc__nmsocket_connecttimeout_cb(uv_timer_t *timer) {
	uv_connect_t *uvreq = uv_handle_get_data((uv_handle_t *)timer);
	isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)uvreq->handle);
	isc__nm_uvreq_t *req = uv_handle_get_data((uv_handle_t *)uvreq);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_nm_tid());
	REQUIRE(atomic_load(&sock->connecting));
	REQUIRE(atomic_load(&sock->client));
	REQUIRE(VALID_UVREQ(req));
	REQUIRE(VALID_NMHANDLE(req->handle));

	isc__nmsocket_timer_stop(sock);

	/* Call the connect callback directly */
	req->cb.connect(req->handle, ISC_R_TIMEDOUT, req->cbarg);

	/* If the callback didn't restart the timer, clean up and shut down */
	if (!isc__nmsocket_timer_running(sock)) {
		isc__nmsocket_clearcb(sock);
		isc__nmsocket_shutdown(sock);
		atomic_store(&sock->connecting, false);
	}
}

static void
isc__nmsocket_readtimeout_cb(uv_timer_t *timer) {
	isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)timer);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_nm_tid());
	REQUIRE(sock->reading);

	if (atomic_load(&sock->client)) {
		uv_timer_stop(timer);

		if (sock->recv_cb != NULL) {
			isc__nm_uvreq_t *req = isc__nm_get_read_req(sock, NULL);
			isc__nm_readcb(sock, req, ISC_R_TIMEDOUT);
		}

		if (!isc__nmsocket_timer_running(sock)) {
			isc__nmsocket_clearcb(sock);
			isc__nm_failed_read_cb(sock, ISC_R_CANCELED);
		}
	} else {
		isc__nm_failed_read_cb(sock, ISC_R_TIMEDOUT);
	}
}
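
/*
 * For client sockets the timeout is surfaced to the reader: the recv
 * callback is invoked with ISC_R_TIMEDOUT and may restart the timer
 * (via isc__nmsocket_timer_restart()) to keep waiting; only if the
 * timer is left stopped is the read failed with ISC_R_CANCELED.
 * Server-side sockets simply fail the read with ISC_R_TIMEDOUT.
 */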

void
isc__nmsocket_timer_restart(isc_nmsocket_t *sock) {
	int r = 0;

	REQUIRE(VALID_NMSOCK(sock));

	if (atomic_load(&sock->connecting)) {
		if (sock->connect_timeout == 0) {
			return;
		}

		r = uv_timer_start(&sock->timer,
				   isc__nmsocket_connecttimeout_cb,
				   sock->connect_timeout + 10, 0);
	} else {
		if (sock->read_timeout == 0) {
			return;
		}

		r = uv_timer_start(&sock->timer, isc__nmsocket_readtimeout_cb,
				   sock->read_timeout, 0);
	}

	RUNTIME_CHECK(r == 0);
}
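
/*
 * The 10 ms of slack added to connect_timeout above appears intended
 * to let the platform-level TCP connection timeout (set via a socket
 * option elsewhere in the netmgr) fire first, leaving this uv timer
 * as a backstop; this is a reading of the code, not a documented
 * guarantee.
 */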

bool
isc__nmsocket_timer_running(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));

	return (uv_is_active((uv_handle_t *)&sock->timer));
}

void
isc__nmsocket_timer_start(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));

	if (isc__nmsocket_timer_running(sock)) {
		return;
	}

	isc__nmsocket_timer_restart(sock);
}

void
isc__nmsocket_timer_stop(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));

	/* uv_timer_stop() is idempotent, no need to check if running */
	int r = uv_timer_stop(&sock->timer);
	RUNTIME_CHECK(r == 0);
}
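
/*
 * A minimal, self-contained sketch of the libuv one-shot timer
 * pattern wrapped by the functions above (plain libuv only;
 * "example_timeout_cb" and the string payload are invented for the
 * example):
 */
#if 0
#include <stdio.h>
#include <uv.h>

static void
example_timeout_cb(uv_timer_t *timer) {
	/* Fires exactly once: the repeat argument below is 0. */
	printf("%s\n", (const char *)uv_handle_get_data((uv_handle_t *)timer));
}

int
main(void) {
	uv_timer_t timer;

	uv_timer_init(uv_default_loop(), &timer);
	uv_handle_set_data((uv_handle_t *)&timer, "timed out");
	uv_timer_start(&timer, example_timeout_cb, 1000, 0); /* 1s one-shot */
	return (uv_run(uv_default_loop(), UV_RUN_DEFAULT));
}
#endif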

isc__nm_uvreq_t *
isc__nm_get_read_req(isc_nmsocket_t *sock, isc_sockaddr_t *sockaddr) {
	isc__nm_uvreq_t *req = NULL;

	req = isc__nm_uvreq_get(sock->mgr, sock);
	req->cb.recv = sock->recv_cb;
	req->cbarg = sock->recv_cbarg;

	switch (sock->type) {
	case isc_nm_tcpsocket:
	case isc_nm_tlssocket:
		isc_nmhandle_attach(sock->statichandle, &req->handle);
		break;
	default:
		if (atomic_load(&sock->client)) {
			isc_nmhandle_attach(sock->statichandle, &req->handle);
		} else {
			req->handle = isc__nmhandle_get(sock, sockaddr, NULL);
		}
		break;
	}

	return (req);
}
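
/*
 * In short: single-connection stream sockets (TCP, TLS) and any
 * client-mode socket reuse the socket's statichandle, while
 * server-side reads on the other socket types get a fresh handle via
 * isc__nmhandle_get().
 */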

/*%<
 * Allocator for read operations.  Limited to size 2^16.
 *
 * Note this doesn't actually allocate anything, it just assigns the
 * worker's receive buffer to a socket, and marks it as "in use".
 */
void
isc__nm_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf) {
	isc_nmsocket_t *sock = uv_handle_get_data(handle);
	isc__networker_t *worker = NULL;

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(isc__nm_in_netthread());

	switch (sock->type) {
	case isc_nm_udpsocket:
		REQUIRE(size <= ISC_NETMGR_RECVBUF_SIZE);
		size = ISC_NETMGR_RECVBUF_SIZE;
		break;
	case isc_nm_tcpsocket:
	case isc_nm_tcpdnssocket:
		break;
	case isc_nm_tlsdnssocket:
		/*
		 * We need to limit the size of the individual chunks to
		 * be read, so that BIO_write() always succeeds and the
		 * data is consumed before the next read callback is
		 * called.
		 */
		if (size >= ISC_NETMGR_TLSBUF_SIZE) {
			size = ISC_NETMGR_TLSBUF_SIZE;
		}
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}

	worker = &sock->mgr->workers[sock->tid];
	INSIST(!worker->recvbuf_inuse);

	buf->base = worker->recvbuf;
	buf->len = size;
	worker->recvbuf_inuse = true;
}
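
/*
 * The libuv allocation contract implemented above, in miniature
 * (illustrative sketch only; "example_recvbuf" and friends are
 * invented names): the alloc callback lends libuv a buffer, and the
 * matching read callback is expected to release it before the next
 * allocation is requested.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <uv.h>

static char example_recvbuf[65536];
static bool example_recvbuf_inuse = false;

static void
example_alloc_cb(uv_handle_t *handle, size_t suggested, uv_buf_t *buf) {
	(void)handle;
	/* Only one read may be outstanding per loop at a time. */
	assert(!example_recvbuf_inuse);
	buf->base = example_recvbuf;
	buf->len = suggested < sizeof(example_recvbuf)
			   ? suggested
			   : sizeof(example_recvbuf);
	example_recvbuf_inuse = true;
}
#endif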
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_start_reading(isc_nmsocket_t *sock) {
|
|
|
|
int r;
|
|
|
|
|
|
|
|
if (sock->reading) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_udpsocket:
|
|
|
|
r = uv_udp_recv_start(&sock->uv_handle.udp, isc__nm_alloc_cb,
|
|
|
|
isc__nm_udp_read_cb);
|
2020-11-02 19:58:05 -08:00
|
|
|
break;
|
2021-03-31 12:14:54 +02:00
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
r = uv_read_start(&sock->uv_handle.stream, isc__nm_alloc_cb,
|
|
|
|
isc__nm_tcp_read_cb);
|
|
|
|
break;
|
2020-11-02 19:58:05 -08:00
|
|
|
case isc_nm_tcpdnssocket:
|
2021-03-16 09:03:02 +01:00
|
|
|
r = uv_read_start(&sock->uv_handle.stream, isc__nm_alloc_cb,
|
|
|
|
isc__nm_tcpdns_read_cb);
|
2020-11-02 19:58:05 -08:00
|
|
|
break;
|
Refactor netmgr and add more unit tests
This is a part of the works that intends to make the netmgr stable,
testable, maintainable and tested. It contains a numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
works.
NOTE: There's a quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be a subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test has been extended to allow programatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have variable number of
members, out of these the isc_nmsocket_t and isc_nmhandle_t always
needs to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires to call
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use platform specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called it was
waiting for socket to either end up with error (that path was fine) or
to be listening or connected using condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread miss the sock->{connected,listening} = `true`
would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used tcp protocol from netmgr, this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way how tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns is now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) uses per netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect though, the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr has now two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering assertion failure. This options would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. New NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
2021-03-16 09:03:02 +01:00
|
|
|
r = uv_read_start(&sock->uv_handle.stream, isc__nm_alloc_cb,
|
|
|
|
isc__nm_tlsdns_read_cb);
|
Refactor netmgr and add more unit tests
This is part of the work intended to make the netmgr stable,
testable, maintainable, and tested. It contains numerous changes to
the netmgr code and, unfortunately, it was not possible to split them
into smaller chunks, as the work needs to be committed as a complete
whole.
NOTE: There is quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c; it should be a subject of refactoring in the future.
The changes included in this commit are listed here
(extensively, but not exhaustively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and the newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #defines
and by including the netmgr/{udp,tcp}.c source files directly.
* The netievents that we put on the nm queue have a variable number of
members; of these, the isc_nmsocket_t and isc_nmhandle_t always need
to be attached before enqueueing the netievent_<foo> and detached
after we have called isc_nm_async_<foo>, to ensure that the socket
(handle) doesn't disappear between scheduling the event and actually
executing it.
* Cancelling an in-flight TCP connection using libuv requires calling
uv_close() on the original uv_tcp_t handle, which breaks too many
assumptions we have in the netmgr code. Instead of using a uv_timer
for TCP connection timeouts, we use a platform-specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect};
see the sketch following this message. When isc_nm_listentcp() or
isc_nm_tcpconnect() was called, it used a condition variable and a
mutex to wait for the socket either to end up in an error state (that
path was fine) or to become listening or connected.
Several things could happen:
0. everything is OK
1. the waiting thread misses the SIGNAL() because the enqueued
event is processed faster than we can start WAIT()ing; if the
operation ends with an error this is still OK, as the error
variable is left unchanged.
2. the waiting thread misses the moment when
sock->{connected,listening} is `true`, because it is reset to `false`
in tcp_{listen,connect}close_cb() when the connection is so
short-lived that the socket is closed before we can even start
WAIT()ing.
* tcpdns has been converted to use libuv directly. Previously, the
tcpdns protocol was layered on the netmgr tcp protocol; this proved
very complicated to understand, fix, and change. The new tcpdns
protocol is modeled on the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* tcp and tcpdns no longer use isc_uv_import/isc_uv_export to pass
accepted TCP sockets between netthreads; instead (as with UDP) each
netthread runs its own uv_loop listener. This greatly reduces
complexity, as a socket always runs in its associated nm and uv
loops, and we no longer touch the libuv internals.
There is an unfortunate side effect, though: the new code requires
operating-system support for load-balanced sockets for both UDP and
TCP (see #2137). If the operating system doesn't support load-balanced
sockets (SO_REUSEPORT on Linux or SO_REUSEPORT_LB on FreeBSD 12+),
the number of netthreads is limited to 1.
* The netmgr now has two debugging #ifdefs:
1. The already existing NETMGR_TRACE prints any dangling nmsockets
and nmhandles before triggering an assertion failure. This option
reduces performance when enabled, but in theory it could be enabled
even on low-performance systems.
2. The new NETMGR_TRACE_VERBOSE option enables extensive netmgr
logging that allows the software engineer to precisely track any
attach/detach operations on the nmsockets and nmhandles. It is not
suitable for any kind of production machine, only for debugging.
* The tlsdns netmgr protocol has been split from tcpdns; it still uses
the old method of stacking the netmgr boxes on top of each other. We
will have to refactor the tlsdns netmgr protocol to use the same
approach: build the stack using only libuv and openssl.
* Limit, but do not assert, the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
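/*
 * A minimal sketch of the predicate-guarded wait pattern referenced
 * in the synchronization bullet above; "done" and "result" are
 * hypothetical fields used only for illustration.  Because the
 * predicate is checked before and after every WAIT(), a SIGNAL()
 * that fires before the waiter reaches WAIT() cannot be lost.
 */
#if 0 /* illustrative only, not part of this file */
/* waiting side */
LOCK(&sock->lock);
while (!sock->done) {
	WAIT(&sock->cond, &sock->lock);
}
result = sock->result;
UNLOCK(&sock->lock);

/* signalling side */
LOCK(&sock->lock);
sock->done = true;
sock->result = eresult;
SIGNAL(&sock->cond);
UNLOCK(&sock->lock);
#endif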
|
|
|
break;
|
2021-03-16 09:03:02 +01:00
|
|
|
default:
|
|
|
|
INSIST(0);
|
|
|
|
ISC_UNREACHABLE();
|
|
|
|
}
|
|
|
|
RUNTIME_CHECK(r == 0);
|
|
|
|
sock->reading = true;
|
|
|
|
}
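/*
 * Sketch of the libuv contract behind uv_read_start() above
 * (illustrative, not part of this file): libuv first invokes the
 * allocation callback to obtain a buffer, then invokes the read
 * callback with the number of bytes read, 0 for "no data right now",
 * or a negative error code such as UV_EOF.
 */
#if 0
static void
example_read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) {
	isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)stream);

	if (nread < 0) {
		/* UV_EOF or a transport error: tear the socket down */
		return;
	}
	/* otherwise consume buf->base[0 .. nread) */
	UNUSED(sock);
}
#endif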
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_stop_reading(isc_nmsocket_t *sock) {
|
|
|
|
int r;
|
|
|
|
|
|
|
|
if (!sock->reading) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_udpsocket:
|
|
|
|
r = uv_udp_recv_stop(&sock->uv_handle.udp);
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function and removed it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address in
isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. Instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
When isc_nm_send() or isc_nm_read() is called on an HTTP socket, it is
stored until the corresponding isc_nm_read() or _send() arrives; once
we have both halves of the pair, the HTTP request is initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (Eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased the NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send the "Cache-Control" header in requests and responses. (Note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
break;
|
2021-03-31 12:14:54 +02:00
|
|
|
case isc_nm_tcpsocket:
|
2021-03-16 09:03:02 +01:00
|
|
|
case isc_nm_tcpdnssocket:
|
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
r = uv_read_stop(&sock->uv_handle.stream);
|
refactor outgoing HTTP connection support
2021-02-03 16:59:49 -08:00
|
|
|
break;
|
2020-11-02 19:58:05 -08:00
|
|
|
default:
|
|
|
|
INSIST(0);
|
|
|
|
ISC_UNREACHABLE();
|
|
|
|
}
|
2021-03-16 09:03:02 +01:00
|
|
|
RUNTIME_CHECK(r == 0);
|
|
|
|
sock->reading = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2021-03-31 11:48:41 +02:00
|
|
|
isc__nm_closing(isc_nmsocket_t *sock) {
|
|
|
|
return (atomic_load(&sock->mgr->closing));
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
isc__nmsocket_closing(isc_nmsocket_t *sock) {
|
2021-03-16 09:03:02 +01:00
|
|
|
return (!isc__nmsocket_active(sock) || atomic_load(&sock->closing) ||
|
|
|
|
atomic_load(&sock->mgr->closing) ||
|
|
|
|
(sock->server != NULL && !isc__nmsocket_active(sock->server)));
|
|
|
|
}
|
|
|
|
|
|
|
|
static isc_result_t
|
|
|
|
processbuffer(isc_nmsocket_t *sock) {
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tcpdnssocket:
|
|
|
|
return (isc__nm_tcpdns_processbuffer(sock));
|
|
|
|
case isc_nm_tlsdnssocket:
|
2021-03-18 21:19:18 +01:00
|
|
|
return (isc__nm_tlsdns_processbuffer(sock));
|
2021-03-16 09:03:02 +01:00
|
|
|
default:
|
|
|
|
INSIST(0);
|
|
|
|
ISC_UNREACHABLE();
|
|
|
|
}
|
|
|
|
}
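/*
 * Sketch of the framing check that the per-protocol processbuffer()
 * implementations perform (illustrative; the sock->buf/sock->buf_len
 * field names follow the convention used elsewhere in the netmgr and
 * are an assumption here): DNS over TCP prefixes every message with
 * a two-byte network-order length, so nothing can be dispatched
 * until the whole message has been buffered.
 */
#if 0
size_t len;

if (sock->buf_len < 2) {
	return (ISC_R_NOMORE);	/* not even the length prefix yet */
}
len = ((size_t)sock->buf[0] << 8) | sock->buf[1];
if (sock->buf_len < len + 2) {
	return (ISC_R_NOMORE);	/* incomplete DNS message */
}
/* ...build a request/handle and deliver the message to the callback... */
#endif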
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process a DNS message.
|
|
|
|
*
|
|
|
|
* If we only have an incomplete DNS message, we don't touch any
|
|
|
|
* timers. If we do have a full message, reset the timer.
|
|
|
|
*
|
|
|
|
* Stop reading if this is a client socket, or if the server socket
|
|
|
|
* has been set to sequential mode, or the number of queries we are
|
|
|
|
* processing simultaneously has reached the clients-per-connection
|
|
|
|
* limit. In this case we'll be called again by resume_processing()
|
|
|
|
* later.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
isc__nm_process_sock_buffer(isc_nmsocket_t *sock) {
|
|
|
|
for (;;) {
|
|
|
|
int_fast32_t ah = atomic_load(&sock->ah);
|
|
|
|
isc_result_t result = processbuffer(sock);
|
|
|
|
switch (result) {
|
|
|
|
case ISC_R_NOMORE:
|
|
|
|
/*
|
|
|
|
* Don't reset the timer until we have a
|
|
|
|
* full DNS message.
|
|
|
|
*/
|
|
|
|
isc__nm_start_reading(sock);
|
|
|
|
/*
|
|
|
|
* Start the timer only if there are no externally used
|
|
|
|
* active handles; there is always one active handle
|
|
|
|
* attached internally to sock->recv_handle in
|
|
|
|
* accept_connection()
|
|
|
|
*/
|
|
|
|
if (ah == 1) {
|
|
|
|
isc__nmsocket_timer_start(sock);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
case ISC_R_CANCELED:
|
|
|
|
isc__nmsocket_timer_stop(sock);
|
|
|
|
isc__nm_stop_reading(sock);
|
|
|
|
return;
|
|
|
|
case ISC_R_SUCCESS:
|
|
|
|
/*
|
|
|
|
* Stop the timer on a successful message read; this
|
|
|
|
* also allows the timer to restart when we have no more
|
|
|
|
* data.
|
|
|
|
*/
|
|
|
|
isc__nmsocket_timer_stop(sock);
|
|
|
|
|
|
|
|
if (atomic_load(&sock->client) ||
|
|
|
|
atomic_load(&sock->sequential) ||
|
|
|
|
ah >= STREAM_CLIENTS_PER_CONN)
|
|
|
|
{
|
|
|
|
isc__nm_stop_reading(sock);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
INSIST(0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_resume_processing(void *arg) {
|
|
|
|
isc_nmsocket_t *sock = (isc_nmsocket_t *)arg;
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(sock->tid == isc_nm_tid());
|
|
|
|
REQUIRE(!atomic_load(&sock->client));
|
|
|
|
|
2021-03-31 11:48:41 +02:00
|
|
|
if (isc__nmsocket_closing(sock)) {
|
2021-03-16 09:03:02 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
isc__nm_process_sock_buffer(sock);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc_nmhandle_cleartimeout(isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nm_http_cleartimeout(handle);
|
|
|
|
return;
|
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nm_tls_cleartimeout(handle);
|
|
|
|
return;
|
|
|
|
default:
|
|
|
|
handle->sock->read_timeout = 0;
|
|
|
|
|
|
|
|
if (uv_is_active((uv_handle_t *)&handle->sock->timer)) {
|
|
|
|
isc__nmsocket_timer_stop(handle->sock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc_nmhandle_settimeout(isc_nmhandle_t *handle, uint32_t timeout) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nm_http_settimeout(handle, timeout);
|
|
|
|
return;
|
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nm_tls_settimeout(handle, timeout);
|
|
|
|
return;
|
|
|
|
default:
|
|
|
|
handle->sock->read_timeout = timeout;
|
2021-03-29 10:52:05 +02:00
|
|
|
isc__nmsocket_timer_restart(handle->sock);
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
2020-11-02 19:58:05 -08:00
|
|
|
}
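/*
 * Usage sketch (hypothetical caller): arm a 30-second read timeout
 * before issuing a read, and clear it once the transaction completes.
 * The unit is assumed to be milliseconds, matching how read_timeout
 * is used elsewhere in this file.
 */
#if 0
isc_nmhandle_settimeout(handle, 30000);
isc_nm_read(handle, read_done, read_arg);
/* ...once the response has been processed... */
isc_nmhandle_cleartimeout(handle);
#endif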
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
void *
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_getextra(isc_nmhandle_t *handle) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
return (handle->extra);
|
|
|
|
}
|
|
|
|
|
|
|
|
isc_sockaddr_t
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_peeraddr(isc_nmhandle_t *handle) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
return (handle->peer);
|
|
|
|
}
|
|
|
|
|
|
|
|
isc_sockaddr_t
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_localaddr(isc_nmhandle_t *handle) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
return (handle->local);
|
|
|
|
}
|
|
|
|
|
2019-11-20 22:33:35 +01:00
|
|
|
isc_nm_t *
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_netmgr(isc_nmhandle_t *handle) {
|
2019-11-20 22:33:35 +01:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
return (handle->sock->mgr);
|
|
|
|
}
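/*
 * Usage sketch (hypothetical helper): the accessors above are enough
 * to log the remote end of a handle; isc_sockaddr_format() renders a
 * sockaddr into a fixed-size buffer.
 */
#if 0
static void
log_peer(isc_nmhandle_t *handle) {
	char buf[ISC_SOCKADDR_FORMATSIZE];
	isc_sockaddr_t peer = isc_nmhandle_peeraddr(handle);

	isc_sockaddr_format(&peer, buf, sizeof(buf));
	fprintf(stderr, "peer: %s\n", buf);
}
#endif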
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
isc__nm_uvreq_t *
|
Refactor netmgr and add more unit tests
2020-11-12 10:32:18 +01:00
|
|
|
isc___nm_uvreq_get(isc_nm_t *mgr, isc_nmsocket_t *sock FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
isc__nm_uvreq_t *req = NULL;
|
|
|
|
|
|
|
|
REQUIRE(VALID_NM(mgr));
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
2020-10-22 10:07:56 +02:00
|
|
|
if (sock != NULL && isc__nmsocket_active(sock)) {
|
2019-11-05 13:55:54 -08:00
|
|
|
/* Try to reuse one */
|
|
|
|
req = isc_astack_pop(sock->inactivereqs);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (req == NULL) {
|
2019-11-21 17:08:06 -08:00
|
|
|
req = isc_mempool_get(mgr->reqpool);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2020-02-12 13:59:18 +01:00
|
|
|
*req = (isc__nm_uvreq_t){ .magic = 0 };
|
2020-05-13 17:37:51 +02:00
|
|
|
ISC_LINK_INIT(req, link);
|
2019-11-05 13:55:54 -08:00
|
|
|
req->uv_req.req.data = req;
|
Refactor netmgr and add more unit tests
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_attach(sock, &req->sock FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
req->magic = UVREQ_MAGIC;
|
|
|
|
|
|
|
|
return (req);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
Refactor netmgr and add more unit tests
2020-11-12 10:32:18 +01:00
|
|
|
isc___nm_uvreq_put(isc__nm_uvreq_t **req0, isc_nmsocket_t *sock FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
isc__nm_uvreq_t *req = NULL;
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nmhandle_t *handle = NULL;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
REQUIRE(req0 != NULL);
|
|
|
|
REQUIRE(VALID_UVREQ(*req0));
|
|
|
|
|
|
|
|
req = *req0;
|
|
|
|
*req0 = NULL;
|
|
|
|
|
|
|
|
INSIST(sock == req->sock);
|
|
|
|
|
|
|
|
req->magic = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We need to save this first to make sure that handle,
|
|
|
|
* sock, and the netmgr won't all disappear.
|
|
|
|
*/
|
|
|
|
handle = req->handle;
|
|
|
|
req->handle = NULL;
|
|
|
|
|
2020-10-22 10:07:56 +02:00
|
|
|
if (!isc__nmsocket_active(sock) ||
|
2020-02-12 13:59:18 +01:00
|
|
|
!isc_astack_trypush(sock->inactivereqs, req)) {
|
2019-11-21 17:08:06 -08:00
|
|
|
isc_mempool_put(sock->mgr->reqpool, req);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (handle != NULL) {
|
Refactor netmgr and add more unit tests
2020-11-12 10:32:18 +01:00
|
|
|
isc__nmhandle_detach(&handle FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
Refactor netmgr and add more unit tests
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_detach(&sock FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
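/*
 * Illustrative uvreq lifecycle (a sketch, not verbatim netmgr code;
 * "send_done" and the cb.send/cbarg member names are assumptions):
 * a request comes from the socket's freelist or the manager's
 * mempool, carries one libuv operation, and is returned from that
 * operation's completion callback.
 */
#if 0
isc__nm_uvreq_t *req = isc__nm_uvreq_get(mgr, sock);
req->cb.send = send_done;
req->cbarg = cbarg;
/* ...pass &req->uv_req to uv_write()/uv_udp_send()... */
/* ...and in the completion callback: */
isc__nm_uvreq_put(&req, sock);
#endif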
|
|
|
|
|
2020-10-21 12:52:09 +02:00
|
|
|
void
|
2020-02-12 13:59:18 +01:00
|
|
|
isc_nm_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
|
2020-02-13 14:44:37 -08:00
|
|
|
void *cbarg) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
|
|
|
case isc_nm_udpsocket:
|
|
|
|
case isc_nm_udplistener:
|
2020-10-21 12:52:09 +02:00
|
|
|
isc__nm_udp_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
2019-11-05 13:55:54 -08:00
|
|
|
case isc_nm_tcpsocket:
|
2020-10-21 12:52:09 +02:00
|
|
|
isc__nm_tcp_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
2019-11-05 13:55:54 -08:00
|
|
|
case isc_nm_tcpdnssocket:
|
2020-10-21 12:52:09 +02:00
|
|
|
isc__nm_tcpdns_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
2021-01-25 17:44:39 +02:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nm_tls_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
Refactor netmgr and add more unit tests
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
isc__nm_tlsdns_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
refactor outgoing HTTP connection support
2021-02-03 16:59:49 -08:00
|
|
|
case isc_nm_httpsocket:
|
2020-10-31 20:42:18 +01:00
|
|
|
isc__nm_http_send(handle, region, cb, cbarg);
|
|
|
|
break;
|
2019-11-05 13:55:54 -08:00
|
|
|
default:
|
|
|
|
INSIST(0);
|
|
|
|
ISC_UNREACHABLE();
|
|
|
|
}
|
|
|
|
}
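/*
 * Usage sketch (hypothetical caller): isc_nm_cb_t receives the
 * handle, the operation result, and the caller's argument, so the
 * outcome of an asynchronous send is learned in the callback.
 */
#if 0
static void
send_done(isc_nmhandle_t *handle, isc_result_t eresult, void *cbarg) {
	UNUSED(handle);
	UNUSED(cbarg);

	if (eresult != ISC_R_SUCCESS) {
		/* the send failed or was cancelled */
	}
}

/* later, with a valid handle and region: */
isc_nm_send(handle, &region, send_done, NULL);
#endif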
|
|
|
|
|
2020-10-21 12:52:09 +02:00
|
|
|
void
|
2020-03-20 11:55:10 +01:00
|
|
|
isc_nm_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
Refactor netmgr and add more unit tests
2020-11-12 10:32:18 +01:00
|
|
|
/*
|
|
|
|
* This is always called via callback (from accept or connect), and
|
|
|
|
* the caller must attach to the handle, so the reference count always needs to be
|
|
|
|
* at least 2.
|
|
|
|
*/
|
|
|
|
REQUIRE(isc_refcount_current(&handle->references) >= 2);
|
|
|
|
|
2020-03-20 11:55:10 +01:00
|
|
|
switch (handle->sock->type) {
|
2020-09-05 11:07:40 -07:00
|
|
|
case isc_nm_udpsocket:
|
|
|
|
isc__nm_udp_read(handle, cb, cbarg);
|
|
|
|
break;
|
2020-03-20 11:55:10 +01:00
|
|
|
case isc_nm_tcpsocket:
|
2020-10-21 12:52:09 +02:00
|
|
|
isc__nm_tcp_read(handle, cb, cbarg);
|
|
|
|
break;
|
2020-09-05 11:07:40 -07:00
|
|
|
case isc_nm_tcpdnssocket:
|
|
|
|
isc__nm_tcpdns_read(handle, cb, cbarg);
|
|
|
|
break;
|
2021-01-25 17:44:39 +02:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nm_tls_read(handle, cb, cbarg);
|
|
|
|
break;
|
Refactor netmgr and add more unit tests
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
isc__nm_tlsdns_read(handle, cb, cbarg);
|
|
|
|
break;
|
refactor outgoing HTTP connection support
2021-02-03 16:59:49 -08:00
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nm_http_read(handle, cb, cbarg);
|
|
|
|
break;
|
2020-03-20 11:55:10 +01:00
|
|
|
default:
|
|
|
|
INSIST(0);
|
|
|
|
ISC_UNREACHABLE();
|
|
|
|
}
|
|
|
|
}
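/*
 * Usage sketch (hypothetical caller): isc_nm_recv_cb_t also receives
 * the region that was read.  The region is assumed to be valid only
 * for the duration of the callback, consistent with how this file
 * reuses its read buffers, so copy the data if it must outlive it.
 */
#if 0
static void
read_done(isc_nmhandle_t *handle, isc_result_t eresult,
	  isc_region_t *region, void *cbarg) {
	UNUSED(handle);
	UNUSED(cbarg);

	if (eresult == ISC_R_SUCCESS) {
		/* consume region->base[0 .. region->length) */
	}
}

isc_nm_read(handle, read_done, NULL);
#endif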
|
|
|
|
|
2020-06-05 17:32:36 -07:00
|
|
|
void
|
|
|
|
isc_nm_cancelread(isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
2020-09-05 11:07:40 -07:00
|
|
|
case isc_nm_udpsocket:
|
|
|
|
isc__nm_udp_cancelread(handle);
|
|
|
|
break;
|
2020-06-05 17:32:36 -07:00
|
|
|
case isc_nm_tcpsocket:
|
2020-06-10 11:32:39 +02:00
|
|
|
isc__nm_tcp_cancelread(handle);
|
2020-06-05 17:32:36 -07:00
|
|
|
break;
|
2020-09-05 11:07:40 -07:00
|
|
|
case isc_nm_tcpdnssocket:
|
|
|
|
isc__nm_tcpdns_cancelread(handle);
|
|
|
|
break;
|
Refactor netmgr and add more unit tests
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tlsdnssocket:
|
|
|
|
isc__nm_tlsdns_cancelread(handle);
|
|
|
|
break;
|
2021-01-25 17:44:39 +02:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nm_tls_cancelread(handle);
|
|
|
|
break;
|
2020-06-05 17:32:36 -07:00
|
|
|
default:
|
|
|
|
INSIST(0);
|
|
|
|
ISC_UNREACHABLE();
|
|
|
|
}
|
|
|
|
}
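The lost-wakeup race in item 1 above is avoided by waiting on a
predicate rather than on the signal itself. Below is a minimal,
self-contained sketch of that pattern using plain pthreads; the netmgr
itself uses the ISC library's mutex/condition wrappers, and the
waiter_t type and its field names here are hypothetical, not actual
netmgr structures:

#include <pthread.h>
#include <stdbool.h>

typedef struct {
	pthread_mutex_t lock;
	pthread_cond_t	cond;
	bool		done;	/* predicate: set once by the worker */
	int		result; /* status code, valid once 'done' is true */
} waiter_t;

/*
 * Worker side: set the predicate and result under the lock, then
 * signal. Even if the waiter has not started waiting yet, the wakeup
 * cannot be lost, because the waiter checks 'done' first.
 */
static void
worker_finish(waiter_t *w, int result) {
	pthread_mutex_lock(&w->lock);
	w->result = result;
	w->done = true;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

/*
 * Waiter side: loop on the predicate, not on the signal. A signal
 * delivered before the wait began is still observed via 'done', and
 * spurious wakeups are handled by re-checking the predicate.
 */
static int
wait_for_result(waiter_t *w) {
	int result;

	pthread_mutex_lock(&w->lock);
	while (!w->done) {
		pthread_cond_wait(&w->cond, &w->lock);
	}
	result = w->result;
	pthread_mutex_unlock(&w->lock);
	return (result);
}

And for the load-balanced sockets requirement: a minimal sketch of how
a listener socket might opt in. The set_load_balanced() helper is
hypothetical and not part of the actual netmgr code:

#include <sys/socket.h>

/*
 * Hypothetical helper: mark a socket as load-balanced so that several
 * netthreads can bind the same address/port and the kernel distributes
 * incoming packets and connections among them. Returns -1 when the
 * platform offers neither option, in which case the caller would fall
 * back to a single netthread.
 */
static int
set_load_balanced(int fd) {
	int on = 1;
#if defined(SO_REUSEPORT_LB) /* FreeBSD 12+ */
	return (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT_LB, &on, sizeof(on)));
#elif defined(SO_REUSEPORT) /* Linux */
	return (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on)));
#else
	(void)on;
	(void)fd;
	return (-1);
#endif
}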
void
isc_nm_pauseread(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	isc_nmsocket_t *sock = handle->sock;

	switch (sock->type) {
	case isc_nm_tcpsocket:
		isc__nm_tcp_pauseread(handle);
		break;
	case isc_nm_tlssocket:
		isc__nm_tls_pauseread(handle);
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}

void
isc_nm_resumeread(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	isc_nmsocket_t *sock = handle->sock;

	switch (sock->type) {
	case isc_nm_tcpsocket:
		isc__nm_tcp_resumeread(handle);
		break;
	case isc_nm_tlssocket:
		isc__nm_tls_resumeread(handle);
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}

void
isc_nm_stoplistening(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));

	switch (sock->type) {
	case isc_nm_udplistener:
		isc__nm_udp_stoplistening(sock);
		break;
	case isc_nm_tcpdnslistener:
		isc__nm_tcpdns_stoplistening(sock);
		break;
	case isc_nm_tcplistener:
		isc__nm_tcp_stoplistening(sock);
		break;
	case isc_nm_tlslistener:
		isc__nm_tls_stoplistening(sock);
		break;
	case isc_nm_tlsdnslistener:
		isc__nm_tlsdns_stoplistening(sock);
		break;
	case isc_nm_httplistener:
		isc__nm_http_stoplistening(sock);
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}

void
isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
		  isc_result_t eresult, bool async) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(uvreq));
	REQUIRE(VALID_NMHANDLE(uvreq->handle));

	/*
	 * Synchronous path: run the callback directly on the current
	 * thread using a stack-allocated event.
	 */
	if (!async) {
		isc__netievent_connectcb_t ievent = { .sock = sock,
						      .req = uvreq,
						      .result = eresult };
		isc__nm_async_connectcb(NULL, (isc__netievent_t *)&ievent);
		return;
	}

	/*
	 * Asynchronous path: allocate an event and enqueue it to the
	 * socket's worker thread.
	 */
	isc__netievent_connectcb_t *ievent = isc__nm_get_netievent_connectcb(
		sock->mgr, sock, uvreq, eresult);
	isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
			       (isc__netievent_t *)ievent);
}

refactor outgoing HTTP connection support

- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
  into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
  function; removed it from the public API.
- renamed the isc_http2 and isc_nm_http2 types and functions to just
  isc_http and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address in
  isc_nm_httpconnect(); this eliminates the need to parse the URI and
  the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client
  context, for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN: instead of setting up ALPN
  in isc_tlsctx_createclient(), we now have a function
  isc_tlsctx_enable_http2client_alpn() that can be run from
  isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send
  functions. When isc_nm_send() or isc_nm_read() is called on an HTTP
  socket, the call is stored until its corresponding isc_nm_read() or
  _send() counterpart arrives; when we have both halves of the pair,
  the HTTP request is initiated. (See the sketch following this
  message.)
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
  internal helper function by the DoH unit test. (Eventually doh_test
  should be rewritten to use read and send, and this function should
  be removed.)
- added implementations of isc__nm_tls_settimeout() and
  isc__nm_http_settimeout().
- increased the NGHTTP2 header block length for client connections to
  128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
  help track memory leaks.
- send the "Cache-Control" header in requests and responses. (Note:
  currently we try to bypass HTTP caching proxies, but ideally we
  should interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
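To illustrate the read/send pairing described above, here is a minimal
caller-side sketch, for example from the connect callback handed to
isc_nm_httpconnect(). The doh_* callback names are hypothetical, the
signatures are assumed to match the recv/send callback shapes used
elsewhere in this file, and error handling is elided:

/*
 * Hypothetical callbacks (not actual netmgr code).
 */
static void
doh_response_cb(isc_nmhandle_t *handle, isc_result_t result,
		isc_region_t *region, void *cbarg) {
	/* On success, region->base/region->length hold the response. */
	UNUSED(handle);
	UNUSED(result);
	UNUSED(region);
	UNUSED(cbarg);
}

static void
doh_send_cb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
	/* The request body has been handed off to the HTTP layer. */
	UNUSED(handle);
	UNUSED(result);
	UNUSED(cbarg);
}

static void
doh_connect_cb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
	isc_region_t *query = (isc_region_t *)cbarg;

	if (result != ISC_R_SUCCESS) {
		return;
	}

	/*
	 * Either order works: the first of the two calls is stored on
	 * the http socket, and the HTTP request is initiated when its
	 * counterpart arrives.
	 */
	isc_nm_read(handle, doh_response_cb, NULL);
	isc_nm_send(handle, query, doh_send_cb, NULL);
}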
void
isc__nm_async_connectcb(isc__networker_t *worker, isc__netievent_t *ev0) {
	isc__netievent_connectcb_t *ievent = (isc__netievent_connectcb_t *)ev0;
	isc_nmsocket_t *sock = ievent->sock;
	isc__nm_uvreq_t *uvreq = ievent->req;
	isc_result_t eresult = ievent->result;

	UNUSED(worker);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(uvreq));
	REQUIRE(VALID_NMHANDLE(uvreq->handle));
	REQUIRE(ievent->sock->tid == isc_nm_tid());
	REQUIRE(uvreq->cb.connect != NULL);

	uvreq->cb.connect(uvreq->handle, eresult, uvreq->cbarg);

	isc__nm_uvreq_put(&uvreq, sock);
}
void
isc__nm_readcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
	       isc_result_t eresult) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(uvreq));
	REQUIRE(VALID_NMHANDLE(uvreq->handle));

	if (eresult == ISC_R_SUCCESS) {
		/*
		 * Successful reads are delivered synchronously on the
		 * current (worker) thread.
		 */
		isc__netievent_readcb_t ievent = { .sock = sock,
						   .req = uvreq,
						   .result = eresult };

		isc__nm_async_readcb(NULL, (isc__netievent_t *)&ievent);
	} else {
		/* Errors are enqueued to the socket's worker thread. */
		isc__netievent_readcb_t *ievent = isc__nm_get_netievent_readcb(
			sock->mgr, sock, uvreq, eresult);
		isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
				       (isc__netievent_t *)ievent);
	}
}
void
isc__nm_async_readcb(isc__networker_t *worker, isc__netievent_t *ev0) {
	isc__netievent_readcb_t *ievent = (isc__netievent_readcb_t *)ev0;
	isc_nmsocket_t *sock = ievent->sock;
	isc__nm_uvreq_t *uvreq = ievent->req;
	isc_result_t eresult = ievent->result;
	isc_region_t region = { .base = (unsigned char *)uvreq->uvbuf.base,
				.length = uvreq->uvbuf.len };

	UNUSED(worker);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(uvreq));
	REQUIRE(VALID_NMHANDLE(uvreq->handle));
	REQUIRE(sock->tid == isc_nm_tid());

	uvreq->cb.recv(uvreq->handle, eresult, &region, uvreq->cbarg);

	isc__nm_uvreq_put(&uvreq, sock);
}
void
isc__nm_sendcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
	       isc_result_t eresult, bool async) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(uvreq));
	REQUIRE(VALID_NMHANDLE(uvreq->handle));

	if (!async) {
		isc__netievent_sendcb_t ievent = { .sock = sock,
						   .req = uvreq,
						   .result = eresult };
		isc__nm_async_sendcb(NULL, (isc__netievent_t *)&ievent);
		return;
	}

	isc__netievent_sendcb_t *ievent =
		isc__nm_get_netievent_sendcb(sock->mgr, sock, uvreq, eresult);
	isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
			       (isc__netievent_t *)ievent);
}
void
isc__nm_async_sendcb(isc__networker_t *worker, isc__netievent_t *ev0) {
	isc__netievent_sendcb_t *ievent = (isc__netievent_sendcb_t *)ev0;
	isc_nmsocket_t *sock = ievent->sock;
	isc__nm_uvreq_t *uvreq = ievent->req;
	isc_result_t eresult = ievent->result;

	UNUSED(worker);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(uvreq));
	REQUIRE(VALID_NMHANDLE(uvreq->handle));
	REQUIRE(sock->tid == isc_nm_tid());

	uvreq->cb.send(uvreq->handle, eresult, uvreq->cbarg);

	isc__nm_uvreq_put(&uvreq, sock);
}
static void
isc__nm_async_close(isc__networker_t *worker, isc__netievent_t *ev0) {
	isc__netievent_close_t *ievent = (isc__netievent_close_t *)ev0;
	isc_nmsocket_t *sock = ievent->sock;

	REQUIRE(VALID_NMSOCK(ievent->sock));
	REQUIRE(sock->tid == isc_nm_tid());
	REQUIRE(sock->closehandle_cb != NULL);

	UNUSED(worker);

	ievent->sock->closehandle_cb(sock);
}
void
isc__nm_async_detach(isc__networker_t *worker, isc__netievent_t *ev0) {
	isc__netievent_detach_t *ievent = (isc__netievent_detach_t *)ev0;
	FLARG_IEVENT(ievent);

	REQUIRE(VALID_NMSOCK(ievent->sock));
	REQUIRE(VALID_NMHANDLE(ievent->handle));
	REQUIRE(ievent->sock->tid == isc_nm_tid());

	UNUSED(worker);
	nmhandle_detach_cb(&ievent->handle FLARG_PASS);
}

void
isc__nmsocket_shutdown(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));

	switch (sock->type) {
	case isc_nm_udpsocket:
		isc__nm_udp_shutdown(sock);
		break;
	case isc_nm_tcpsocket:
		isc__nm_tcp_shutdown(sock);
		break;
	case isc_nm_tcpdnssocket:
		isc__nm_tcpdns_shutdown(sock);
		break;
	case isc_nm_tlsdnssocket:
		isc__nm_tlsdns_shutdown(sock);
		break;
	case isc_nm_udplistener:
	case isc_nm_tcplistener:
	case isc_nm_tcpdnslistener:
	case isc_nm_tlsdnslistener:
		return;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}
static void
shutdown_walk_cb(uv_handle_t *handle, void *arg) {
	isc_nmsocket_t *sock = uv_handle_get_data(handle);
	UNUSED(arg);

	if (uv_is_closing(handle)) {
		return;
	}

	switch (handle->type) {
	case UV_UDP:
	case UV_TCP:
		break;
	default:
		return;
	}

	isc__nmsocket_shutdown(sock);
}
void
isc__nm_async_shutdown(isc__networker_t *worker, isc__netievent_t *ev0) {
	UNUSED(ev0);
	uv_walk(&worker->loop, shutdown_walk_cb, NULL);
}
bool
isc__nm_acquire_interlocked(isc_nm_t *mgr) {
	LOCK(&mgr->lock);
	bool success = atomic_compare_exchange_strong(&mgr->interlocked,
						      &(bool){ false }, true);
	UNLOCK(&mgr->lock);
	return (success);
}

void
isc__nm_drop_interlocked(isc_nm_t *mgr) {
	LOCK(&mgr->lock);
	bool success = atomic_compare_exchange_strong(&mgr->interlocked,
						      &(bool){ true }, false);
	INSIST(success);
	BROADCAST(&mgr->wkstatecond);
	UNLOCK(&mgr->lock);
}

void
isc__nm_acquire_interlocked_force(isc_nm_t *mgr) {
	LOCK(&mgr->lock);
	while (!atomic_compare_exchange_strong(&mgr->interlocked,
					       &(bool){ false }, true))
	{
		WAIT(&mgr->wkstatecond, &mgr->lock);
	}
	UNLOCK(&mgr->lock);
}
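/*
 * Illustrative sketch only (not part of this file): one way a caller
 * might use the interlock above to run an operation that must not
 * execute concurrently with other interlocked work.  The function name
 * and the work performed are invented for this example.
 */
static void
example_exclusive_task(isc_nm_t *mgr) {
	/* Blocks until no other thread holds the interlock. */
	isc__nm_acquire_interlocked_force(mgr);

	/* ... do work that requires exclusive access ... */

	/* Release the interlock and wake any waiting threads. */
	isc__nm_drop_interlocked(mgr);
}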
void
isc_nm_setstats(isc_nm_t *mgr, isc_stats_t *stats) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(mgr->stats == NULL);
	REQUIRE(isc_stats_ncounters(stats) == isc_sockstatscounter_max);

	isc_stats_attach(stats, &mgr->stats);
}

void
isc__nm_incstats(isc_nm_t *mgr, isc_statscounter_t counterid) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(counterid != -1);

	if (mgr->stats != NULL) {
		isc_stats_increment(mgr->stats, counterid);
	}
}

void
isc__nm_decstats(isc_nm_t *mgr, isc_statscounter_t counterid) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(counterid != -1);

	if (mgr->stats != NULL) {
		isc_stats_decrement(mgr->stats, counterid);
	}
}
isc_result_t
isc__nm_socket(int domain, int type, int protocol, uv_os_sock_t *sockp) {
#ifdef WIN32
	SOCKET sock;
	sock = socket(domain, type, protocol);
	if (sock == INVALID_SOCKET) {
		char strbuf[ISC_STRERRORSIZE];
		DWORD socket_errno = WSAGetLastError();
		switch (socket_errno) {
		case WSAEMFILE:
		case WSAENOBUFS:
			return (ISC_R_NORESOURCES);

		case WSAEPROTONOSUPPORT:
		case WSAEPFNOSUPPORT:
		case WSAEAFNOSUPPORT:
			return (ISC_R_FAMILYNOSUPPORT);

		default:
			strerror_r(socket_errno, strbuf, sizeof(strbuf));
			UNEXPECTED_ERROR(
				__FILE__, __LINE__,
				"socket() failed with error code %lu: %s",
				socket_errno, strbuf);
			return (ISC_R_UNEXPECTED);
		}
	}
#else
	int sock = socket(domain, type, protocol);
	if (sock < 0) {
		return (isc_errno_toresult(errno));
	}
#endif
	*sockp = (uv_os_sock_t)sock;
	return (ISC_R_SUCCESS);
}
void
isc__nm_closesocket(uv_os_sock_t sock) {
#ifdef WIN32
	closesocket(sock);
#else
	close(sock);
#endif
}
#define setsockopt_on(socket, level, name) \
	setsockopt(socket, level, name, &(int){ 1 }, sizeof(int))

#define setsockopt_off(socket, level, name) \
	setsockopt(socket, level, name, &(int){ 0 }, sizeof(int))
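/*
 * Usage sketch: the macros above expand to a plain setsockopt() call
 * whose option value is a compound literal; e.g. (SO_KEEPALIVE chosen
 * here purely for illustration):
 *
 *	setsockopt_on(fd, SOL_SOCKET, SO_KEEPALIVE);
 *
 * expands to:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &(int){ 1 }, sizeof(int));
 */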
isc_result_t
isc__nm_socket_freebind(uv_os_sock_t fd, sa_family_t sa_family) {
	/*
	 * Set the IP_FREEBIND (or equivalent option) on the uv_handle.
	 */
#ifdef IP_FREEBIND
	UNUSED(sa_family);
	if (setsockopt_on(fd, IPPROTO_IP, IP_FREEBIND) == -1) {
		return (ISC_R_FAILURE);
	}
	return (ISC_R_SUCCESS);
#elif defined(IP_BINDANY) || defined(IPV6_BINDANY)
	if (sa_family == AF_INET) {
#if defined(IP_BINDANY)
		if (setsockopt_on(fd, IPPROTO_IP, IP_BINDANY) == -1) {
			return (ISC_R_FAILURE);
		}
		return (ISC_R_SUCCESS);
#endif
	} else if (sa_family == AF_INET6) {
#if defined(IPV6_BINDANY)
		if (setsockopt_on(fd, IPPROTO_IPV6, IPV6_BINDANY) == -1) {
			return (ISC_R_FAILURE);
		}
		return (ISC_R_SUCCESS);
#endif
	}
	return (ISC_R_NOTIMPLEMENTED);
#elif defined(SO_BINDANY)
	UNUSED(sa_family);
	if (setsockopt_on(fd, SOL_SOCKET, SO_BINDANY) == -1) {
		return (ISC_R_FAILURE);
	}
	return (ISC_R_SUCCESS);
#else
	UNUSED(fd);
	UNUSED(sa_family);
	return (ISC_R_NOTIMPLEMENTED);
#endif
}
isc_result_t
isc__nm_socket_reuse(uv_os_sock_t fd) {
	/*
	 * Generally, the SO_REUSEADDR socket option allows reuse of
	 * local addresses.
	 *
	 * On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some
	 * additional refinements for programs that use multicast.
	 *
	 * On Linux, SO_REUSEPORT has different semantics: it _shares_
	 * the port rather than steal it from the current listener, so
	 * we don't use it here, but rather in isc__nm_socket_reuse_lb().
	 *
	 * On Windows, it also allows a socket to forcibly bind to a
	 * port in use by another socket.
	 */

#if defined(SO_REUSEPORT) && !defined(__linux__)
	if (setsockopt_on(fd, SOL_SOCKET, SO_REUSEPORT) == -1) {
		return (ISC_R_FAILURE);
	}
	return (ISC_R_SUCCESS);
#elif defined(SO_REUSEADDR)
	if (setsockopt_on(fd, SOL_SOCKET, SO_REUSEADDR) == -1) {
		return (ISC_R_FAILURE);
	}
	return (ISC_R_SUCCESS);
#else
	UNUSED(fd);
	return (ISC_R_NOTIMPLEMENTED);
#endif
}
isc_result_t
isc__nm_socket_reuse_lb(uv_os_sock_t fd) {
	/*
	 * On FreeBSD 12+, the SO_REUSEPORT_LB socket option allows
	 * sockets to be bound to an identical socket address. For UDP
	 * sockets, the use of this option can provide better
	 * distribution of incoming datagrams to multiple processes
	 * (or threads) as compared to the traditional technique of
	 * having multiple processes compete to receive datagrams on
	 * the same socket.
	 *
	 * On Linux, the same thing is achieved simply with SO_REUSEPORT.
	 */
#if defined(SO_REUSEPORT_LB)
	if (setsockopt_on(fd, SOL_SOCKET, SO_REUSEPORT_LB) == -1) {
		return (ISC_R_FAILURE);
	} else {
		return (ISC_R_SUCCESS);
	}
#elif defined(SO_REUSEPORT) && defined(__linux__)
	if (setsockopt_on(fd, SOL_SOCKET, SO_REUSEPORT) == -1) {
		return (ISC_R_FAILURE);
	} else {
		return (ISC_R_SUCCESS);
	}
#else
	UNUSED(fd);
	return (ISC_R_NOTIMPLEMENTED);
#endif
}
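/*
 * Illustrative sketch only (not part of this file): how the helpers
 * above could combine so that each netthread opens its own
 * load-balanced listener on the same address, as described in the
 * commit message.  The function name and error handling are invented
 * for this example.
 */
static isc_result_t
example_open_lb_listener(const struct sockaddr *addr, socklen_t addrlen,
			 uv_os_sock_t *fdp) {
	uv_os_sock_t fd;
	isc_result_t result = isc__nm_socket(addr->sa_family, SOCK_STREAM, 0,
					     &fd);
	if (result != ISC_R_SUCCESS) {
		return (result);
	}

	/* Allow quick rebinding, then opt in to kernel load balancing. */
	(void)isc__nm_socket_reuse(fd);
	result = isc__nm_socket_reuse_lb(fd);
	if (result == ISC_R_NOTIMPLEMENTED) {
		/* No SO_REUSEPORT{,_LB}: limit ourselves to one netthread. */
	}

	if (bind(fd, addr, addrlen) != 0) {
		isc__nm_closesocket(fd);
		return (ISC_R_FAILURE);
	}

	*fdp = fd;
	return (ISC_R_SUCCESS);
}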
isc_result_t
isc__nm_socket_incoming_cpu(uv_os_sock_t fd) {
#ifdef SO_INCOMING_CPU
	if (setsockopt_on(fd, SOL_SOCKET, SO_INCOMING_CPU) == -1) {
		return (ISC_R_FAILURE);
	} else {
		return (ISC_R_SUCCESS);
	}
#else
	UNUSED(fd);
#endif
	return (ISC_R_NOTIMPLEMENTED);
}
isc_result_t
isc__nm_socket_dontfrag(uv_os_sock_t fd, sa_family_t sa_family) {
	/*
	 * Set the Don't Fragment flag on IP packets
	 */
	if (sa_family == AF_INET6) {
#if defined(IPV6_DONTFRAG)
		if (setsockopt_off(fd, IPPROTO_IPV6, IPV6_DONTFRAG) == -1) {
			return (ISC_R_FAILURE);
		} else {
			return (ISC_R_SUCCESS);
		}
#elif defined(IPV6_MTU_DISCOVER) && defined(IP_PMTUDISC_OMIT)
		if (setsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER,
			       &(int){ IP_PMTUDISC_OMIT }, sizeof(int)) == -1)
		{
			return (ISC_R_FAILURE);
		} else {
			return (ISC_R_SUCCESS);
		}
#elif defined(IPV6_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
		if (setsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER,
			       &(int){ IP_PMTUDISC_DONT }, sizeof(int)) == -1)
		{
			return (ISC_R_FAILURE);
		} else {
			return (ISC_R_SUCCESS);
		}
#else
		UNUSED(fd);
#endif
	} else if (sa_family == AF_INET) {
#if defined(IP_DONTFRAG)
		if (setsockopt_off(fd, IPPROTO_IP, IP_DONTFRAG) == -1) {
			return (ISC_R_FAILURE);
		} else {
			return (ISC_R_SUCCESS);
		}
#elif defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_OMIT)
		if (setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
			       &(int){ IP_PMTUDISC_OMIT }, sizeof(int)) == -1)
		{
			return (ISC_R_FAILURE);
		} else {
			return (ISC_R_SUCCESS);
		}
#elif defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DONT)
		if (setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
			       &(int){ IP_PMTUDISC_DONT }, sizeof(int)) == -1)
		{
			return (ISC_R_FAILURE);
		} else {
			return (ISC_R_SUCCESS);
		}
#else
		UNUSED(fd);
#endif
	} else {
		return (ISC_R_FAMILYNOSUPPORT);
	}

	return (ISC_R_NOTIMPLEMENTED);
}
#if defined(_WIN32)
#define TIMEOUT_TYPE	DWORD
#define TIMEOUT_DIV	1000
#define TIMEOUT_OPTNAME TCP_MAXRT
#elif defined(TCP_CONNECTIONTIMEOUT)
#define TIMEOUT_TYPE	int
#define TIMEOUT_DIV	1000
#define TIMEOUT_OPTNAME TCP_CONNECTIONTIMEOUT
#elif defined(TCP_RXT_CONNDROPTIME)
#define TIMEOUT_TYPE	int
#define TIMEOUT_DIV	1000
#define TIMEOUT_OPTNAME TCP_RXT_CONNDROPTIME
#elif defined(TCP_USER_TIMEOUT)
#define TIMEOUT_TYPE	unsigned int
#define TIMEOUT_DIV	1
#define TIMEOUT_OPTNAME TCP_USER_TIMEOUT
#elif defined(TCP_KEEPINIT)
#define TIMEOUT_TYPE	int
#define TIMEOUT_DIV	1000
#define TIMEOUT_OPTNAME TCP_KEEPINIT
#endif
isc_result_t
isc__nm_socket_connectiontimeout(uv_os_sock_t fd, int timeout_ms) {
#if defined(TIMEOUT_OPTNAME)
	TIMEOUT_TYPE timeout = timeout_ms / TIMEOUT_DIV;

	if (timeout == 0) {
		timeout = 1;
	}

	if (setsockopt(fd, IPPROTO_TCP, TIMEOUT_OPTNAME, &timeout,
		       sizeof(timeout)) == -1)
	{
		return (ISC_R_FAILURE);
	}

	return (ISC_R_SUCCESS);
#else
	UNUSED(fd);
	UNUSED(timeout_ms);

	return (ISC_R_SUCCESS);
#endif
}
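/*
 * Usage sketch only: apply a 30-second connection timeout to a socket
 * before handing it to libuv, replacing the old uv_timer approach; the
 * 30000 ms value is arbitrary, and is scaled by TIMEOUT_DIV into the
 * platform's native unit.  The function name is invented for this
 * example.
 */
static void
example_set_connect_timeout(uv_os_sock_t fd) {
	if (isc__nm_socket_connectiontimeout(fd, 30000) != ISC_R_SUCCESS) {
		/* Non-fatal: the platform may lack a usable option. */
	}
}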
isc_result_t
isc__nm_socket_tcp_nodelay(uv_os_sock_t fd) {
#ifdef TCP_NODELAY
	if (setsockopt_on(fd, IPPROTO_TCP, TCP_NODELAY) == -1) {
		return (ISC_R_FAILURE);
	} else {
		return (ISC_R_SUCCESS);
	}
#else
	UNUSED(fd);
	return (ISC_R_SUCCESS);
#endif
}
#ifdef NETMGR_TRACE
/*
 * Dump all active sockets in netmgr. We output to stderr
 * as the logger might be already shut down.
 */

static const char *
nmsocket_type_totext(isc_nmsocket_type type) {
	switch (type) {
	case isc_nm_udpsocket:
		return ("isc_nm_udpsocket");
	case isc_nm_udplistener:
		return ("isc_nm_udplistener");
	case isc_nm_tcpsocket:
		return ("isc_nm_tcpsocket");
	case isc_nm_tcplistener:
		return ("isc_nm_tcplistener");
	case isc_nm_tcpdnslistener:
		return ("isc_nm_tcpdnslistener");
	case isc_nm_tcpdnssocket:
		return ("isc_nm_tcpdnssocket");
	case isc_nm_tlssocket:
		return ("isc_nm_tlssocket");
	case isc_nm_tlslistener:
		return ("isc_nm_tlslistener");
	case isc_nm_tlsdnslistener:
		return ("isc_nm_tlsdnslistener");
	case isc_nm_tlsdnssocket:
		return ("isc_nm_tlsdnssocket");
	case isc_nm_httplistener:
		return ("isc_nm_httplistener");
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; removed it from the public API.
- renamed the isc_http2 and isc_nm_http2 types and functions to just
isc_http and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address in
isc_nm_httpconnect(); this eliminates the need to parse the URI and
the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN: instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
When isc_nm_send() or isc_nm_read() is called on an http socket, the
call is stored until the corresponding isc_nm_read() or _send()
arrives; once we have both halves of the pair, the HTTP request is
initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should be
removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased the NGHTTP2 header block length for client connections to
128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send the "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
	case isc_nm_httpsocket:
		return ("isc_nm_httpsocket");
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}
static void
nmhandle_dump(isc_nmhandle_t *handle) {
	fprintf(stderr, "Active handle %p, refs %" PRIuFAST32 "\n", handle,
		isc_refcount_current(&handle->references));
	fprintf(stderr, "Created by:\n");
	backtrace_symbols_fd(handle->backtrace, handle->backtrace_size,
			     STDERR_FILENO);
	fprintf(stderr, "\n\n");
}
static void
nmsocket_dump(isc_nmsocket_t *sock) {
	isc_nmhandle_t *handle = NULL;

	LOCK(&sock->lock);
	fprintf(stderr, "\n=================\n");
	fprintf(stderr, "Active %s socket %p, type %s, refs %" PRIuFAST32 "\n",
		atomic_load(&sock->client) ? "client" : "server", sock,
		nmsocket_type_totext(sock->type),
		isc_refcount_current(&sock->references));
	fprintf(stderr,
		"Parent %p, listener %p, server %p, statichandle = %p\n",
		sock->parent, sock->listener, sock->server, sock->statichandle);
	fprintf(stderr, "Flags:%s%s%s%s%s\n",
		atomic_load(&sock->active) ? " active" : "",
		atomic_load(&sock->closing) ? " closing" : "",
		atomic_load(&sock->destroying) ? " destroying" : "",
		atomic_load(&sock->connecting) ? " connecting" : "",
		sock->accepting ? " accepting" : "");
	fprintf(stderr, "Created by:\n");
	backtrace_symbols_fd(sock->backtrace, sock->backtrace_size,
			     STDERR_FILENO);
	fprintf(stderr, "\n");

	for (handle = ISC_LIST_HEAD(sock->active_handles); handle != NULL;
	     handle = ISC_LIST_NEXT(handle, active_link))
	{
		static bool first = true;
		if (first) {
			fprintf(stderr, "Active handles:\n");
			first = false;
		}
		nmhandle_dump(handle);
	}

	fprintf(stderr, "\n");
	UNLOCK(&sock->lock);
}
void
isc__nm_dump_active(isc_nm_t *nm) {
	isc_nmsocket_t *sock = NULL;

	REQUIRE(VALID_NM(nm));

	LOCK(&nm->lock);
	for (sock = ISC_LIST_HEAD(nm->active_sockets); sock != NULL;
	     sock = ISC_LIST_NEXT(sock, active_link))
	{
		static bool first = true;
		if (first) {
			fprintf(stderr, "Outstanding sockets\n");
			first = false;
		}
		nmsocket_dump(sock);
	}
	UNLOCK(&nm->lock);
}
#endif
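/*
 * Usage note: isc__nm_dump_active() is compiled in only when the
 * library is built with NETMGR_TRACE defined.  How it is invoked is an
 * assumption here, but a debugging session might call it right before
 * tearing the manager down, e.g.:
 *
 *	isc__nm_dump_active(mgr);
 */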