/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * SPDX-License-Identifier: MPL-2.0
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, you can obtain one at https://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

#include <assert.h>
#include <inttypes.h>
#include <unistd.h>

#include <isc/async.h>
#include <isc/atomic.h>
#include <isc/backtrace.h>
#include <isc/barrier.h>
#include <isc/buffer.h>
#include <isc/errno.h>
#include <isc/job.h>
#include <isc/list.h>
#include <isc/log.h>
#include <isc/loop.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/netaddr.h>
#include <isc/netmgr.h>
#include <isc/quota.h>
#include <isc/random.h>
#include <isc/refcount.h>
#include <isc/region.h>
#include <isc/result.h>
#include <isc/sockaddr.h>
#include <isc/stats.h>
#include <isc/strerr.h>
#include <isc/thread.h>
#include <isc/tid.h>
#include <isc/tls.h>
#include <isc/util.h>
#include <isc/uv.h>

#include "../loop_p.h"
#include "netmgr-int.h"
#include "openssl_shim.h"

/*%
 * Shortcut index arrays to get access to statistics counters.
 */

static const isc_statscounter_t udp4statsindex[] = {
	isc_sockstatscounter_udp4open,
	isc_sockstatscounter_udp4openfail,
	isc_sockstatscounter_udp4close,
	isc_sockstatscounter_udp4bindfail,
	isc_sockstatscounter_udp4connectfail,
	isc_sockstatscounter_udp4connect,
	-1,
	-1,
	isc_sockstatscounter_udp4sendfail,
	isc_sockstatscounter_udp4recvfail,
	isc_sockstatscounter_udp4active,
	-1,
};

static const isc_statscounter_t udp6statsindex[] = {
	isc_sockstatscounter_udp6open,
	isc_sockstatscounter_udp6openfail,
	isc_sockstatscounter_udp6close,
	isc_sockstatscounter_udp6bindfail,
	isc_sockstatscounter_udp6connectfail,
	isc_sockstatscounter_udp6connect,
	-1,
	-1,
	isc_sockstatscounter_udp6sendfail,
	isc_sockstatscounter_udp6recvfail,
	isc_sockstatscounter_udp6active,
	-1,
};

static const isc_statscounter_t tcp4statsindex[] = {
	isc_sockstatscounter_tcp4open, isc_sockstatscounter_tcp4openfail,
	isc_sockstatscounter_tcp4close, isc_sockstatscounter_tcp4bindfail,
	isc_sockstatscounter_tcp4connectfail, isc_sockstatscounter_tcp4connect,
	isc_sockstatscounter_tcp4acceptfail, isc_sockstatscounter_tcp4accept,
	isc_sockstatscounter_tcp4sendfail, isc_sockstatscounter_tcp4recvfail,
	isc_sockstatscounter_tcp4active, isc_sockstatscounter_tcp4clients,
};

static const isc_statscounter_t tcp6statsindex[] = {
	isc_sockstatscounter_tcp6open, isc_sockstatscounter_tcp6openfail,
	isc_sockstatscounter_tcp6close, isc_sockstatscounter_tcp6bindfail,
	isc_sockstatscounter_tcp6connectfail, isc_sockstatscounter_tcp6connect,
	isc_sockstatscounter_tcp6acceptfail, isc_sockstatscounter_tcp6accept,
	isc_sockstatscounter_tcp6sendfail, isc_sockstatscounter_tcp6recvfail,
	isc_sockstatscounter_tcp6active, isc_sockstatscounter_tcp6clients,
};
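
/*
 * Illustration (added; not part of the upstream file): each socket type is
 * expected to point its statsindex at one of the arrays above so that
 * generic code can bump a counter by position, skipping the -1
 * placeholders, e.g.:
 *
 *	if (sock->statsindex != NULL && sock->statsindex[id] != -1) {
 *		isc_stats_increment(stats, sock->statsindex[id]);
 *	}
 *
 * The field and helper names in this sketch are assumptions made for
 * illustration; see netmgr-int.h for the real accessors.
 */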

static void
nmsocket_maybe_destroy(isc_nmsocket_t *sock FLARG);
static void
nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle);
/*%<
 * Issue a 'handle closed' callback on the socket.
 */

static void
shutdown_walk_cb(uv_handle_t *handle, void *arg);

static void
networker_teardown(void *arg) {
	isc__networker_t *worker = arg;
	isc_loop_t *loop = worker->loop;

	worker->shuttingdown = true;

	isc__netmgr_log(worker->netmgr, ISC_LOG_DEBUG(1),
			"Shutting down network manager worker on loop %p(%d)",
			loop, isc_tid());

	uv_walk(&loop->loop, shutdown_walk_cb, NULL);

	isc__networker_detach(&worker);
}

static void
netmgr_teardown(void *arg) {
	isc_nm_t *netmgr = (void *)arg;

	if (atomic_compare_exchange_strong_acq_rel(&netmgr->shuttingdown,
						   &(bool){ false }, true))
	{
		isc__netmgr_log(netmgr, ISC_LOG_DEBUG(1),
				"Shutting down network manager");
	}
}
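
/*
 * Note (added for clarity): isc_netmgr_create() below registers
 * netmgr_teardown() as a loop-manager teardown callback and
 * networker_teardown() as a per-loop teardown callback, so on shutdown the
 * manager is marked as shutting down once, and each worker then walks its
 * uv_loop with shutdown_walk_cb() and drops its reference.
 */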

#if HAVE_DECL_UV_UDP_LINUX_RECVERR
#define MINIMAL_UV_VERSION UV_VERSION(1, 42, 0)
#elif HAVE_DECL_UV_UDP_MMSG_FREE
#define MINIMAL_UV_VERSION UV_VERSION(1, 40, 0)
#elif HAVE_DECL_UV_UDP_RECVMMSG
#define MAXIMAL_UV_VERSION UV_VERSION(1, 39, 99)
#define MINIMAL_UV_VERSION UV_VERSION(1, 37, 0)
#else
#define MAXIMAL_UV_VERSION UV_VERSION(1, 34, 99)
#define MINIMAL_UV_VERSION UV_VERSION(1, 34, 0)
#endif
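
/*
 * Note (added for clarity): UV_VERSION() is assumed to pack
 * (major, minor, patch) the same way libuv's uv_version() does, so the
 * MINIMAL/MAXIMAL bounds above can be compared directly against the
 * runtime libuv version in isc_netmgr_create() below.
 */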

void
isc_netmgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, isc_nm_t **netmgrp) {
	isc_nm_t *netmgr = NULL;

#ifdef MAXIMAL_UV_VERSION
	if (uv_version() > MAXIMAL_UV_VERSION) {
		FATAL_ERROR("libuv version too new: running with libuv %s "
			    "when compiled with libuv %s will lead to "
			    "libuv failures",
			    uv_version_string(), UV_VERSION_STRING);
	}
#endif /* MAXIMAL_UV_VERSION */

	if (uv_version() < MINIMAL_UV_VERSION) {
		FATAL_ERROR("libuv version too old: running with libuv %s "
			    "when compiled with libuv %s will lead to "
			    "libuv failures",
			    uv_version_string(), UV_VERSION_STRING);
	}

	netmgr = isc_mem_get(mctx, sizeof(*netmgr));
	*netmgr = (isc_nm_t){
		.loopmgr = loopmgr,
		.nloops = isc_loopmgr_nloops(loopmgr),
	};

	isc_mem_attach(mctx, &netmgr->mctx);
	isc_refcount_init(&netmgr->references, 1);
	atomic_init(&netmgr->maxudp, 0);
	atomic_init(&netmgr->shuttingdown, false);
	atomic_init(&netmgr->recv_tcp_buffer_size, 0);
	atomic_init(&netmgr->send_tcp_buffer_size, 0);
	atomic_init(&netmgr->recv_udp_buffer_size, 0);
	atomic_init(&netmgr->send_udp_buffer_size, 0);
#if HAVE_SO_REUSEPORT_LB
	netmgr->load_balance_sockets = true;
#else
	netmgr->load_balance_sockets = false;
#endif

	/*
	 * Default TCP timeout values.
	 * May be updated by isc_nm_settimeouts().
	 */
	atomic_init(&netmgr->init, 30000);
	atomic_init(&netmgr->idle, 30000);
	atomic_init(&netmgr->keepalive, 30000);
	atomic_init(&netmgr->advertised, 30000);
	atomic_init(&netmgr->primaries, 30000);

	netmgr->workers = isc_mem_cget(mctx, netmgr->nloops,
				       sizeof(netmgr->workers[0]));

	isc_loopmgr_teardown(loopmgr, netmgr_teardown, netmgr);

	netmgr->magic = NM_MAGIC;

	for (size_t i = 0; i < netmgr->nloops; i++) {
		isc_loop_t *loop = isc_loop_get(netmgr->loopmgr, i);
		isc__networker_t *worker = &netmgr->workers[i];

		*worker = (isc__networker_t){
			.recvbuf = isc_mem_get(loop->mctx,
					       ISC_NETMGR_RECVBUF_SIZE),
			.active_sockets = ISC_LIST_INITIALIZER,
		};

		isc_nm_attach(netmgr, &worker->netmgr);
		isc_mem_attach(loop->mctx, &worker->mctx);

		isc_mempool_create(worker->mctx, sizeof(isc_nmsocket_t),
				   &worker->nmsocket_pool);
		isc_mempool_setfreemax(worker->nmsocket_pool,
				       ISC_NM_NMSOCKET_MAX);

		isc_mempool_create(worker->mctx, sizeof(isc__nm_uvreq_t),
				   &worker->uvreq_pool);
		isc_mempool_setfreemax(worker->uvreq_pool, ISC_NM_UVREQS_MAX);

		isc_loop_attach(loop, &worker->loop);
		isc_loop_teardown(loop, networker_teardown, worker);
		isc_refcount_init(&worker->references, 1);
	}

	*netmgrp = netmgr;
}
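
/*
 * Minimal usage sketch (added for illustration, not part of the original
 * file); it assumes a memory context and loop manager already exist:
 *
 *	isc_nm_t *netmgr = NULL;
 *
 *	isc_netmgr_create(mctx, loopmgr, &netmgr);
 *	isc_nm_settimeouts(netmgr, 30000, 30000, 30000, 30000, 30000);
 *	...
 *	isc_netmgr_destroy(&netmgr);
 */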

/*
 * Free the resources of the network manager.
 */
static void
nm_destroy(isc_nm_t **mgr0) {
	REQUIRE(VALID_NM(*mgr0));

	isc_nm_t *mgr = *mgr0;
	*mgr0 = NULL;

	isc_refcount_destroy(&mgr->references);

	mgr->magic = 0;

	if (mgr->stats != NULL) {
		isc_stats_detach(&mgr->stats);
	}

	isc_mem_cput(mgr->mctx, mgr->workers, mgr->nloops,
		     sizeof(mgr->workers[0]));
	isc_mem_putanddetach(&mgr->mctx, mgr, sizeof(*mgr));
}

void
isc_nm_attach(isc_nm_t *mgr, isc_nm_t **dst) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(dst != NULL && *dst == NULL);

	isc_refcount_increment(&mgr->references);

	*dst = mgr;
}

void
isc_nm_detach(isc_nm_t **mgr0) {
	isc_nm_t *mgr = NULL;

	REQUIRE(mgr0 != NULL);
	REQUIRE(VALID_NM(*mgr0));

	mgr = *mgr0;
	*mgr0 = NULL;

	if (isc_refcount_decrement(&mgr->references) == 1) {
		nm_destroy(&mgr);
	}
}
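
/*
 * Note (added for clarity): isc_nm_detach() above drops one reference and
 * destroys the manager only when it was the last one, whereas
 * isc_netmgr_destroy() below requires the caller to hold the final
 * reference and asserts if anything else is still attached.
 */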

void
isc_netmgr_destroy(isc_nm_t **netmgrp) {
	isc_nm_t *mgr = NULL;

	REQUIRE(VALID_NM(*netmgrp));

	mgr = *netmgrp;
	*netmgrp = NULL;

	REQUIRE(isc_refcount_decrement(&mgr->references) == 1);
	nm_destroy(&mgr);
}

void
isc_nm_maxudp(isc_nm_t *mgr, uint32_t maxudp) {
	REQUIRE(VALID_NM(mgr));

	atomic_store_relaxed(&mgr->maxudp, maxudp);
}

void
isc_nmhandle_setwritetimeout(isc_nmhandle_t *handle, uint64_t write_timeout) {
	REQUIRE(VALID_NMHANDLE(handle));
	REQUIRE(VALID_NMSOCK(handle->sock));
	REQUIRE(handle->sock->tid == isc_tid());

	switch (handle->sock->type) {
	case isc_nm_tcpsocket:
	case isc_nm_udpsocket:
		handle->sock->write_timeout = write_timeout;
		break;
	case isc_nm_tlssocket:
		isc__nmhandle_tls_setwritetimeout(handle, write_timeout);
		break;
	case isc_nm_streamdnssocket:
		isc__nmhandle_streamdns_setwritetimeout(handle, write_timeout);
		break;
	case isc_nm_proxystreamsocket:
		isc__nmhandle_proxystream_setwritetimeout(handle,
							  write_timeout);
		break;
	case isc_nm_proxyudpsocket:
		isc__nmhandle_proxyudp_setwritetimeout(handle, write_timeout);
		break;
	default:
		UNREACHABLE();
		break;
	}
}

void
isc_nm_settimeouts(isc_nm_t *mgr, uint32_t init, uint32_t idle,
		   uint32_t keepalive, uint32_t advertised,
		   uint32_t primaries) {
	REQUIRE(VALID_NM(mgr));

	atomic_store_relaxed(&mgr->init, init);
	atomic_store_relaxed(&mgr->idle, idle);
	atomic_store_relaxed(&mgr->keepalive, keepalive);
	atomic_store_relaxed(&mgr->advertised, advertised);
	atomic_store_relaxed(&mgr->primaries, primaries);
}
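
/*
 * Note (added for clarity): the timeout values configured here appear to
 * be in milliseconds, since isc_netmgr_create() initializes the same
 * fields to 30000 (30 seconds); this is an inference from the surrounding
 * code, not upstream documentation.
 */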

void
isc_nm_setnetbuffers(isc_nm_t *mgr, int32_t recv_tcp, int32_t send_tcp,
		     int32_t recv_udp, int32_t send_udp) {
	REQUIRE(VALID_NM(mgr));

	atomic_store_relaxed(&mgr->recv_tcp_buffer_size, recv_tcp);
	atomic_store_relaxed(&mgr->send_tcp_buffer_size, send_tcp);
	atomic_store_relaxed(&mgr->recv_udp_buffer_size, recv_udp);
	atomic_store_relaxed(&mgr->send_udp_buffer_size, send_udp);
}

bool
isc_nm_getloadbalancesockets(isc_nm_t *mgr) {
	REQUIRE(VALID_NM(mgr));

	return mgr->load_balance_sockets;
}

void
isc_nm_setloadbalancesockets(isc_nm_t *mgr, ISC_ATTR_UNUSED bool enabled) {
	REQUIRE(VALID_NM(mgr));

#if HAVE_SO_REUSEPORT_LB
	mgr->load_balance_sockets = enabled;
#endif
}
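
/*
 * Note (added for clarity): without SO_REUSEPORT_LB support the setter
 * above is effectively a no-op, so callers that care should read the
 * value back, e.g. (sketch):
 *
 *	isc_nm_setloadbalancesockets(netmgr, true);
 *	if (!isc_nm_getloadbalancesockets(netmgr)) {
 *		// load-balanced sockets unavailable on this platform
 *	}
 */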

void
isc_nm_gettimeouts(isc_nm_t *mgr, uint32_t *initial, uint32_t *idle,
		   uint32_t *keepalive, uint32_t *advertised,
		   uint32_t *primaries) {
	REQUIRE(VALID_NM(mgr));

	SET_IF_NOT_NULL(initial, atomic_load_relaxed(&mgr->init));
	SET_IF_NOT_NULL(idle, atomic_load_relaxed(&mgr->idle));
	SET_IF_NOT_NULL(keepalive, atomic_load_relaxed(&mgr->keepalive));
	SET_IF_NOT_NULL(advertised, atomic_load_relaxed(&mgr->advertised));
	SET_IF_NOT_NULL(primaries, atomic_load_relaxed(&mgr->primaries));
}

bool
isc__nmsocket_active(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));

	return sock->active;
}

void
isc___nmsocket_attach(isc_nmsocket_t *sock, isc_nmsocket_t **target FLARG) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(target != NULL && *target == NULL);

	isc_nmsocket_t *rsock = NULL;

	if (sock->parent != NULL) {
		rsock = sock->parent;
		INSIST(rsock->parent == NULL); /* sanity check */
	} else {
		rsock = sock;
	}

	NETMGR_TRACE_LOG("isc__nmsocket_attach():%p->references = %" PRIuFAST32
			 "\n",
			 rsock, isc_refcount_current(&rsock->references) + 1);

	isc_refcount_increment0(&rsock->references);

	*target = sock;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free all resources inside a socket (including its children if any).
|
|
|
|
*/
|
|
|
|
static void
|
2023-03-23 09:47:47 +01:00
|
|
|
nmsocket_cleanup(void *arg) {
|
|
|
|
isc_nmsocket_t *sock = arg;
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(!isc__nmsocket_active(sock));
|
|
|
|
|
2023-01-03 08:27:54 +01:00
|
|
|
isc__networker_t *worker = sock->worker;
|
2020-11-12 10:32:18 +01:00
|
|
|
|
2022-08-29 13:42:14 +02:00
|
|
|
isc_refcount_destroy(&sock->references);
|
|
|
|
|
2021-10-02 16:26:43 -07:00
|
|
|
isc__nm_decstats(sock, STATID_ACTIVE);
|
|
|
|
|
2023-03-24 13:37:19 +01:00
|
|
|
REQUIRE(!sock->destroying);
|
|
|
|
sock->destroying = true;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
if (sock->parent == NULL && sock->children != NULL) {
|
|
|
|
/*
|
|
|
|
* We shouldn't be here unless there are no active handles,
|
|
|
|
* so we can clean up and free the children.
|
|
|
|
*/
|
2020-12-03 17:58:10 +01:00
|
|
|
for (size_t i = 0; i < sock->nchildren; i++) {
|
2023-01-03 08:27:54 +01:00
|
|
|
isc_refcount_decrementz(&sock->children[i].references);
|
|
|
|
nmsocket_cleanup(&sock->children[i]);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2021-05-05 11:51:39 +02:00
|
|
|
/*
|
|
|
|
* Now free them.
|
2019-11-05 13:55:54 -08:00
|
|
|
*/
|
2023-08-23 08:56:31 +02:00
|
|
|
isc_mem_cput(sock->worker->mctx, sock->children,
|
|
|
|
sock->nchildren, sizeof(*sock));
|
2019-11-05 13:55:54 -08:00
|
|
|
sock->children = NULL;
|
|
|
|
sock->nchildren = 0;
|
|
|
|
}
|
|
|
|
|
2020-06-10 11:32:39 +02:00
|
|
|
sock->statichandle = NULL;
|
2020-06-04 23:13:54 -07:00
|
|
|
|
|
|
|
if (sock->outerhandle != NULL) {
|
2023-01-17 13:58:10 -08:00
|
|
|
isc_nmhandle_detach(&sock->outerhandle);
|
2020-06-04 23:13:54 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
if (sock->outer != NULL) {
|
2023-01-17 13:58:10 -08:00
|
|
|
isc__nmsocket_detach(&sock->outer);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2025-03-22 15:26:16 -07:00
|
|
|
ISC_LIST_FOREACH_SAFE (sock->inactive_handles, handle, inactive_link) {
|
2023-01-04 15:57:00 +01:00
|
|
|
ISC_LIST_DEQUEUE(sock->inactive_handles, handle, inactive_link);
|
2019-11-05 13:55:54 -08:00
|
|
|
nmhandle_free(sock, handle);
|
|
|
|
}
|
|
|
|
|
2023-04-11 07:54:58 +02:00
|
|
|
INSIST(sock->server == NULL);
|
2019-11-22 13:19:45 +01:00
|
|
|
sock->pquota = NULL;
|
|
|
|
|
2021-01-25 17:44:39 +02:00
|
|
|
isc__nm_tls_cleanup_data(sock);
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; removed it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
when isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; once
we have both halves of the pair, the HTTP request will be initiated
(a simplified sketch of this pairing follows this message).
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send "Cache-Control" header in requests and responses. (note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
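The read/send pairing described above can be summarized in a few
lines of C. This is a hypothetical sketch only -- the http_pending_t
type and the function names below do not exist in the netmgr -- but
it shows the intended behaviour: whichever half arrives first is
parked, and the request starts once both halves are present.

#include <stdbool.h>
#include <stdio.h>

typedef struct http_pending {
	bool have_read; /* a read (response) handler has been registered */
	bool have_send; /* a send (request body) has been registered */
} http_pending_t;

static void
http_start_request(http_pending_t *p) {
	(void)p;
	printf("both halves present, initiating HTTP request\n");
}

/* Called for each half; fires the request when the pair is complete. */
static void
http_half_arrived(http_pending_t *p, bool is_read) {
	if (is_read) {
		p->have_read = true;
	} else {
		p->have_send = true;
	}
	if (p->have_read && p->have_send) {
		http_start_request(p);
	}
}

int
main(void) {
	http_pending_t pending = { false, false };
	http_half_arrived(&pending, true);  /* read registered first */
	http_half_arrived(&pending, false); /* send arrives, request starts */
	return 0;
}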
|
|
|
isc__nm_http_cleanup_data(sock);
|
2021-04-21 13:52:15 +02:00
|
|
|
#endif
|
2022-06-20 20:30:12 +03:00
|
|
|
isc__nm_streamdns_cleanup_data(sock);
|
2023-03-16 12:50:04 +02:00
|
|
|
isc__nm_proxystream_cleanup_data(sock);
|
2023-07-12 15:25:38 +03:00
|
|
|
isc__nm_proxyudp_cleanup_data(sock);
|
2022-07-26 13:03:45 +02:00
|
|
|
|
2023-01-07 16:30:21 -08:00
|
|
|
if (sock->barriers_initialised) {
|
|
|
|
isc_barrier_destroy(&sock->listen_barrier);
|
|
|
|
isc_barrier_destroy(&sock->stop_barrier);
|
2022-10-14 20:45:40 +03:00
|
|
|
}
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
sock->magic = 0;
|
|
|
|
|
2023-01-03 08:27:54 +01:00
|
|
|
/* Don't free child socket */
|
|
|
|
if (sock->parent == NULL) {
|
|
|
|
REQUIRE(sock->tid == isc_tid());
|
|
|
|
|
|
|
|
ISC_LIST_UNLINK(worker->active_sockets, sock, active_link);
|
|
|
|
|
2023-09-12 19:13:45 +02:00
|
|
|
isc_mempool_put(worker->nmsocket_pool, sock);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
2023-01-03 08:27:54 +01:00
|
|
|
|
|
|
|
isc__networker_detach(&worker);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2023-03-23 09:47:47 +01:00
|
|
|
static bool
|
|
|
|
nmsocket_has_active_handles(isc_nmsocket_t *sock) {
|
|
|
|
if (!ISC_LIST_EMPTY(sock->active_handles)) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return true;
|
2023-03-23 09:47:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (sock->children != NULL) {
|
|
|
|
for (size_t i = 0; i < sock->nchildren; i++) {
|
|
|
|
isc_nmsocket_t *csock = &sock->children[i];
|
|
|
|
if (!ISC_LIST_EMPTY(csock->active_handles)) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return true;
|
2023-03-23 09:47:47 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return false;
|
2023-03-23 09:47:47 +01:00
|
|
|
}
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
static void
|
2020-11-12 10:32:18 +01:00
|
|
|
nmsocket_maybe_destroy(isc_nmsocket_t *sock FLARG) {
|
2021-03-30 09:25:09 +02:00
|
|
|
NETMGR_TRACE_LOG("%s():%p->references = %" PRIuFAST32 "\n", __func__,
|
|
|
|
sock, isc_refcount_current(&sock->references));
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
if (sock->parent != NULL) {
|
|
|
|
/*
|
|
|
|
* This is a child socket and cannot be destroyed except
|
|
|
|
* as a side effect of destroying the parent, so let's go
|
|
|
|
* see if the parent is ready to be destroyed.
|
|
|
|
*/
|
2020-11-12 10:32:18 +01:00
|
|
|
nmsocket_maybe_destroy(sock->parent FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-03-24 13:37:19 +01:00
|
|
|
REQUIRE(!sock->destroying);
|
2023-03-28 17:03:56 +02:00
|
|
|
REQUIRE(!sock->active);
|
2023-03-24 13:37:19 +01:00
|
|
|
|
|
|
|
if (!sock->closed) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isc_refcount_current(&sock->references) != 0) {
|
|
|
|
/*
|
|
|
|
* Such a check is valid only if we don't use
|
|
|
|
* isc_refcount_increment0() on the same variable.
|
|
|
|
*/
|
2019-12-08 22:44:08 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-03-23 09:47:47 +01:00
|
|
|
NETMGR_TRACE_LOG("%s:%p->statichandle = %p\n", __func__, sock,
|
|
|
|
sock->statichandle);
|
2019-11-05 13:55:54 -08:00
|
|
|
|
2023-03-23 09:47:47 +01:00
|
|
|
/*
|
|
|
|
* This is a parent socket (or a standalone). See whether the
|
|
|
|
* children have active handles before deciding whether to
|
|
|
|
* accept destruction.
|
|
|
|
*/
|
|
|
|
if (sock->statichandle == NULL && nmsocket_has_active_handles(sock)) {
|
|
|
|
return;
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
2023-03-23 09:47:47 +01:00
|
|
|
if (sock->tid == isc_tid()) {
|
|
|
|
nmsocket_cleanup(sock);
|
|
|
|
} else {
|
|
|
|
isc_async_run(sock->worker->loop, nmsocket_cleanup, sock);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_prep_destroy(isc_nmsocket_t *sock FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(sock->parent == NULL);
|
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
NETMGR_TRACE_LOG("isc___nmsocket_prep_destroy():%p->references = "
|
|
|
|
"%" PRIuFAST32 "\n",
|
2020-11-12 10:32:18 +01:00
|
|
|
sock, isc_refcount_current(&sock->references));
|
|
|
|
|
2019-11-05 13:55:54 -08:00
|
|
|
/*
|
|
|
|
* The final external reference to the socket is gone. We can try
|
|
|
|
* destroying the socket, but we have to wait for all the inflight
|
|
|
|
* handles to finish first.
|
|
|
|
*/
|
2023-03-28 17:03:56 +02:00
|
|
|
sock->active = false;
|
2019-11-05 13:55:54 -08:00
|
|
|
|
|
|
|
/*
|
2023-03-28 17:03:56 +02:00
|
|
|
* If the socket has children, they have been marked inactive by the
|
|
|
|
* shutdown uv_walk
|
2019-11-05 13:55:54 -08:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we're here then we already stopped listening; otherwise
|
|
|
|
* we'd have a hanging reference from the listening process.
|
|
|
|
*
|
|
|
|
* If it's a regular socket we may need to close it.
|
|
|
|
*/
|
2023-03-24 13:37:19 +01:00
|
|
|
if (!sock->closing && !sock->closed) {
|
2019-11-05 13:55:54 -08:00
|
|
|
switch (sock->type) {
|
2020-09-05 11:07:40 -07:00
|
|
|
case isc_nm_udpsocket:
|
|
|
|
isc__nm_udp_close(sock);
|
|
|
|
return;
|
2019-11-05 13:55:54 -08:00
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
isc__nm_tcp_close(sock);
|
2020-07-01 00:49:12 -07:00
|
|
|
return;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nm_streamdns_close(sock);
|
|
|
|
return;
|
2021-04-21 13:52:15 +02:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nm_tls_close(sock);
|
2022-08-29 10:55:10 +02:00
|
|
|
return;
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
2021-02-03 16:59:49 -08:00
|
|
|
case isc_nm_httpsocket:
|
2020-12-07 14:19:10 +02:00
|
|
|
isc__nm_http_close(sock);
|
|
|
|
return;
|
2021-04-21 13:52:15 +02:00
|
|
|
#endif
|
2023-03-16 12:50:04 +02:00
|
|
|
case isc_nm_proxystreamsocket:
|
|
|
|
isc__nm_proxystream_close(sock);
|
|
|
|
return;
|
2023-07-12 15:25:38 +03:00
|
|
|
case isc_nm_proxyudpsocket:
|
|
|
|
isc__nm_proxyudp_close(sock);
|
|
|
|
return;
|
2019-11-05 13:55:54 -08:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-12 10:32:18 +01:00
|
|
|
nmsocket_maybe_destroy(sock FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_detach(isc_nmsocket_t **sockp FLARG) {
|
2019-11-05 13:55:54 -08:00
|
|
|
REQUIRE(sockp != NULL && *sockp != NULL);
|
|
|
|
REQUIRE(VALID_NMSOCK(*sockp));
|
|
|
|
|
|
|
|
isc_nmsocket_t *sock = *sockp, *rsock = NULL;
|
|
|
|
*sockp = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the socket is a part of a set (a child socket) we are
|
|
|
|
* counting references for the whole set at the parent.
|
|
|
|
*/
|
|
|
|
if (sock->parent != NULL) {
|
|
|
|
rsock = sock->parent;
|
|
|
|
INSIST(rsock->parent == NULL); /* Sanity check */
|
|
|
|
} else {
|
|
|
|
rsock = sock;
|
|
|
|
}
|
|
|
|
|
2021-01-29 13:00:46 +01:00
|
|
|
NETMGR_TRACE_LOG("isc__nmsocket_detach():%p->references = %" PRIuFAST32
|
|
|
|
"\n",
|
|
|
|
rsock, isc_refcount_current(&rsock->references) - 1);
|
2020-11-12 10:32:18 +01:00
|
|
|
|
2020-01-14 09:43:37 +01:00
|
|
|
if (isc_refcount_decrement(&rsock->references) == 1) {
|
2020-11-12 10:32:18 +01:00
|
|
|
isc___nmsocket_prep_destroy(rsock FLARG_PASS);
|
2019-11-05 13:55:54 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-04 14:54:36 -07:00
|
|
|
void
|
|
|
|
isc_nmsocket_close(isc_nmsocket_t **sockp) {
|
|
|
|
REQUIRE(sockp != NULL);
|
|
|
|
REQUIRE(VALID_NMSOCK(*sockp));
|
|
|
|
REQUIRE((*sockp)->type == isc_nm_udplistener ||
|
|
|
|
(*sockp)->type == isc_nm_tcplistener ||
|
2022-06-20 20:30:12 +03:00
|
|
|
(*sockp)->type == isc_nm_streamdnslistener ||
|
2020-12-07 14:19:10 +02:00
|
|
|
(*sockp)->type == isc_nm_tlslistener ||
|
2023-03-16 12:50:04 +02:00
|
|
|
(*sockp)->type == isc_nm_httplistener ||
|
2023-07-12 15:25:38 +03:00
|
|
|
(*sockp)->type == isc_nm_proxystreamlistener ||
|
|
|
|
(*sockp)->type == isc_nm_proxyudplistener);
|
2020-06-04 14:54:36 -07:00
|
|
|
|
|
|
|
isc__nmsocket_detach(sockp);
|
|
|
|
}
void
isc___nmsocket_init(isc_nmsocket_t *sock, isc__networker_t *worker,
		    isc_nmsocket_type type, isc_sockaddr_t *iface,
		    isc_nmsocket_t *parent FLARG) {
	uint16_t family;

	REQUIRE(sock != NULL);
	REQUIRE(worker != NULL);

	*sock = (isc_nmsocket_t){
		.type = type,
		.tid = worker->loop->tid,
		.fd = -1,
		.inactive_handles = ISC_LIST_INITIALIZER,
		.result = ISC_R_UNSET,
		.active_handles = ISC_LIST_INITIALIZER,
		.active_handles_max = ISC_NETMGR_MAX_STREAM_CLIENTS_PER_CONN,
		.active_link = ISC_LINK_INITIALIZER,
		.active = true,
	};

	if (iface != NULL) {
		family = iface->type.sa.sa_family;
		sock->iface = *iface;
	} else {
		family = AF_UNSPEC;
	}

	if (parent) {
		sock->parent = parent;
	} else {
		ISC_LIST_APPEND(worker->active_sockets, sock, active_link);
	}

#if ISC_NETMGR_TRACE
	sock->backtrace_size = isc_backtrace(sock->backtrace, TRACE_SIZE);
#endif

	isc__networker_attach(worker, &sock->worker);
	sock->uv_handle.handle.data = sock;

	switch (type) {
	case isc_nm_udpsocket:
	case isc_nm_udplistener:
		switch (family) {
		case AF_INET:
			sock->statsindex = udp4statsindex;
			break;
		case AF_INET6:
			sock->statsindex = udp6statsindex;
			break;
		case AF_UNSPEC:
			/*
			 * Route sockets are AF_UNSPEC, and don't
			 * have stats counters.
			 */
			break;
		default:
			UNREACHABLE();
		}
		break;
	case isc_nm_tcpsocket:
	case isc_nm_tcplistener:
	case isc_nm_httpsocket:
	case isc_nm_httplistener:
		switch (family) {
		case AF_INET:
			sock->statsindex = tcp4statsindex;
			break;
		case AF_INET6:
			sock->statsindex = tcp6statsindex;
			break;
		default:
			UNREACHABLE();
		}
		break;
	default:
		break;
	}

	isc_refcount_init(&sock->references, 1);

	NETMGR_TRACE_LOG("isc__nmsocket_init():%p->references = %" PRIuFAST32
			 "\n",
			 sock, isc_refcount_current(&sock->references));

	sock->magic = NMSOCK_MAGIC;

	isc__nm_incstats(sock, STATID_ACTIVE);
}
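
/*
 * isc__nmsocket_clearcb() clears all of the socket's user callbacks
 * (recv, accept, connect) and their callback arguments; it must be
 * called from the socket's own loop thread.
 */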
void
isc__nmsocket_clearcb(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_tid());

	sock->recv_cb = NULL;
	sock->recv_cbarg = NULL;
	sock->accept_cb = NULL;
	sock->accept_cbarg = NULL;
	sock->connect_cb = NULL;
	sock->connect_cbarg = NULL;
}
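
/*
 * isc__nm_free_uvbuf() releases a receive buffer previously handed out
 * to libuv; the buffer must be the worker's shared receive buffer,
 * which is simply marked as no longer in use.
 */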
void
isc__nm_free_uvbuf(isc_nmsocket_t *sock, const uv_buf_t *buf) {
	REQUIRE(VALID_NMSOCK(sock));

	REQUIRE(buf->base == sock->worker->recvbuf);

	sock->worker->recvbuf_inuse = false;
}
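
/*
 * Handle allocation: alloc_handle() gets a fresh isc_nmhandle_t from the
 * worker's memory context, while dequeue_handle() tries to reuse a handle
 * from the socket's inactive-handles pool.  The pool is bypassed when
 * building with the address or thread sanitizers, presumably so that
 * every allocation and free stays visible to the sanitizer.
 */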
static isc_nmhandle_t *
alloc_handle(isc_nmsocket_t *sock) {
	isc_nmhandle_t *handle = isc_mem_get(sock->worker->mctx,
					     sizeof(isc_nmhandle_t));

	*handle = (isc_nmhandle_t){
		.magic = NMHANDLE_MAGIC,
		.active_link = ISC_LINK_INITIALIZER,
		.inactive_link = ISC_LINK_INITIALIZER,
	};
	isc_refcount_init(&handle->references, 1);

	return handle;
}

static isc_nmhandle_t *
dequeue_handle(isc_nmsocket_t *sock) {
#if !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__
	isc_nmhandle_t *handle = ISC_LIST_HEAD(sock->inactive_handles);
	if (handle != NULL) {
		ISC_LIST_DEQUEUE(sock->inactive_handles, handle, inactive_link);

		sock->inactive_handles_cur--;

		isc_refcount_init(&handle->references, 1);
		INSIST(VALID_NMHANDLE(handle));
		return handle;
	}
#else
	INSIST(ISC_LIST_EMPTY(sock->inactive_handles));
#endif /* !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__ */
	return NULL;
}
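
/*
 * isc___nmhandle_get() returns a referenced handle for the socket: it
 * reuses a pooled handle if one is available (otherwise allocates one),
 * attaches it to the socket, records the peer and local addresses, and
 * appends it to the socket's active-handles list.  For client-side UDP
 * sockets and for TCP, TLS and PROXY stream sockets the handle is also
 * remembered in sock->statichandle (assigned, not attached, as explained
 * below); for HTTP sockets with an nghttp2 session the handle attaches
 * to that session.
 */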
isc_nmhandle_t *
isc___nmhandle_get(isc_nmsocket_t *sock, isc_sockaddr_t const *peer,
		   isc_sockaddr_t const *local FLARG) {
	REQUIRE(VALID_NMSOCK(sock));

	isc_nmhandle_t *handle = dequeue_handle(sock);
	if (handle == NULL) {
		handle = alloc_handle(sock);
	}

	NETMGR_TRACE_LOG(
		"isc__nmhandle_get():handle %p->references = %" PRIuFAST32 "\n",
		handle, isc_refcount_current(&handle->references));

	isc___nmsocket_attach(sock, &handle->sock FLARG_PASS);

#if ISC_NETMGR_TRACE
	handle->backtrace_size = isc_backtrace(handle->backtrace, TRACE_SIZE);
#endif

	if (peer != NULL) {
		handle->peer = *peer;
	} else {
		handle->peer = sock->peer;
	}

	if (local != NULL) {
		handle->local = *local;
	} else {
		handle->local = sock->iface;
	}

	ISC_LIST_APPEND(sock->active_handles, handle, active_link);
	sock->active_handles_cur++;

	switch (sock->type) {
	case isc_nm_udpsocket:
	case isc_nm_proxyudpsocket:
		if (!sock->client) {
			break;
		}
		FALLTHROUGH;
	case isc_nm_tcpsocket:
	case isc_nm_tlssocket:
	case isc_nm_proxystreamsocket:
		INSIST(sock->statichandle == NULL);

		/*
		 * statichandle must be assigned, not attached;
		 * otherwise, if a handle was detached elsewhere
		 * it could never reach 0 references, and the
		 * handle and socket would never be freed.
		 */
		sock->statichandle = handle;
		break;
	default:
		break;
	}

#if HAVE_LIBNGHTTP2
	if (sock->type == isc_nm_httpsocket && sock->h2 != NULL &&
	    sock->h2->session)
	{
		isc__nm_httpsession_attach(sock->h2->session,
					   &handle->httpsession);
	}
#endif

	return handle;
}

bool
isc_nmhandle_is_stream(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	return handle->sock->type == isc_nm_tcpsocket ||
	       handle->sock->type == isc_nm_tlssocket ||
	       handle->sock->type == isc_nm_httpsocket ||
	       handle->sock->type == isc_nm_streamdnssocket ||
	       handle->sock->type == isc_nm_proxystreamsocket;
}
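
/*
 * Handle teardown: nmhandle_free() runs the user's dofree callback (if
 * any) and returns the memory to the worker's memory context;
 * nmhandle__destroy() detaches the handle from its socket and either
 * frees it or, when the socket is still active and the inactive pool is
 * not full, parks it on the socket's inactive-handles list for reuse
 * (never when built with the address or thread sanitizers).
 */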
static void
nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
	handle->magic = 0;

	if (handle->dofree != NULL) {
		handle->dofree(handle->opaque);
	}

	isc_mem_put(sock->worker->mctx, handle, sizeof(*handle));
}

static void
nmhandle__destroy(isc_nmhandle_t *handle) {
	isc_nmsocket_t *sock = handle->sock;
	handle->sock = NULL;

#if defined(__SANITIZE_ADDRESS__) || defined(__SANITIZE_THREAD__)
	nmhandle_free(sock, handle);
#else
	if (sock->active &&
	    sock->inactive_handles_cur < sock->inactive_handles_max)
	{
		sock->inactive_handles_cur++;
		ISC_LIST_APPEND(sock->inactive_handles, handle, inactive_link);
	} else {
		nmhandle_free(sock, handle);
	}
#endif

	isc__nmsocket_detach(&sock);
}

static void
isc__nm_closehandle_job(void *arg) {
	isc_nmhandle_t *handle = arg;
	isc_nmsocket_t *sock = handle->sock;

	sock->closehandle_cb(sock);

	nmhandle__destroy(handle);
}
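
/*
 * nmhandle_destroy() runs when the last reference to a handle is
 * dropped: it calls the user's doreset callback, detaches any HTTP/2
 * session and any proxy_udphandle reference, clears sock->statichandle
 * if this was the static handle, unlinks the handle from the active
 * list, and then destroys it, either directly or asynchronously via
 * isc__nm_closehandle_job() when the socket has a closehandle_cb
 * configured.
 */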
static void
nmhandle_destroy(isc_nmhandle_t *handle) {
	isc_nmsocket_t *sock = handle->sock;

	if (handle->doreset != NULL) {
		handle->doreset(handle->opaque);
	}

#if HAVE_LIBNGHTTP2
	if (sock->type == isc_nm_httpsocket && handle->httpsession != NULL) {
		isc__nm_httpsession_detach(&handle->httpsession);
	}
#endif

	if (handle == sock->statichandle) {
		/* statichandle is assigned, not attached. */
		sock->statichandle = NULL;
	}

	if (handle->proxy_udphandle != NULL) {
		isc_nmhandle_detach(&handle->proxy_udphandle);
	}

	ISC_LIST_UNLINK(sock->active_handles, handle, active_link);
	INSIST(sock->active_handles_cur > 0);
	sock->active_handles_cur--;

	if (sock->closehandle_cb == NULL) {
		nmhandle__destroy(handle);
		return;
	}

	/*
	 * If the socket has a callback configured for that (e.g.,
	 * to perform cleanup after request processing), call it
	 * now asynchronously.
	 */
	isc_job_run(sock->worker->loop, &handle->job, isc__nm_closehandle_job,
		    handle);
}

#if ISC_NETMGR_TRACE
ISC_REFCOUNT_TRACE_IMPL(isc_nmhandle, nmhandle_destroy)
#else
ISC_REFCOUNT_IMPL(isc_nmhandle, nmhandle_destroy);
#endif

void *
isc_nmhandle_getdata(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	return handle->opaque;
}
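
/*
 * isc_nmhandle_setdata() associates opaque caller data with a handle,
 * together with optional callbacks: doreset is run when the handle is
 * destroyed or recycled, dofree when its memory is finally freed.
 * A minimal usage sketch (the callback names here are hypothetical):
 *
 *	isc_nmhandle_setdata(handle, mydata, mydata_reset, mydata_free);
 *	...
 *	void *mydata = isc_nmhandle_getdata(handle);
 */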
void
isc_nmhandle_setdata(isc_nmhandle_t *handle, void *arg,
		     isc_nm_opaquecb_t doreset, isc_nm_opaquecb_t dofree) {
	REQUIRE(VALID_NMHANDLE(handle));

	handle->opaque = arg;
	handle->doreset = doreset;
	handle->dofree = dofree;
}
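
/*
 * isc__nm_failed_send_cb() reports a failed send: if the request has a
 * send callback it is invoked with the error result, otherwise the
 * uvreq is simply released.
 */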
void
isc__nm_failed_send_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
		       isc_result_t eresult, bool async) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(req));

	if (req->cb.send != NULL) {
		isc__nm_sendcb(sock, req, eresult, async);
	} else {
		isc__nm_uvreq_put(&req);
	}
}
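
/*
 * isc__nm_failed_connect_cb() finishes a failed outgoing connection:
 * it clears the connecting flag, bumps the connect-failure statistics
 * counter, stops the read timer, clears the socket callbacks, invokes
 * the connect callback with the error result, and finally marks the
 * socket ready for destruction.
 */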
void
isc__nm_failed_connect_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
			  isc_result_t eresult, bool async) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(VALID_UVREQ(req));
	REQUIRE(sock->tid == isc_tid());
	REQUIRE(req->cb.connect != NULL);
	REQUIRE(sock->connecting);

	sock->connecting = false;

	isc__nm_incstats(sock, STATID_CONNECTFAIL);

	isc__nmsocket_timer_stop(sock);
	uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);

	isc__nmsocket_clearcb(sock);
	isc__nm_connectcb(sock, req, eresult, async);

	isc__nmsocket_prep_destroy(sock);
}
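
/*
 * isc__nm_failed_read_cb() dispatches a read failure to the
 * per-transport handler matching the socket type (UDP, TCP, TLS,
 * stream DNS, PROXY stream or PROXY UDP).
 */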
void
isc__nm_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result, bool async) {
	REQUIRE(VALID_NMSOCK(sock));
	UNUSED(async);

	switch (sock->type) {
	case isc_nm_udpsocket:
		isc__nm_udp_failed_read_cb(sock, result, async);
		return;
	case isc_nm_tcpsocket:
		isc__nm_tcp_failed_read_cb(sock, result, async);
		return;
	case isc_nm_tlssocket:
		isc__nm_tls_failed_read_cb(sock, result, async);
		return;
	case isc_nm_streamdnssocket:
		isc__nm_streamdns_failed_read_cb(sock, result, async);
		return;
	case isc_nm_proxystreamsocket:
		isc__nm_proxystream_failed_read_cb(sock, result, async);
		return;
	case isc_nm_proxyudpsocket:
		isc__nm_proxyudp_failed_read_cb(sock, result, async);
		return;
	default:
		UNREACHABLE();
	}
}
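
/*
 * isc__nmsocket_connecttimeout_cb() fires when an outgoing connection
 * exceeds its connect timeout: the timer is stopped, the socket is
 * flagged as timed out, and the socket is shut down.
 */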
void
isc__nmsocket_connecttimeout_cb(uv_timer_t *timer) {
	uv_connect_t *uvreq = uv_handle_get_data((uv_handle_t *)timer);
	isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)uvreq->handle);
	isc__nm_uvreq_t *req = uv_handle_get_data((uv_handle_t *)uvreq);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_tid());
	REQUIRE(VALID_UVREQ(req));
	REQUIRE(VALID_NMHANDLE(req->handle));
	REQUIRE(sock->connecting);

	isc__nmsocket_timer_stop(sock);

	/*
	 * Mark the connection as timed out and shutdown the socket.
	 */
	REQUIRE(!sock->timedout);
	sock->timedout = true;
	isc__nmsocket_shutdown(sock);
}

void
isc__nm_accept_connection_log(isc_nmsocket_t *sock, isc_result_t result,
			      bool can_log_quota) {
	int level;

	switch (result) {
	case ISC_R_SUCCESS:
	case ISC_R_NOCONN:
		return;
	case ISC_R_QUOTA:
	case ISC_R_SOFTQUOTA:
		if (!can_log_quota) {
			return;
		}
		level = ISC_LOG_INFO;
		break;
	case ISC_R_NOTCONNECTED:
		level = ISC_LOG_INFO;
		break;
	default:
		level = ISC_LOG_ERROR;
	}

	isc__nmsocket_log(sock, level, "Accepting TCP connection failed: %s",
			  isc_result_totext(result));
}
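
/*
 * isc__nmsocket_writetimeout_cb() handles a write (send) timeout on a
 * socket: it restarts reading on the socket and then resets it; the
 * only result it accepts is ISC_R_TIMEDOUT.
 */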
void
isc__nmsocket_writetimeout_cb(void *data, isc_result_t eresult) {
	isc__nm_uvreq_t *req = data;
	isc_nmsocket_t *sock = NULL;

	REQUIRE(eresult == ISC_R_TIMEDOUT);
	REQUIRE(VALID_UVREQ(req));
	REQUIRE(VALID_NMSOCK(req->sock));

	sock = req->sock;

	isc__nm_start_reading(sock);
	isc__nmsocket_reset(sock);
}
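
/*
 * isc__nmsocket_readtimeout_cb() handles a read timeout.  For client
 * sockets the timer is stopped and the read callback (if any) is called
 * with ISC_R_TIMEDOUT; if the timer is no longer running after that,
 * the callbacks are cleared and the failed-read path is taken.  For
 * server sockets the failed-read path is taken immediately.
 */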
void
isc__nmsocket_readtimeout_cb(uv_timer_t *timer) {
	isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)timer);

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_tid());

	if (sock->client) {
		uv_timer_stop(timer);

		if (sock->recv_cb != NULL) {
			isc__nm_uvreq_t *req = isc__nm_get_read_req(sock, NULL);
			isc__nm_readcb(sock, req, ISC_R_TIMEDOUT, false);
		}

		if (!isc__nmsocket_timer_running(sock)) {
			isc__nmsocket_clearcb(sock);
			isc__nm_failed_read_cb(sock, ISC_R_TIMEDOUT, false);
		}
	} else {
		isc__nm_failed_read_cb(sock, ISC_R_TIMEDOUT, false);
	}
}
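
/*
 * Read/connect timer helpers.  The TLS, stream DNS and PROXY transports
 * delegate to their own timer implementations; for plain libuv sockets
 * isc__nmsocket_timer_restart() arms the read timer with either the
 * connect timeout (while connecting) or the read timeout,
 * isc__nmsocket_timer_running() reports whether the timer is active,
 * isc__nmsocket_timer_start() arms it only when it is not already
 * running, and isc__nmsocket_timer_stop() disarms it.
 */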
void
isc__nmsocket_timer_restart(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));

	switch (sock->type) {
	case isc_nm_tlssocket:
		isc__nmsocket_tls_timer_restart(sock);
		return;
	case isc_nm_streamdnssocket:
		isc__nmsocket_streamdns_timer_restart(sock);
		return;
	case isc_nm_proxystreamsocket:
		isc__nmsocket_proxystream_timer_restart(sock);
		return;
	case isc_nm_proxyudpsocket:
		isc__nmsocket_proxyudp_timer_restart(sock);
		return;
	default:
		break;
	}

	if (uv_is_closing((uv_handle_t *)&sock->read_timer)) {
		return;
	}

	if (sock->connecting) {
		int r;

		if (sock->connect_timeout == 0) {
			return;
		}

		r = uv_timer_start(&sock->read_timer,
				   isc__nmsocket_connecttimeout_cb,
				   sock->connect_timeout + 10, 0);
		UV_RUNTIME_CHECK(uv_timer_start, r);

	} else {
		int r;

		if (sock->read_timeout == 0) {
			return;
		}

		r = uv_timer_start(&sock->read_timer,
				   isc__nmsocket_readtimeout_cb,
				   sock->read_timeout, 0);
		UV_RUNTIME_CHECK(uv_timer_start, r);
	}
}

bool
isc__nmsocket_timer_running(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));

	switch (sock->type) {
	case isc_nm_tlssocket:
		return isc__nmsocket_tls_timer_running(sock);
	case isc_nm_streamdnssocket:
		return isc__nmsocket_streamdns_timer_running(sock);
	case isc_nm_proxystreamsocket:
		return isc__nmsocket_proxystream_timer_running(sock);
	case isc_nm_proxyudpsocket:
		return isc__nmsocket_proxyudp_timer_running(sock);
	default:
		break;
	}

	return uv_is_active((uv_handle_t *)&sock->read_timer);
}

void
isc__nmsocket_timer_start(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));

	if (isc__nmsocket_timer_running(sock)) {
		return;
	}

	isc__nmsocket_timer_restart(sock);
}

void
isc__nmsocket_timer_stop(isc_nmsocket_t *sock) {
	int r;

	REQUIRE(VALID_NMSOCK(sock));

	switch (sock->type) {
	case isc_nm_tlssocket:
		isc__nmsocket_tls_timer_stop(sock);
		return;
	case isc_nm_streamdnssocket:
		isc__nmsocket_streamdns_timer_stop(sock);
		return;
	case isc_nm_proxystreamsocket:
		isc__nmsocket_proxystream_timer_stop(sock);
		return;
	case isc_nm_proxyudpsocket:
		isc__nmsocket_proxyudp_timer_stop(sock);
		return;
	default:
		break;
	}

	/* uv_timer_stop() is idempotent, no need to check if running */

	r = uv_timer_stop(&sock->read_timer);
	UV_RUNTIME_CHECK(uv_timer_stop, r);
}
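
/*
 * isc___nm_get_read_req() prepares a uvreq for an incoming read: it
 * copies the socket's recv callback and argument into the request and
 * attaches the appropriate handle (the static handle for TCP, TLS and
 * PROXY stream sockets and for clients, or recv_handle for stream DNS
 * sockets); the ISC_NETMGR_TRACE variants only differ in passing the
 * caller's file/line information to the attach.
 */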
isc__nm_uvreq_t *
isc___nm_get_read_req(isc_nmsocket_t *sock, isc_sockaddr_t *sockaddr FLARG) {
	isc__nm_uvreq_t *req = NULL;

	req = isc__nm_uvreq_get(sock);
	req->cb.recv = sock->recv_cb;
	req->cbarg = sock->recv_cbarg;

	switch (sock->type) {
	case isc_nm_tcpsocket:
	case isc_nm_tlssocket:
	case isc_nm_proxystreamsocket:
#if ISC_NETMGR_TRACE
		isc_nmhandle__attach(sock->statichandle,
				     &req->handle FLARG_PASS);
#else
		isc_nmhandle_attach(sock->statichandle, &req->handle);
#endif
		break;
	case isc_nm_streamdnssocket:
#if ISC_NETMGR_TRACE
		isc_nmhandle__attach(sock->recv_handle,
				     &req->handle FLARG_PASS);
#else
		isc_nmhandle_attach(sock->recv_handle, &req->handle);
#endif
		break;
	default:
		if (sock->client && sock->statichandle != NULL) {
#if ISC_NETMGR_TRACE
			isc_nmhandle__attach(sock->statichandle,
					     &req->handle FLARG_PASS);
#else
			isc_nmhandle_attach(sock->statichandle, &req->handle);
|
Fix the streaming read callback shutdown logic
When shutting down TCP sockets, the read callback calling logic was
flawed, it would call either one less callback or one extra. Fix the
logic in the way:
1. When isc_nm_read() has been called but isc_nm_read_stop() hasn't on
the handle, the read callback will be called with ISC_R_CANCELED to
cancel active reading from the socket/handle.
2. When isc_nm_read() has been called and isc_nm_read_stop() has been
called on the on the handle, the read callback will be called with
ISC_R_SHUTTINGDOWN to signal that the dormant (not-reading) socket
is being shut down.
3. The .reading and .recv_read flags are little bit tricky. The
.reading flag indicates if the outer layer is reading the data (that
would be uv_tcp_t for TCP and isc_nmsocket_t (TCP) for TLSStream),
the .recv_read flag indicates whether somebody is interested in the
data read from the socket.
Usually, you would expect that the .reading should be false when
.recv_read is false, but it gets even more tricky with TLSStream as
the TLS protocol might need to read from the socket even when sending
data.
Fix the usage of the .recv_read and .reading flags in the TLSStream
to their true meaning - which mostly consist of using .recv_read
everywhere and then wrapping isc_nm_read() and isc_nm_read_stop()
with the .reading flag.
4. The TLS failed read helper has been modified to resemble the TCP code
as much as possible, clearing and re-setting the .recv_read flag in
the TCP timeout code has been fixed and .recv_read is now cleared
when isc_nm_read_stop() has been called on the streaming socket.
5. The use of Network Manager in the named_controlconf, isccc_ccmsg, and
isc_httpd units have been greatly simplified due to the improved design.
6. More unit tests for TCP and TLS testing the shutdown conditions have
been added.
Co-authored-by: Ondřej Surý <ondrej@isc.org>
Co-authored-by: Artem Boldariev <artem@isc.org>
2023-04-13 17:27:50 +02:00
|
|
|
#endif
|
2021-03-29 10:52:05 +02:00
|
|
|
} else {
|
Fix the streaming read callback shutdown logic
When shutting down TCP sockets, the read callback calling logic was
flawed, it would call either one less callback or one extra. Fix the
logic in the way:
1. When isc_nm_read() has been called but isc_nm_read_stop() hasn't on
the handle, the read callback will be called with ISC_R_CANCELED to
cancel active reading from the socket/handle.
2. When isc_nm_read() has been called and isc_nm_read_stop() has been
called on the on the handle, the read callback will be called with
ISC_R_SHUTTINGDOWN to signal that the dormant (not-reading) socket
is being shut down.
3. The .reading and .recv_read flags are little bit tricky. The
.reading flag indicates if the outer layer is reading the data (that
would be uv_tcp_t for TCP and isc_nmsocket_t (TCP) for TLSStream),
the .recv_read flag indicates whether somebody is interested in the
data read from the socket.
Usually, you would expect that the .reading should be false when
.recv_read is false, but it gets even more tricky with TLSStream as
the TLS protocol might need to read from the socket even when sending
data.
Fix the usage of the .recv_read and .reading flags in the TLSStream
to their true meaning - which mostly consist of using .recv_read
everywhere and then wrapping isc_nm_read() and isc_nm_read_stop()
with the .reading flag.
4. The TLS failed read helper has been modified to resemble the TCP code
as much as possible, clearing and re-setting the .recv_read flag in
the TCP timeout code has been fixed and .recv_read is now cleared
when isc_nm_read_stop() has been called on the streaming socket.
5. The use of Network Manager in the named_controlconf, isccc_ccmsg, and
isc_httpd units have been greatly simplified due to the improved design.
6. More unit tests for TCP and TLS testing the shutdown conditions have
been added.
Co-authored-by: Ondřej Surý <ondrej@isc.org>
Co-authored-by: Artem Boldariev <artem@isc.org>
2023-04-13 17:27:50 +02:00
|
|
|
req->handle = isc___nmhandle_get(sock, sockaddr,
|
|
|
|
NULL FLARG_PASS);
|
2021-03-29 10:52:05 +02:00
|
|
|
}
|
|
|
|
break;
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return req;
|
2021-03-16 09:03:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*%<
 * Allocator callback for read operations.
 *
 * Note this doesn't actually allocate anything, it just assigns the
 * worker's receive buffer to a socket, and marks it as "in use".
 */
void
isc__nm_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf) {
	isc_nmsocket_t *sock = uv_handle_get_data(handle);
	isc__networker_t *worker = NULL;

	REQUIRE(VALID_NMSOCK(sock));

	/*
	 * The size provided by libuv is only a suggested size, and it always
	 * defaults to 64 * 1024 in the current versions of libuv (see
	 * src/unix/udp.c and src/unix/stream.c).
	 */
	UNUSED(size);

	worker = sock->worker;
	INSIST(!worker->recvbuf_inuse);
	INSIST(worker->recvbuf != NULL);

	switch (sock->type) {
	case isc_nm_udpsocket:
		buf->len = ISC_NETMGR_UDP_RECVBUF_SIZE;
		break;
	case isc_nm_tcpsocket:
		buf->len = ISC_NETMGR_TCP_RECVBUF_SIZE;
		break;
	default:
		UNREACHABLE();
	}

	REQUIRE(buf->len <= ISC_NETMGR_RECVBUF_SIZE);
	buf->base = worker->recvbuf;

	worker->recvbuf_inuse = true;
}
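
/*
 * Note: this allocator is registered together with the matching read
 * callback (see isc__nm_start_reading() below), so libuv invokes it
 * immediately before delivering received data.  Each worker hands out a
 * single shared receive buffer, so the INSIST(!worker->recvbuf_inuse)
 * check above ensures that only one read can be using that buffer at a
 * time.
 */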

isc_result_t
isc__nm_start_reading(isc_nmsocket_t *sock) {
	isc_result_t result = ISC_R_SUCCESS;
	int r;

	if (uv_is_active(&sock->uv_handle.handle)) {
		return ISC_R_SUCCESS;
	}

	switch (sock->type) {
	case isc_nm_udpsocket:
		r = uv_udp_recv_start(&sock->uv_handle.udp, isc__nm_alloc_cb,
				      isc__nm_udp_read_cb);
		break;
	case isc_nm_tcpsocket:
		r = uv_read_start(&sock->uv_handle.stream, isc__nm_alloc_cb,
				  isc__nm_tcp_read_cb);
		break;
	default:
		UNREACHABLE();
	}

	if (r != 0) {
		result = isc_uverr2result(r);
	}

	return result;
}

void
isc__nm_stop_reading(isc_nmsocket_t *sock) {
	int r;

	if (!uv_is_active(&sock->uv_handle.handle)) {
		return;
	}

	switch (sock->type) {
	case isc_nm_udpsocket:
		r = uv_udp_recv_stop(&sock->uv_handle.udp);
		UV_RUNTIME_CHECK(uv_udp_recv_stop, r);
		break;
	case isc_nm_tcpsocket:
		r = uv_read_stop(&sock->uv_handle.stream);
		UV_RUNTIME_CHECK(uv_read_stop, r);
		break;
	default:
		UNREACHABLE();
	}
}
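
/*
 * Both helpers above are safe to call more than once: each checks
 * uv_is_active() first, so isc__nm_start_reading() on a socket that is
 * already reading is a no-op returning ISC_R_SUCCESS, and
 * isc__nm_stop_reading() on a socket that is not reading simply
 * returns.
 */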

bool
isc__nm_closing(isc__networker_t *worker) {
	return worker->shuttingdown;
}

bool
isc__nmsocket_closing(isc_nmsocket_t *sock) {
	return !sock->active || sock->closing ||
	       isc__nm_closing(sock->worker) ||
	       (sock->server != NULL && !isc__nmsocket_active(sock->server));
}
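
/*
 * A socket counts as "closing" when it is no longer active, is being
 * closed explicitly, belongs to a worker that is shutting down, or is a
 * child of a server socket that is itself no longer active.
 */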

void
isc_nmhandle_cleartimeout(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));
	REQUIRE(VALID_NMSOCK(handle->sock));

	switch (handle->sock->type) {
#if HAVE_LIBNGHTTP2
	case isc_nm_httpsocket:
		isc__nm_http_cleartimeout(handle);
		return;
#endif
	case isc_nm_tlssocket:
		isc__nm_tls_cleartimeout(handle);
		return;
	case isc_nm_streamdnssocket:
		isc__nmhandle_streamdns_cleartimeout(handle);
		return;
	case isc_nm_proxystreamsocket:
		isc__nmhandle_proxystream_cleartimeout(handle);
		return;
	case isc_nm_proxyudpsocket:
		isc__nmhandle_proxyudp_cleartimeout(handle);
		return;
	default:
		handle->sock->read_timeout = 0;

		if (uv_is_active((uv_handle_t *)&handle->sock->read_timer)) {
			isc__nmsocket_timer_stop(handle->sock);
		}
	}
}

void
isc_nmhandle_settimeout(isc_nmhandle_t *handle, uint32_t timeout) {
	REQUIRE(VALID_NMHANDLE(handle));
	REQUIRE(VALID_NMSOCK(handle->sock));

	switch (handle->sock->type) {
#if HAVE_LIBNGHTTP2
	case isc_nm_httpsocket:
		isc__nm_http_settimeout(handle, timeout);
		return;
#endif
	case isc_nm_tlssocket:
		isc__nm_tls_settimeout(handle, timeout);
		return;
	case isc_nm_streamdnssocket:
		isc__nmhandle_streamdns_settimeout(handle, timeout);
		return;
	case isc_nm_proxystreamsocket:
		isc__nmhandle_proxystream_settimeout(handle, timeout);
		return;
	case isc_nm_proxyudpsocket:
		isc__nmhandle_proxyudp_settimeout(handle, timeout);
		return;
	default:
		handle->sock->read_timeout = timeout;
		isc__nmsocket_timer_restart(handle->sock);
	}
}
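
/*
 * Usage sketch: a caller holding a handle can tighten the read timeout
 * before issuing a read and drop the timeout again afterwards, e.g.:
 *
 *	isc_nmhandle_settimeout(handle, timeout);
 *	isc_nm_read(handle, read_cb, read_cbarg);
 *	...
 *	isc_nmhandle_cleartimeout(handle);
 *
 * where timeout, read_cb and read_cbarg are caller-supplied values.
 */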

void
isc_nmhandle_keepalive(isc_nmhandle_t *handle, bool value) {
	isc_nmsocket_t *sock = NULL;
	isc_nm_t *netmgr = NULL;

	REQUIRE(VALID_NMHANDLE(handle));
	REQUIRE(VALID_NMSOCK(handle->sock));

	sock = handle->sock;
	netmgr = sock->worker->netmgr;

	REQUIRE(sock->tid == isc_tid());

	switch (sock->type) {
	case isc_nm_tcpsocket:
		sock->keepalive = value;
		sock->read_timeout =
			value ? atomic_load_relaxed(&netmgr->keepalive)
			      : atomic_load_relaxed(&netmgr->idle);
		sock->write_timeout =
			value ? atomic_load_relaxed(&netmgr->keepalive)
			      : atomic_load_relaxed(&netmgr->idle);
		break;
	case isc_nm_streamdnssocket:
		isc__nmhandle_streamdns_keepalive(handle, value);
		break;
	case isc_nm_tlssocket:
		isc__nmhandle_tls_keepalive(handle, value);
		break;
#if HAVE_LIBNGHTTP2
	case isc_nm_httpsocket:
		isc__nmhandle_http_keepalive(handle, value);
		break;
#endif /* HAVE_LIBNGHTTP2 */
	case isc_nm_proxystreamsocket:
		isc__nmhandle_proxystream_keepalive(handle, value);
		break;
	default:
		/*
		 * For any other protocol, this is a no-op.
		 */
		return;
	}
}
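
/*
 * Enabling keepalive on a plain TCP handle switches both the read and
 * the write timeout from the netmgr's idle value to its keepalive
 * value; disabling it switches them back.  Protocols layered on top of
 * TCP delegate to their own keepalive helpers above.
 */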

bool
isc_nmhandle_timer_running(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));
	REQUIRE(VALID_NMSOCK(handle->sock));

	return isc__nmsocket_timer_running(handle->sock);
}

isc_sockaddr_t
isc_nmhandle_peeraddr(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	return handle->peer;
}

isc_sockaddr_t
isc_nmhandle_localaddr(isc_nmhandle_t *handle) {
	isc_sockaddr_t addr;

	REQUIRE(VALID_NMHANDLE(handle));

	addr = handle->local;

#ifdef ISC_SOCKET_DETAILS
	switch (handle->sock->type) {
	case isc_nm_tcpsocket:
		uv_tcp_getsockname(&handle->sock->uv_handle.tcp,
				   (struct sockaddr *)&addr.type,
				   &(int){ sizeof(addr.type) });
		break;

	case isc_nm_udpsocket:
		uv_udp_getsockname(&handle->sock->uv_handle.udp,
				   (struct sockaddr *)&addr.type,
				   &(int){ sizeof(addr.type) });
		break;

	case isc_nm_tlssocket:
	case isc_nm_httpsocket:
	case isc_nm_streamdnssocket:
		if (handle->sock->outerhandle) {
			addr = isc_nmhandle_localaddr(
				handle->sock->outerhandle);
		}
		break;

	case isc_nm_proxystreamsocket:
	case isc_nm_proxyudpsocket:
		break;

	default:
		UNREACHABLE();
	}
#endif /* ISC_SOCKET_DETAILS */

	return addr;
}
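
/*
 * isc_nmhandle_peeraddr() and isc_nmhandle_localaddr() normally just
 * return the addresses cached on the handle.  When ISC_SOCKET_DETAILS
 * is defined, the local address is instead re-queried from libuv with
 * uv_tcp_getsockname()/uv_udp_getsockname(), and layered sockets (TLS,
 * HTTP, stream DNS) forward the question to their underlying transport
 * handle.
 */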

isc_nm_t *
isc_nmhandle_netmgr(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));
	REQUIRE(VALID_NMSOCK(handle->sock));

	return handle->sock->worker->netmgr;
}

isc__nm_uvreq_t *
isc___nm_uvreq_get(isc_nmsocket_t *sock FLARG) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(sock->tid == isc_tid());

	isc__networker_t *worker = sock->worker;

	isc__nm_uvreq_t *req = isc_mempool_get(worker->uvreq_pool);
	*req = (isc__nm_uvreq_t){
		.connect_tries = 3,
		.link = ISC_LINK_INITIALIZER,
		.active_link = ISC_LINK_INITIALIZER,
		.magic = UVREQ_MAGIC,
	};
	uv_handle_set_data(&req->uv_req.handle, req);

	isc___nmsocket_attach(sock, &req->sock FLARG_PASS);

	ISC_LIST_APPEND(sock->active_uvreqs, req, active_link);

	return req;
}

void
isc___nm_uvreq_put(isc__nm_uvreq_t **reqp FLARG) {
	REQUIRE(reqp != NULL && VALID_UVREQ(*reqp));

	isc__nm_uvreq_t *req = *reqp;
	isc_nmhandle_t *handle = req->handle;
	isc_nmsocket_t *sock = req->sock;

	*reqp = NULL;
	req->handle = NULL;

	REQUIRE(VALID_UVREQ(req));

	ISC_LIST_UNLINK(sock->active_uvreqs, req, active_link);

	if (handle != NULL) {
#if ISC_NETMGR_TRACE
		isc_nmhandle__detach(&handle, func, file, line);
#else
		isc_nmhandle_detach(&handle);
#endif
	}

	isc_mempool_put(sock->worker->uvreq_pool, req);

	isc___nmsocket_detach(&sock FLARG_PASS);
}
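
/*
 * Every isc___nm_uvreq_get() must be balanced by exactly one
 * isc___nm_uvreq_put(): _get() takes the request from the worker's
 * uvreq mempool, attaches the socket and appends the request to
 * sock->active_uvreqs, while _put() unlinks it, detaches any handle
 * still attached to it, returns it to the mempool and finally detaches
 * the socket reference.
 */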

void
isc_nm_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
	    void *cbarg) {
	REQUIRE(VALID_NMHANDLE(handle));

	switch (handle->sock->type) {
	case isc_nm_udpsocket:
	case isc_nm_udplistener:
		isc__nm_udp_send(handle, region, cb, cbarg);
		break;
	case isc_nm_tcpsocket:
		isc__nm_tcp_send(handle, region, cb, cbarg);
		break;
	case isc_nm_streamdnssocket:
		isc__nm_streamdns_send(handle, region, cb, cbarg);
		break;
	case isc_nm_tlssocket:
		isc__nm_tls_send(handle, region, cb, cbarg);
		break;
#if HAVE_LIBNGHTTP2
	case isc_nm_httpsocket:
		isc__nm_http_send(handle, region, cb, cbarg);
		break;
#endif
	case isc_nm_proxystreamsocket:
		isc__nm_proxystream_send(handle, region, cb, cbarg);
		break;
	case isc_nm_proxyudpsocket:
		isc__nm_proxyudp_send(handle, region, cb, cbarg);
		break;
	default:
		UNREACHABLE();
	}
}
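
/*
 * Usage sketch: a caller sends a region of memory and is notified of
 * the result through the supplied isc_nm_cb_t callback, e.g.:
 *
 *	isc_region_t region = { .base = buf, .length = buflen };
 *
 *	isc_nm_send(handle, &region, send_done_cb, cbarg);
 *
 * where buf, buflen, send_done_cb and cbarg are caller-supplied; the
 * per-protocol send routine is picked from the handle's socket type.
 */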

void
isc__nm_senddns(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
		void *cbarg) {
	REQUIRE(VALID_NMHANDLE(handle));

	switch (handle->sock->type) {
	case isc_nm_tcpsocket:
		isc__nm_tcp_senddns(handle, region, cb, cbarg);
		break;
	case isc_nm_tlssocket:
		isc__nm_tls_senddns(handle, region, cb, cbarg);
		break;
	case isc_nm_proxystreamsocket:
		isc__nm_proxystream_senddns(handle, region, cb, cbarg);
		break;
	default:
		UNREACHABLE();
	}
}

void
isc_nm_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
	REQUIRE(VALID_NMHANDLE(handle));

	switch (handle->sock->type) {
	case isc_nm_udpsocket:
		isc__nm_udp_read(handle, cb, cbarg);
		break;
	case isc_nm_tcpsocket:
		isc__nm_tcp_read(handle, cb, cbarg);
		break;
	case isc_nm_streamdnssocket:
		isc__nm_streamdns_read(handle, cb, cbarg);
		break;
	case isc_nm_tlssocket:
		isc__nm_tls_read(handle, cb, cbarg);
		break;
#if HAVE_LIBNGHTTP2
	case isc_nm_httpsocket:
		isc__nm_http_read(handle, cb, cbarg);
		break;
#endif
	case isc_nm_proxystreamsocket:
		isc__nm_proxystream_read(handle, cb, cbarg);
		break;
	case isc_nm_proxyudpsocket:
		isc__nm_proxyudp_read(handle, cb, cbarg);
		break;
	default:
		UNREACHABLE();
	}
}
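
/*
 * Usage sketch: on a streaming handle a read is started with
 * isc_nm_read() and later stopped with isc_nm_read_stop() (see below).
 * While the read is active, a shutdown reports ISC_R_CANCELED to the
 * read callback; once reading has been stopped, a shutdown of the
 * dormant socket reports ISC_R_SHUTTINGDOWN instead, e.g.:
 *
 *	isc_nm_read(handle, recv_cb, recv_cbarg);
 *	...
 *	isc_nm_read_stop(handle);
 *
 * where recv_cb and recv_cbarg are caller-supplied.
 */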
|
|
|
|
|
Fix the streaming read callback shutdown logic
When shutting down TCP sockets, the read callback calling logic was
flawed, it would call either one less callback or one extra. Fix the
logic in the way:
1. When isc_nm_read() has been called but isc_nm_read_stop() hasn't on
the handle, the read callback will be called with ISC_R_CANCELED to
cancel active reading from the socket/handle.
2. When isc_nm_read() has been called and isc_nm_read_stop() has been
called on the on the handle, the read callback will be called with
ISC_R_SHUTTINGDOWN to signal that the dormant (not-reading) socket
is being shut down.
3. The .reading and .recv_read flags are little bit tricky. The
.reading flag indicates if the outer layer is reading the data (that
would be uv_tcp_t for TCP and isc_nmsocket_t (TCP) for TLSStream),
the .recv_read flag indicates whether somebody is interested in the
data read from the socket.
Usually, you would expect that the .reading should be false when
.recv_read is false, but it gets even more tricky with TLSStream as
the TLS protocol might need to read from the socket even when sending
data.
Fix the usage of the .recv_read and .reading flags in the TLSStream
to their true meaning - which mostly consist of using .recv_read
everywhere and then wrapping isc_nm_read() and isc_nm_read_stop()
with the .reading flag.
4. The TLS failed read helper has been modified to resemble the TCP code
as much as possible, clearing and re-setting the .recv_read flag in
the TCP timeout code has been fixed and .recv_read is now cleared
when isc_nm_read_stop() has been called on the streaming socket.
5. The use of Network Manager in the named_controlconf, isccc_ccmsg, and
isc_httpd units have been greatly simplified due to the improved design.
6. More unit tests for TCP and TLS testing the shutdown conditions have
been added.
Co-authored-by: Ondřej Surý <ondrej@isc.org>
Co-authored-by: Artem Boldariev <artem@isc.org>
2023-04-13 17:27:50 +02:00
|
|
|
static void
|
|
|
|
cancelread_cb(void *arg) {
|
|
|
|
isc_nmhandle_t *handle = arg;
|
|
|
|
|
2020-06-05 17:32:36 -07:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
Fix the streaming read callback shutdown logic
When shutting down TCP sockets, the read callback calling logic was
flawed, it would call either one less callback or one extra. Fix the
logic in the way:
1. When isc_nm_read() has been called but isc_nm_read_stop() hasn't on
the handle, the read callback will be called with ISC_R_CANCELED to
cancel active reading from the socket/handle.
2. When isc_nm_read() has been called and isc_nm_read_stop() has been
called on the on the handle, the read callback will be called with
ISC_R_SHUTTINGDOWN to signal that the dormant (not-reading) socket
is being shut down.
3. The .reading and .recv_read flags are little bit tricky. The
.reading flag indicates if the outer layer is reading the data (that
would be uv_tcp_t for TCP and isc_nmsocket_t (TCP) for TLSStream),
the .recv_read flag indicates whether somebody is interested in the
data read from the socket.
Usually, you would expect that the .reading should be false when
.recv_read is false, but it gets even more tricky with TLSStream as
the TLS protocol might need to read from the socket even when sending
data.
Fix the usage of the .recv_read and .reading flags in the TLSStream
to their true meaning - which mostly consist of using .recv_read
everywhere and then wrapping isc_nm_read() and isc_nm_read_stop()
with the .reading flag.
4. The TLS failed read helper has been modified to resemble the TCP code
as much as possible, clearing and re-setting the .recv_read flag in
the TCP timeout code has been fixed and .recv_read is now cleared
when isc_nm_read_stop() has been called on the streaming socket.
5. The use of Network Manager in the named_controlconf, isccc_ccmsg, and
isc_httpd units have been greatly simplified due to the improved design.
6. More unit tests for TCP and TLS testing the shutdown conditions have
been added.
Co-authored-by: Ondřej Surý <ondrej@isc.org>
Co-authored-by: Artem Boldariev <artem@isc.org>
2023-04-13 17:27:50 +02:00
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
REQUIRE(handle->sock->tid == isc_tid());
|
|
|
|
|
|
|
|
REQUIRE(handle->sock->tid == isc_tid());
|
2020-06-05 17:32:36 -07:00
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
2020-09-05 11:07:40 -07:00
|
|
|
case isc_nm_udpsocket:
|
2023-07-12 15:25:38 +03:00
|
|
|
case isc_nm_proxyudpsocket:
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
Fix the streaming read callback shutdown logic
When shutting down TCP sockets, the read callback calling logic was
flawed, it would call either one less callback or one extra. Fix the
logic in the way:
1. When isc_nm_read() has been called but isc_nm_read_stop() hasn't on
the handle, the read callback will be called with ISC_R_CANCELED to
cancel active reading from the socket/handle.
2. When isc_nm_read() has been called and isc_nm_read_stop() has been
called on the on the handle, the read callback will be called with
ISC_R_SHUTTINGDOWN to signal that the dormant (not-reading) socket
is being shut down.
3. The .reading and .recv_read flags are little bit tricky. The
.reading flag indicates if the outer layer is reading the data (that
would be uv_tcp_t for TCP and isc_nmsocket_t (TCP) for TLSStream),
the .recv_read flag indicates whether somebody is interested in the
data read from the socket.
Usually, you would expect that the .reading should be false when
.recv_read is false, but it gets even more tricky with TLSStream as
the TLS protocol might need to read from the socket even when sending
data.
Fix the usage of the .recv_read and .reading flags in the TLSStream
to their true meaning - which mostly consist of using .recv_read
everywhere and then wrapping isc_nm_read() and isc_nm_read_stop()
with the .reading flag.
4. The TLS failed read helper has been modified to resemble the TCP code
as much as possible, clearing and re-setting the .recv_read flag in
the TCP timeout code has been fixed and .recv_read is now cleared
when isc_nm_read_stop() has been called on the streaming socket.
5. The use of Network Manager in the named_controlconf, isccc_ccmsg, and
isc_httpd units have been greatly simplified due to the improved design.
6. More unit tests for TCP and TLS testing the shutdown conditions have
been added.
Co-authored-by: Ondřej Surý <ondrej@isc.org>
Co-authored-by: Artem Boldariev <artem@isc.org>
2023-04-13 17:27:50 +02:00
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nm_failed_read_cb(handle->sock, ISC_R_CANCELED, false);
|
2022-06-20 20:30:12 +03:00
|
|
|
break;
|
2020-03-20 11:55:10 +01:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-03-20 11:55:10 +01:00
|
|
|
}
|
2023-04-13 17:27:50 +02:00
|
|
|
|
|
|
|
isc_nmhandle_detach(&handle);
|
|
|
|
}
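The shutdown semantics described in the commit message above imply a consumer-side pattern roughly like the following. This is a minimal, hypothetical sketch and not part of the original file: example_read_cb(), example_state_t, process_message() and release_state() are invented names; only the isc_* types and result codes come from the surrounding code.

static void
example_read_cb(isc_nmhandle_t *handle, isc_result_t eresult,
		isc_region_t *region, void *cbarg) {
	example_state_t *state = cbarg;

	switch (eresult) {
	case ISC_R_SUCCESS:
		/* Normal data: consume it and keep reading. */
		process_message(state, handle, region);
		return;
	case ISC_R_CANCELED:
		/*
		 * An active read (isc_nm_read() without a matching
		 * isc_nm_read_stop()) was cancelled.
		 */
		release_state(state, handle);
		return;
	case ISC_R_SHUTTINGDOWN:
		/* The dormant (not-reading) socket is being shut down. */
		release_state(state, handle);
		return;
	default:
		/* Any other error: treat it as a failed read. */
		release_state(state, handle);
		return;
	}
}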
|
|
|
|
|
|
|
|
void
|
|
|
|
isc_nm_cancelread(isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
/* Running this directly could cause a dead-lock */
|
|
|
|
isc_nmhandle_ref(handle);
|
|
|
|
isc_async_run(handle->sock->worker->loop, cancelread_cb, handle);
|
2020-03-20 11:55:10 +01:00
|
|
|
}
|
|
|
|
|
2020-10-21 12:52:09 +02:00
|
|
|
void
|
2022-08-29 10:55:10 +02:00
|
|
|
isc_nm_read_stop(isc_nmhandle_t *handle) {
|
2020-07-01 16:17:09 -07:00
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
isc_nmsocket_t *sock = handle->sock;
|
2020-06-05 17:32:36 -07:00
|
|
|
|
2020-03-20 11:55:10 +01:00
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tcpsocket:
|
2022-08-29 10:55:10 +02:00
|
|
|
isc__nm_tcp_read_stop(handle);
|
2020-10-21 12:52:09 +02:00
|
|
|
break;
|
2021-01-25 17:44:39 +02:00
|
|
|
case isc_nm_tlssocket:
|
2022-08-29 10:55:10 +02:00
|
|
|
isc__nm_tls_read_stop(handle);
|
2021-01-25 17:44:39 +02:00
|
|
|
break;
|
2023-03-16 12:50:04 +02:00
|
|
|
case isc_nm_proxystreamsocket:
|
|
|
|
isc__nm_proxystream_read_stop(handle);
|
|
|
|
break;
|
2020-03-20 11:55:10 +01:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-03-20 11:55:10 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-13 17:27:50 +02:00
|
|
|
void
|
|
|
|
isc_nmhandle_close(isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
isc__nmsocket_clearcb(handle->sock);
|
|
|
|
isc__nmsocket_prep_destroy(handle->sock);
|
|
|
|
}
|
|
|
|
|
2020-03-20 11:55:10 +01:00
|
|
|
void
|
|
|
|
isc_nm_stoplistening(isc_nmsocket_t *sock) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
2020-06-05 17:32:36 -07:00
|
|
|
|
2020-03-20 11:55:10 +01:00
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_udplistener:
|
|
|
|
isc__nm_udp_stoplistening(sock);
|
|
|
|
break;
|
|
|
|
case isc_nm_tcplistener:
|
|
|
|
isc__nm_tcp_stoplistening(sock);
|
|
|
|
break;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnslistener:
|
|
|
|
isc__nm_streamdns_stoplistening(sock);
|
|
|
|
break;
|
2021-04-21 13:52:15 +02:00
|
|
|
case isc_nm_tlslistener:
|
|
|
|
isc__nm_tls_stoplistening(sock);
|
|
|
|
break;
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
2020-12-07 14:19:10 +02:00
|
|
|
case isc_nm_httplistener:
|
|
|
|
isc__nm_http_stoplistening(sock);
|
|
|
|
break;
|
2021-04-21 13:52:15 +02:00
|
|
|
#endif
|
2023-03-16 12:50:04 +02:00
|
|
|
case isc_nm_proxystreamlistener:
|
|
|
|
isc__nm_proxystream_stoplistening(sock);
|
|
|
|
break;
|
2023-07-12 15:25:38 +03:00
|
|
|
case isc_nm_proxyudplistener:
|
|
|
|
isc__nm_proxyudp_stoplistening(sock);
|
|
|
|
break;
|
2020-03-20 11:55:10 +01:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-03-20 11:55:10 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-10-14 20:45:40 +03:00
|
|
|
void
|
|
|
|
isc__nmsocket_stop(isc_nmsocket_t *listener) {
|
|
|
|
REQUIRE(VALID_NMSOCK(listener));
|
|
|
|
REQUIRE(listener->tid == isc_tid());
|
|
|
|
REQUIRE(listener->tid == 0);
|
2023-03-24 15:32:02 +01:00
|
|
|
REQUIRE(listener->type == isc_nm_httplistener ||
|
|
|
|
listener->type == isc_nm_tlslistener ||
|
2023-03-16 12:50:04 +02:00
|
|
|
listener->type == isc_nm_streamdnslistener ||
|
2023-07-12 15:25:38 +03:00
|
|
|
listener->type == isc_nm_proxystreamlistener ||
|
|
|
|
listener->type == isc_nm_proxyudplistener);
|
2023-03-24 13:37:19 +01:00
|
|
|
REQUIRE(!listener->closing);
|
2022-10-14 20:45:40 +03:00
|
|
|
|
2023-03-24 13:37:19 +01:00
|
|
|
listener->closing = true;
|
2022-10-14 20:45:40 +03:00
|
|
|
|
2023-03-24 15:32:02 +01:00
|
|
|
REQUIRE(listener->outer != NULL);
|
|
|
|
isc_nm_stoplistening(listener->outer);
|
2022-10-14 20:45:40 +03:00
|
|
|
|
|
|
|
listener->accept_cb = NULL;
|
|
|
|
listener->accept_cbarg = NULL;
|
|
|
|
listener->recv_cb = NULL;
|
|
|
|
listener->recv_cbarg = NULL;
|
|
|
|
|
2023-03-24 15:32:02 +01:00
|
|
|
isc__nmsocket_detach(&listener->outer);
|
2022-10-14 20:45:40 +03:00
|
|
|
|
2023-03-24 13:37:19 +01:00
|
|
|
listener->closed = true;
|
2022-10-14 20:45:40 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nmsocket_barrier_init(isc_nmsocket_t *listener) {
|
|
|
|
REQUIRE(listener->nchildren > 0);
|
2023-01-07 16:30:21 -08:00
|
|
|
isc_barrier_init(&listener->listen_barrier, listener->nchildren);
|
|
|
|
isc_barrier_init(&listener->stop_barrier, listener->nchildren);
|
|
|
|
listener->barriers_initialised = true;
|
2022-10-14 20:45:40 +03:00
|
|
|
}
|
|
|
|
|
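/*
 * Note: the isc__nm_{connect,read,send}cb() helpers below share one
 * dispatch pattern: the completion result is stored in the uvreq, and the
 * user callback is either invoked immediately (async == false) or
 * scheduled on the socket's loop via isc_job_run(); in both cases the
 * uvreq is released by the corresponding static isc___nm_*cb() function
 * once the user callback has returned.
 */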
2023-03-24 10:36:58 +01:00
|
|
|
static void
|
2023-03-27 22:40:57 +02:00
|
|
|
isc___nm_connectcb(void *arg) {
|
|
|
|
isc__nm_uvreq_t *uvreq = arg;
|
2023-03-24 10:36:58 +01:00
|
|
|
|
|
|
|
uvreq->cb.connect(uvreq->handle, uvreq->result, uvreq->cbarg);
|
2023-03-24 12:11:44 +01:00
|
|
|
isc__nm_uvreq_put(&uvreq);
|
2023-03-24 10:36:58 +01:00
|
|
|
}
|
|
|
|
|
2020-11-11 10:46:33 +01:00
|
|
|
void
|
2023-03-23 06:56:17 +01:00
|
|
|
isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
|
|
|
|
isc_result_t eresult, bool async) {
|
2020-11-11 10:46:33 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(VALID_UVREQ(uvreq));
|
|
|
|
REQUIRE(VALID_NMHANDLE(uvreq->handle));
|
2023-03-24 10:36:58 +01:00
|
|
|
REQUIRE(uvreq->cb.connect != NULL);
|
2020-12-02 08:54:51 +01:00
|
|
|
|
2023-03-27 22:40:57 +02:00
|
|
|
uvreq->result = eresult;
|
|
|
|
|
2023-03-23 06:56:17 +01:00
|
|
|
if (!async) {
|
2023-03-27 22:40:57 +02:00
|
|
|
isc___nm_connectcb(uvreq);
|
2022-11-23 14:03:23 +01:00
|
|
|
return;
|
2020-11-11 10:46:33 +01:00
|
|
|
}
|
2022-11-23 14:03:23 +01:00
|
|
|
|
2023-03-27 22:40:57 +02:00
|
|
|
isc_job_run(sock->worker->loop, &uvreq->job, isc___nm_connectcb, uvreq);
|
2020-11-11 10:46:33 +01:00
|
|
|
}
|
|
|
|
|
2023-03-23 06:56:17 +01:00
|
|
|
static void
|
2023-03-27 22:40:57 +02:00
|
|
|
isc___nm_readcb(void *arg) {
|
|
|
|
isc__nm_uvreq_t *uvreq = arg;
|
2021-09-28 10:08:26 +10:00
|
|
|
isc_region_t region;
|
2020-11-11 10:46:33 +01:00
|
|
|
|
2021-09-28 10:08:26 +10:00
|
|
|
region.base = (unsigned char *)uvreq->uvbuf.base;
|
|
|
|
region.length = uvreq->uvbuf.len;
|
2023-03-24 10:36:58 +01:00
|
|
|
uvreq->cb.recv(uvreq->handle, uvreq->result, &region, uvreq->cbarg);
|
2020-11-11 10:46:33 +01:00
|
|
|
|
2023-03-24 12:11:44 +01:00
|
|
|
isc__nm_uvreq_put(&uvreq);
|
2020-11-11 10:46:33 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2023-03-23 06:56:17 +01:00
|
|
|
isc__nm_readcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
|
2021-03-16 09:03:02 +01:00
|
|
|
isc_result_t eresult, bool async) {
|
2020-11-11 10:46:33 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(VALID_UVREQ(uvreq));
|
|
|
|
REQUIRE(VALID_NMHANDLE(uvreq->handle));
|
|
|
|
|
2023-03-27 22:40:57 +02:00
|
|
|
uvreq->result = eresult;
|
2023-03-24 10:36:58 +01:00
|
|
|
|
2023-03-27 22:40:57 +02:00
|
|
|
if (!async) {
|
|
|
|
isc___nm_readcb(uvreq);
|
2021-03-16 09:03:02 +01:00
|
|
|
return;
|
2020-11-11 10:46:33 +01:00
|
|
|
}
|
2021-03-16 09:03:02 +01:00
|
|
|
|
2023-03-27 22:40:57 +02:00
|
|
|
isc_job_run(sock->worker->loop, &uvreq->job, isc___nm_readcb, uvreq);
|
2020-11-11 10:46:33 +01:00
|
|
|
}
|
|
|
|
|
2023-03-23 06:56:17 +01:00
|
|
|
static void
|
2023-03-27 22:40:57 +02:00
|
|
|
isc___nm_sendcb(void *arg) {
|
|
|
|
isc__nm_uvreq_t *uvreq = arg;
|
2020-11-11 10:46:33 +01:00
|
|
|
|
2023-03-24 10:36:58 +01:00
|
|
|
uvreq->cb.send(uvreq->handle, uvreq->result, uvreq->cbarg);
|
2023-03-24 12:11:44 +01:00
|
|
|
isc__nm_uvreq_put(&uvreq);
|
2023-03-23 06:56:17 +01:00
|
|
|
}
|
2020-11-11 10:46:33 +01:00
|
|
|
|
2023-03-23 06:56:17 +01:00
|
|
|
void
|
|
|
|
isc__nm_sendcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
|
|
|
|
isc_result_t eresult, bool async) {
|
2020-11-11 10:46:33 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(VALID_UVREQ(uvreq));
|
|
|
|
REQUIRE(VALID_NMHANDLE(uvreq->handle));
|
2023-03-27 22:40:57 +02:00
|
|
|
|
2023-03-23 06:56:17 +01:00
|
|
|
uvreq->result = eresult;
|
2020-11-11 10:46:33 +01:00
|
|
|
|
2023-03-23 06:56:17 +01:00
|
|
|
if (!async) {
|
2023-03-27 22:40:57 +02:00
|
|
|
isc___nm_sendcb(uvreq);
|
2023-03-23 06:56:17 +01:00
|
|
|
return;
|
|
|
|
}
|
2020-11-11 10:46:33 +01:00
|
|
|
|
2023-03-27 22:40:57 +02:00
|
|
|
isc_job_run(sock->worker->loop, &uvreq->job, isc___nm_sendcb, uvreq);
|
2020-11-11 10:46:33 +01:00
|
|
|
}
|
|
|
|
|
2022-03-10 13:51:08 +01:00
|
|
|
static void
|
|
|
|
reset_shutdown(uv_handle_t *handle) {
|
|
|
|
isc_nmsocket_t *sock = uv_handle_get_data(handle);
|
|
|
|
|
|
|
|
isc__nmsocket_shutdown(sock);
|
2022-03-10 13:58:58 +01:00
|
|
|
isc__nmsocket_detach(&sock);
|
2022-03-10 13:51:08 +01:00
|
|
|
}
|
|
|
|
|
2022-02-15 14:41:15 +01:00
|
|
|
void
|
|
|
|
isc__nmsocket_reset(isc_nmsocket_t *sock) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
|
|
|
|
switch (sock->type) {
|
2022-02-28 10:25:06 +01:00
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
/*
|
2022-11-22 21:03:02 +02:00
|
|
|
* This can be called from the TCP write timeout.
|
2022-02-28 10:25:06 +01:00
|
|
|
*/
|
2022-02-15 14:41:15 +01:00
|
|
|
REQUIRE(sock->parent == NULL);
|
|
|
|
break;
|
2022-07-26 17:07:19 +03:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nmsocket_tls_reset(sock);
|
|
|
|
return;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
|
|
|
isc__nmsocket_streamdns_reset(sock);
|
|
|
|
return;
|
2023-03-16 12:50:04 +02:00
|
|
|
case isc_nm_proxystreamsocket:
|
|
|
|
isc__nmsocket_proxystream_reset(sock);
|
|
|
|
return;
|
2022-02-15 14:41:15 +01:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2022-02-15 14:41:15 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2022-03-10 13:58:58 +01:00
|
|
|
if (!uv_is_closing(&sock->uv_handle.handle) &&
|
|
|
|
uv_is_active(&sock->uv_handle.handle))
|
|
|
|
{
|
2022-02-15 14:41:15 +01:00
|
|
|
/*
|
|
|
|
* The real shutdown will be handled in the respective
|
|
|
|
* close functions.
|
|
|
|
*/
|
2022-03-10 13:58:58 +01:00
|
|
|
isc__nmsocket_attach(sock, &(isc_nmsocket_t *){ NULL });
|
2022-03-10 13:51:08 +01:00
|
|
|
int r = uv_tcp_close_reset(&sock->uv_handle.tcp,
|
|
|
|
reset_shutdown);
|
2024-05-08 09:27:53 +02:00
|
|
|
if (r != 0) {
|
2024-08-13 18:20:26 +02:00
|
|
|
isc_log_write(ISC_LOGCATEGORY_GENERAL,
|
2024-05-08 09:27:53 +02:00
|
|
|
ISC_LOGMODULE_NETMGR, ISC_LOG_DEBUG(1),
|
|
|
|
"TCP Reset (RST) failed: %s",
|
|
|
|
uv_strerror(r));
|
2024-05-08 09:48:08 +02:00
|
|
|
reset_shutdown(&sock->uv_handle.handle);
|
2024-05-08 09:27:53 +02:00
|
|
|
}
|
2022-03-10 13:58:58 +01:00
|
|
|
} else {
|
|
|
|
isc__nmsocket_shutdown(sock);
|
2022-02-15 14:41:15 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-31 11:48:41 +02:00
|
|
|
void
|
2021-03-30 09:25:09 +02:00
|
|
|
isc__nmsocket_shutdown(isc_nmsocket_t *sock) {
|
Refactor netmgr and add more unit tests
This is a part of the work that intends to make the netmgr stable,
testable, maintainable and tested. It contains numerous changes to
the netmgr code and unfortunately, it was not possible to split this
into smaller chunks as the work here needs to be committed as a complete
whole.
NOTE: There's quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c and it should be subject to refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and newly added tcp_quota_test)
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
including the netmgr/{udp,tcp}.c source file directly.
* The netievents that we put on the nm queue have a variable number of
members; out of these, the isc_nmsocket_t and isc_nmhandle_t always
need to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo> to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing the event.
* Cancelling the in-flight TCP connection using libuv requires calling
uv_close() on the original uv_tcp_t handle which just breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use a platform-specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}
When isc_nm_listentcp() or isc_nm_tcpconnect() is called, it was
waiting for the socket to either end up with an error (that path was fine) or
to be listening or connected, using a condition variable and mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL() - because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation would end up with error, it would be ok, as
the error variable would be unchanged.
2. the waiting thread would miss sock->{connected,listening} = `true`,
which would be set to `false` in the tcp_{listen,connect}close_cb() as
the connection would be so short-lived that the socket would be
closed before we could even start WAIT()ing
* The tcpdns has been converted to using libuv directly. Previously,
the tcpdns protocol used the tcp protocol from netmgr; this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a similar way to the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns are now not using isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads, but instead (similar to
UDP) use a per-netthread uv_loop listener. This greatly reduces the
complexity as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect, though: the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support the
load-balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1 (a minimal
illustration follows this message).
* The netmgr now has two debugging #ifdefs:
1. Already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering an assertion failure. This option would
reduce performance when enabled, but in theory, it could be enabled
on low-performance systems.
2. A new NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging that allows the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach - build the stack using only libuv and openssl.
* Limit but not assert the tcp buffer size in tcp_alloc_cb
Closes: #2061
2020-11-12 10:32:18 +01:00
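As a minimal illustration of the load-balanced socket requirement mentioned in the commit message above (a hedged sketch assuming <sys/socket.h>, not the code this file actually uses; example_set_reuseport() is a hypothetical name):

#if defined(SO_REUSEPORT_LB) || defined(SO_REUSEPORT)
static int
example_set_reuseport(int fd) {
	int on = 1;
#if defined(SO_REUSEPORT_LB)
	/* FreeBSD 12+: load-balanced listener sockets. */
	return setsockopt(fd, SOL_SOCKET, SO_REUSEPORT_LB, &on, sizeof(on));
#else
	/* Linux: SO_REUSEPORT distributes incoming traffic across sockets. */
	return setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));
#endif
}
#endif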
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_udpsocket:
|
2020-10-26 14:19:37 +01:00
|
|
|
isc__nm_udp_shutdown(sock);
|
2020-10-26 12:30:54 +01:00
|
|
|
break;
|
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_tcpsocket:
|
2020-10-26 14:19:37 +01:00
|
|
|
isc__nm_tcp_shutdown(sock);
|
2019-11-22 14:13:19 +01:00
|
|
|
break;
|
2020-11-12 10:32:18 +01:00
|
|
|
case isc_nm_udplistener:
|
|
|
|
case isc_nm_tcplistener:
|
|
|
|
return;
|
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2019-11-22 14:13:19 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-30 09:25:09 +02:00
|
|
|
static void
|
|
|
|
shutdown_walk_cb(uv_handle_t *handle, void *arg) {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2021-03-30 09:25:09 +02:00
|
|
|
UNUSED(arg);
|
|
|
|
|
|
|
|
if (uv_is_closing(handle)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
sock = uv_handle_get_data(handle);
|
|
|
|
|
2021-03-30 09:25:09 +02:00
|
|
|
switch (handle->type) {
|
|
|
|
case UV_UDP:
|
2022-03-10 13:58:58 +01:00
|
|
|
isc__nmsocket_shutdown(sock);
|
|
|
|
return;
|
2021-03-30 09:25:09 +02:00
|
|
|
case UV_TCP:
|
2022-03-10 13:58:58 +01:00
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
if (sock->parent == NULL) {
|
|
|
|
/* Reset the TCP connections on shutdown */
|
|
|
|
isc__nmsocket_reset(sock);
|
|
|
|
return;
|
|
|
|
}
|
2021-10-11 12:09:16 +02:00
|
|
|
FALLTHROUGH;
|
2022-03-10 13:58:58 +01:00
|
|
|
default:
|
|
|
|
isc__nmsocket_shutdown(sock);
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
2021-03-30 09:25:09 +02:00
|
|
|
default:
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-01-05 01:02:12 -08:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_nm_setstats(isc_nm_t *mgr, isc_stats_t *stats) {
|
2020-01-05 01:02:12 -08:00
|
|
|
REQUIRE(VALID_NM(mgr));
|
|
|
|
REQUIRE(mgr->stats == NULL);
|
|
|
|
REQUIRE(isc_stats_ncounters(stats) == isc_sockstatscounter_max);
|
|
|
|
|
|
|
|
isc_stats_attach(stats, &mgr->stats);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2021-10-02 16:26:43 -07:00
|
|
|
isc__nm_incstats(isc_nmsocket_t *sock, isc__nm_statid_t id) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(id < STATID_MAX);
|
2020-01-05 01:02:12 -08:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
if (sock->statsindex != NULL && sock->worker->netmgr->stats != NULL) {
|
|
|
|
isc_stats_increment(sock->worker->netmgr->stats,
|
|
|
|
sock->statsindex[id]);
|
2020-01-05 01:02:12 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2021-10-02 16:26:43 -07:00
|
|
|
isc__nm_decstats(isc_nmsocket_t *sock, isc__nm_statid_t id) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
|
|
|
REQUIRE(id < STATID_MAX);
|
2020-01-05 01:02:12 -08:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
if (sock->statsindex != NULL && sock->worker->netmgr->stats != NULL) {
|
|
|
|
isc_stats_decrement(sock->worker->netmgr->stats,
|
|
|
|
sock->statsindex[id]);
|
2020-01-05 01:02:12 -08:00
|
|
|
}
|
|
|
|
}
|
2020-07-21 13:29:14 +02:00
|
|
|
|
2021-07-26 13:14:41 +02:00
|
|
|
isc_result_t
|
|
|
|
isc_nm_checkaddr(const isc_sockaddr_t *addr, isc_socktype_t type) {
|
|
|
|
int proto, pf, addrlen, fd, r;
|
|
|
|
|
|
|
|
REQUIRE(addr != NULL);
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case isc_socktype_tcp:
|
|
|
|
proto = SOCK_STREAM;
|
|
|
|
break;
|
|
|
|
case isc_socktype_udp:
|
|
|
|
proto = SOCK_DGRAM;
|
|
|
|
break;
|
|
|
|
default:
|
2024-11-19 10:38:03 +01:00
|
|
|
return ISC_R_NOTIMPLEMENTED;
|
2021-07-26 13:14:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
pf = isc_sockaddr_pf(addr);
|
|
|
|
if (pf == AF_INET) {
|
|
|
|
addrlen = sizeof(struct sockaddr_in);
|
|
|
|
} else {
|
|
|
|
addrlen = sizeof(struct sockaddr_in6);
|
|
|
|
}
|
|
|
|
|
|
|
|
fd = socket(pf, proto, 0);
|
|
|
|
if (fd < 0) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return isc_errno_toresult(errno);
|
2021-07-26 13:14:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
r = bind(fd, (const struct sockaddr *)&addr->type.sa, addrlen);
|
|
|
|
if (r < 0) {
|
|
|
|
close(fd);
|
2024-11-19 10:38:03 +01:00
|
|
|
return isc_errno_toresult(errno);
|
2021-07-26 13:14:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
close(fd);
|
2024-11-19 10:38:03 +01:00
|
|
|
return ISC_R_SUCCESS;
|
2021-07-26 13:14:41 +02:00
|
|
|
}
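A short usage sketch for isc_nm_checkaddr() (hypothetical, not part of this file): probe whether a UDP socket could bind to the loopback address before configuring a listener. isc_sockaddr_fromin() is assumed to be the usual libisc constructor, and the port number is arbitrary.

static isc_result_t
example_probe_udp_loopback(void) {
	struct in_addr ina = { .s_addr = htonl(INADDR_LOOPBACK) };
	isc_sockaddr_t addr;

	isc_sockaddr_fromin(&addr, &ina, 5300);

	return isc_nm_checkaddr(&addr, isc_socktype_udp);
}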
|
|
|
|
|
2021-05-20 15:53:50 +02:00
|
|
|
#if defined(TCP_CONNECTIONTIMEOUT)
|
2020-11-12 10:32:18 +01:00
|
|
|
#define TIMEOUT_TYPE int
|
|
|
|
#define TIMEOUT_DIV 1000
|
|
|
|
#define TIMEOUT_OPTNAME TCP_CONNECTIONTIMEOUT
|
|
|
|
#elif defined(TCP_RXT_CONNDROPTIME)
|
|
|
|
#define TIMEOUT_TYPE int
|
|
|
|
#define TIMEOUT_DIV 1000
|
|
|
|
#define TIMEOUT_OPTNAME TCP_RXT_CONNDROPTIME
|
|
|
|
#elif defined(TCP_USER_TIMEOUT)
|
|
|
|
#define TIMEOUT_TYPE unsigned int
|
|
|
|
#define TIMEOUT_DIV 1
|
|
|
|
#define TIMEOUT_OPTNAME TCP_USER_TIMEOUT
|
2020-12-02 21:54:25 +01:00
|
|
|
#elif defined(TCP_KEEPINIT)
|
|
|
|
#define TIMEOUT_TYPE int
|
|
|
|
#define TIMEOUT_DIV 1000
|
|
|
|
#define TIMEOUT_OPTNAME TCP_KEEPINIT
|
2020-11-12 10:32:18 +01:00
|
|
|
#endif
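A hedged sketch of how the macros above could be applied (an illustration only, not the helper this file actually defines; example_set_connect_timeout() is a hypothetical name): the timeout is converted from milliseconds into the platform's unit and set on the connecting TCP socket.

#if defined(TIMEOUT_OPTNAME)
static int
example_set_connect_timeout(int fd, unsigned int timeout_ms) {
	TIMEOUT_TYPE timeout = timeout_ms / TIMEOUT_DIV;

	if (timeout == 0) {
		/* Avoid 0, which some platforms treat as "use the default". */
		timeout = 1;
	}

	return setsockopt(fd, IPPROTO_TCP, TIMEOUT_OPTNAME, &timeout,
			  sizeof(timeout));
}
#endif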
|
|
|
|
|
2020-12-02 20:51:38 +01:00
|
|
|
void
|
|
|
|
isc__nm_set_network_buffers(isc_nm_t *nm, uv_handle_t *handle) {
|
|
|
|
int32_t recv_buffer_size = 0;
|
|
|
|
int32_t send_buffer_size = 0;
|
|
|
|
|
|
|
|
switch (handle->type) {
|
|
|
|
case UV_TCP:
|
|
|
|
recv_buffer_size =
|
|
|
|
atomic_load_relaxed(&nm->recv_tcp_buffer_size);
|
|
|
|
send_buffer_size =
|
|
|
|
atomic_load_relaxed(&nm->send_tcp_buffer_size);
|
|
|
|
break;
|
|
|
|
case UV_UDP:
|
|
|
|
recv_buffer_size =
|
|
|
|
atomic_load_relaxed(&nm->recv_udp_buffer_size);
|
|
|
|
send_buffer_size =
|
|
|
|
atomic_load_relaxed(&nm->send_udp_buffer_size);
|
|
|
|
break;
|
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-12-02 20:51:38 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (recv_buffer_size > 0) {
|
|
|
|
int r = uv_recv_buffer_size(handle, &recv_buffer_size);
|
2022-02-15 14:51:02 +01:00
|
|
|
UV_RUNTIME_CHECK(uv_recv_buffer_size, r);
|
2020-12-02 20:51:38 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (send_buffer_size > 0) {
|
|
|
|
int r = uv_send_buffer_size(handle, &send_buffer_size);
|
2022-02-15 14:51:02 +01:00
|
|
|
UV_RUNTIME_CHECK(uv_send_buffer_size, r);
|
2020-12-02 20:51:38 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-06 16:36:17 +03:00
|
|
|
void
|
|
|
|
isc_nm_bad_request(isc_nmhandle_t *handle) {
|
2022-02-15 14:41:15 +01:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2021-07-06 16:36:17 +03:00
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
sock = handle->sock;
|
|
|
|
|
2022-02-15 14:41:15 +01:00
|
|
|
switch (sock->type) {
|
2021-07-06 16:36:17 +03:00
|
|
|
case isc_nm_udpsocket:
|
2023-07-12 15:25:38 +03:00
|
|
|
case isc_nm_proxyudpsocket:
|
2022-02-15 14:41:15 +01:00
|
|
|
return;
|
2022-07-26 17:07:19 +03:00
|
|
|
case isc_nm_tcpsocket:
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
2022-07-26 17:07:19 +03:00
|
|
|
case isc_nm_tlssocket:
|
2023-03-16 12:50:04 +02:00
|
|
|
case isc_nm_proxystreamsocket:
|
2022-02-15 14:41:15 +01:00
|
|
|
REQUIRE(sock->parent == NULL);
|
|
|
|
isc__nmsocket_reset(sock);
|
2021-07-06 16:36:17 +03:00
|
|
|
return;
|
2022-02-15 14:41:15 +01:00
|
|
|
#if HAVE_LIBNGHTTP2
|
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nm_http_bad_request(handle);
|
|
|
|
return;
|
2021-07-06 16:36:17 +03:00
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2021-07-06 16:36:17 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-01-18 10:36:34 +00:00
|
|
|
isc_result_t
|
|
|
|
isc_nm_xfr_checkperm(isc_nmhandle_t *handle) {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2023-01-18 10:36:34 +00:00
|
|
|
isc_result_t result = ISC_R_NOPERM;
|
2021-08-26 15:07:20 +03:00
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
sock = handle->sock;
|
|
|
|
|
|
|
|
switch (sock->type) {
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
2023-01-18 10:36:34 +00:00
|
|
|
result = isc__nm_streamdns_xfr_checkperm(sock);
|
|
|
|
break;
|
2021-08-26 15:07:20 +03:00
|
|
|
default:
|
2023-01-18 10:36:34 +00:00
|
|
|
break;
|
2021-08-26 15:07:20 +03:00
|
|
|
}
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return result;
|
2021-08-26 15:07:20 +03:00
|
|
|
}
|
|
|
|
|
2021-11-08 12:44:55 -08:00
|
|
|
bool
|
|
|
|
isc_nm_is_http_handle(isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return handle->sock->type == isc_nm_httpsocket;
|
2021-11-08 12:44:55 -08:00
|
|
|
}
|
|
|
|
|
2023-03-16 12:50:04 +02:00
|
|
|
static isc_nmhandle_t *
|
|
|
|
get_proxy_handle(isc_nmhandle_t *handle) {
|
|
|
|
isc_nmsocket_t *sock = NULL;
|
|
|
|
|
|
|
|
sock = handle->sock;
|
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_proxystreamsocket:
|
2023-07-12 15:25:38 +03:00
|
|
|
case isc_nm_proxyudpsocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return handle;
|
2023-05-10 22:01:36 +03:00
|
|
|
#ifdef HAVE_LIBNGHTTP2
|
|
|
|
case isc_nm_httpsocket:
|
2023-09-12 09:32:30 +02:00
|
|
|
if (sock->h2 != NULL) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return get_proxy_handle(
|
|
|
|
isc__nm_httpsession_handle(sock->h2->session));
|
2023-09-12 09:32:30 +02:00
|
|
|
}
|
2024-11-19 10:38:03 +01:00
|
|
|
return NULL;
|
2023-05-10 22:01:36 +03:00
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
2023-03-16 12:50:04 +02:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2023-05-10 22:01:36 +03:00
|
|
|
if (sock->outerhandle != NULL) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return get_proxy_handle(sock->outerhandle);
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return NULL;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
isc_nm_is_proxy_handle(isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return get_proxy_handle(handle) != NULL;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
isc_nm_is_proxy_unspec(isc_nmhandle_t *handle) {
|
|
|
|
isc_nmhandle_t *proxyhandle;
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
if (handle->sock->client) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return false;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
proxyhandle = get_proxy_handle(handle);
|
|
|
|
if (proxyhandle == NULL) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return false;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return proxyhandle->proxy_is_unspec;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
isc_sockaddr_t
|
|
|
|
isc_nmhandle_real_peeraddr(isc_nmhandle_t *handle) {
|
|
|
|
isc_sockaddr_t addr = { 0 };
|
|
|
|
isc_nmhandle_t *proxyhandle;
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
proxyhandle = get_proxy_handle(handle);
|
|
|
|
if (proxyhandle == NULL) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return isc_nmhandle_peeraddr(handle);
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
INSIST(VALID_NMSOCK(proxyhandle->sock));
|
|
|
|
|
|
|
|
if (isc_nmhandle_is_stream(proxyhandle)) {
|
|
|
|
addr = isc_nmhandle_peeraddr(proxyhandle->sock->outerhandle);
|
|
|
|
} else {
|
2023-07-12 15:25:38 +03:00
|
|
|
INSIST(proxyhandle->sock->type == isc_nm_proxyudpsocket);
|
|
|
|
addr = isc_nmhandle_peeraddr(proxyhandle->proxy_udphandle);
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return addr;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
isc_sockaddr_t
|
|
|
|
isc_nmhandle_real_localaddr(isc_nmhandle_t *handle) {
|
|
|
|
isc_sockaddr_t addr = { 0 };
|
|
|
|
isc_nmhandle_t *proxyhandle;
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
|
|
|
|
proxyhandle = get_proxy_handle(handle);
|
|
|
|
if (proxyhandle == NULL) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return isc_nmhandle_localaddr(handle);
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
INSIST(VALID_NMSOCK(proxyhandle->sock));
|
|
|
|
|
|
|
|
if (isc_nmhandle_is_stream(proxyhandle)) {
|
|
|
|
addr = isc_nmhandle_localaddr(proxyhandle->sock->outerhandle);
|
|
|
|
} else {
|
2023-07-12 15:25:38 +03:00
|
|
|
INSIST(proxyhandle->sock->type == isc_nm_proxyudpsocket);
|
|
|
|
addr = isc_nmhandle_localaddr(proxyhandle->proxy_udphandle);
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return addr;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
isc__nm_valid_proxy_addresses(const isc_sockaddr_t *src,
|
|
|
|
const isc_sockaddr_t *dst) {
|
|
|
|
struct in_addr inv4 = { 0 };
|
|
|
|
struct in6_addr inv6 = { 0 };
|
|
|
|
isc_netaddr_t zerov4 = { 0 }, zerov6 = { 0 };
|
|
|
|
isc_netaddr_t src_addr = { 0 }, dst_addr = { 0 };
|
|
|
|
|
|
|
|
if (src == NULL || dst == NULL) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return false;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We should not allow using port 0 in source addresses either, but we
|
|
|
|
* have a precedent of a tool that issues port 0 in the source
|
|
|
|
* addresses (kdig).
|
|
|
|
*/
|
|
|
|
if (isc_sockaddr_getport(dst) == 0) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return false;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Anybody using zeroes in source or destination addresses is not
|
|
|
|
* a friend. Considering that most of the upper level code is
|
|
|
|
* written with consideration that both source and destination
|
|
|
|
* addresses are returned by the OS and should be valid, we should
|
|
|
|
* discard such suspicious addresses. Also, keep in mind that both
|
|
|
|
* "0.0.0.0" and "::" match all interfaces when using as listener
|
|
|
|
* addresses.
|
|
|
|
*/
|
|
|
|
isc_netaddr_fromin(&zerov4, &inv4);
|
|
|
|
isc_netaddr_fromin6(&zerov6, &inv6);
|
|
|
|
|
|
|
|
isc_netaddr_fromsockaddr(&src_addr, src);
|
|
|
|
isc_netaddr_fromsockaddr(&dst_addr, dst);
|
|
|
|
|
|
|
|
INSIST(isc_sockaddr_pf(src) == isc_sockaddr_pf(dst));
|
|
|
|
|
|
|
|
switch (isc_sockaddr_pf(src)) {
|
|
|
|
case AF_INET:
|
|
|
|
if (isc_netaddr_equal(&src_addr, &zerov4)) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return false;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (isc_netaddr_equal(&dst_addr, &zerov4)) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return false;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case AF_INET6:
|
|
|
|
if (isc_netaddr_equal(&src_addr, &zerov6)) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return false;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (isc_netaddr_equal(&dst_addr, &zerov6)) {
|
2024-11-19 10:38:03 +01:00
|
|
|
return false;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return true;
|
2023-03-16 12:50:04 +02:00
|
|
|
}
|
|
|
|
|
2021-10-06 14:09:53 +03:00
|
|
|
void
|
|
|
|
isc_nm_set_maxage(isc_nmhandle_t *handle, const uint32_t ttl) {
|
2021-11-08 12:44:55 -08:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2021-10-06 14:09:53 +03:00
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
2023-03-24 13:37:19 +01:00
|
|
|
REQUIRE(!handle->sock->client);
|
2021-10-06 14:09:53 +03:00
|
|
|
|
2021-11-08 12:44:55 -08:00
|
|
|
#if !HAVE_LIBNGHTTP2
|
|
|
|
UNUSED(ttl);
|
|
|
|
#endif
|
|
|
|
|
2021-10-06 14:09:53 +03:00
|
|
|
sock = handle->sock;
|
|
|
|
switch (sock->type) {
|
|
|
|
#if HAVE_LIBNGHTTP2
|
|
|
|
case isc_nm_httpsocket:
|
|
|
|
isc__nm_http_set_maxage(handle, ttl);
|
|
|
|
break;
|
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
|
|
|
case isc_nm_udpsocket:
|
2023-07-12 15:25:38 +03:00
|
|
|
case isc_nm_proxyudpsocket:
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
2021-10-06 14:09:53 +03:00
|
|
|
return;
|
|
|
|
break;
|
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
case isc_nm_tlssocket:
|
2023-03-16 12:50:04 +02:00
|
|
|
case isc_nm_proxystreamsocket:
|
2021-10-06 14:09:53 +03:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2021-10-06 14:09:53 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-11-15 17:42:15 +02:00
|
|
|
isc_nmsocket_type
|
|
|
|
isc_nm_socket_type(const isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return handle->sock->type;
|
2021-11-15 17:42:15 +02:00
|
|
|
}
|
|
|
|
|
2024-10-04 13:14:52 +03:00
|
|
|
isc_nm_proxy_type_t
|
|
|
|
isc_nmhandle_proxy_type(isc_nmhandle_t *handle) {
|
|
|
|
isc_nmhandle_t *proxyhandle;
|
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
proxyhandle = get_proxy_handle(handle);
|
|
|
|
|
|
|
|
if (proxyhandle == NULL) {
|
|
|
|
return ISC_NM_PROXY_NONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isc_nm_has_encryption(proxyhandle)) {
|
|
|
|
return ISC_NM_PROXY_ENCRYPTED;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ISC_NM_PROXY_PLAIN;
|
|
|
|
}
|
|
|
|
|
2021-11-16 13:35:37 +02:00
|
|
|
bool
|
|
|
|
isc_nm_has_encryption(const isc_nmhandle_t *handle) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
switch (handle->sock->type) {
|
|
|
|
case isc_nm_tlssocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return true;
|
2021-11-16 13:35:37 +02:00
|
|
|
#if HAVE_LIBNGHTTP2
|
|
|
|
case isc_nm_httpsocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return isc__nm_http_has_encryption(handle);
|
2021-11-16 13:35:37 +02:00
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return isc__nm_streamdns_has_encryption(handle);
|
2023-10-17 20:36:58 +03:00
|
|
|
case isc_nm_proxystreamsocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return isc__nm_proxystream_has_encryption(handle);
|
2021-11-16 13:35:37 +02:00
|
|
|
default:
|
2024-11-19 10:38:03 +01:00
|
|
|
return false;
|
2021-11-16 13:35:37 +02:00
|
|
|
};
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return false;
|
2021-11-16 13:35:37 +02:00
|
|
|
}
|
|
|
|
|
2022-01-13 14:35:24 +02:00
|
|
|
const char *
|
|
|
|
isc_nm_verify_tls_peer_result_string(const isc_nmhandle_t *handle) {
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_nmsocket_t *sock = NULL;
|
2022-01-13 14:35:24 +02:00
|
|
|
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
sock = handle->sock;
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tlssocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return isc__nm_tls_verify_tls_peer_result_string(handle);
|
2022-01-13 14:35:24 +02:00
|
|
|
break;
|
2023-03-16 12:50:04 +02:00
|
|
|
case isc_nm_proxystreamsocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return isc__nm_proxystream_verify_tls_peer_result_string(
|
|
|
|
handle);
|
2023-03-16 12:50:04 +02:00
|
|
|
break;
|
2022-10-18 15:36:00 +03:00
|
|
|
#if HAVE_LIBNGHTTP2
|
2022-01-13 14:35:24 +02:00
|
|
|
case isc_nm_httpsocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return isc__nm_http_verify_tls_peer_result_string(handle);
|
2022-01-13 14:35:24 +02:00
|
|
|
break;
|
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return isc__nm_streamdns_verify_tls_peer_result_string(handle);
|
2022-06-20 20:30:12 +03:00
|
|
|
break;
|
2022-01-13 14:35:24 +02:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return NULL;
|
2022-01-13 14:35:24 +02:00
|
|
|
}
|
|
|
|
|
2023-03-23 23:47:33 +01:00
|
|
|
typedef struct settlsctx_data {
|
|
|
|
isc_nmsocket_t *listener;
|
|
|
|
isc_tlsctx_t *tlsctx;
|
|
|
|
} settlsctx_data_t;
|
2022-03-31 13:47:48 +03:00
|
|
|
|
2023-03-23 23:47:33 +01:00
|
|
|
static void
|
|
|
|
settlsctx_cb(void *arg) {
|
|
|
|
settlsctx_data_t *data = arg;
|
|
|
|
const uint32_t tid = isc_tid();
|
|
|
|
isc_nmsocket_t *listener = data->listener;
|
|
|
|
isc_tlsctx_t *tlsctx = data->tlsctx;
|
|
|
|
isc__networker_t *worker = &listener->worker->netmgr->workers[tid];
|
2022-03-31 13:47:48 +03:00
|
|
|
|
2023-03-23 23:47:33 +01:00
|
|
|
isc_mem_put(worker->loop->mctx, data, sizeof(*data));
|
|
|
|
|
|
|
|
REQUIRE(listener->type == isc_nm_tlslistener);
|
|
|
|
|
|
|
|
isc__nm_async_tls_set_tlsctx(listener, tlsctx, tid);
|
|
|
|
|
|
|
|
isc__nmsocket_detach(&listener);
|
|
|
|
isc_tlsctx_free(&tlsctx);
|
2022-03-31 13:47:48 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
set_tlsctx_workers(isc_nmsocket_t *listener, isc_tlsctx_t *tlsctx) {
|
2022-10-14 21:36:51 +03:00
|
|
|
const size_t nworkers =
|
|
|
|
(size_t)isc_loopmgr_nloops(listener->worker->netmgr->loopmgr);
|
2022-03-31 13:47:48 +03:00
|
|
|
/* Update the TLS context reference for every worker thread. */
|
2022-10-14 21:36:51 +03:00
|
|
|
for (size_t i = 0; i < nworkers; i++) {
|
|
|
|
isc__networker_t *worker =
|
|
|
|
&listener->worker->netmgr->workers[i];
|
2023-08-23 11:05:14 +02:00
|
|
|
settlsctx_data_t *data = isc_mem_cget(worker->loop->mctx, 1,
|
|
|
|
sizeof(*data));
|
2023-03-23 23:47:33 +01:00
|
|
|
|
|
|
|
isc__nmsocket_attach(listener, &data->listener);
|
|
|
|
isc_tlsctx_attach(tlsctx, &data->tlsctx);
|
|
|
|
|
|
|
|
isc_async_run(worker->loop, settlsctx_cb, data);
|
2022-03-31 13:47:48 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc_nmsocket_set_tlsctx(isc_nmsocket_t *listener, isc_tlsctx_t *tlsctx) {
|
|
|
|
REQUIRE(VALID_NMSOCK(listener));
|
|
|
|
REQUIRE(tlsctx != NULL);
|
|
|
|
|
|
|
|
switch (listener->type) {
|
|
|
|
#if HAVE_LIBNGHTTP2
|
|
|
|
case isc_nm_httplistener:
|
|
|
|
/*
|
|
|
|
* We handle HTTP listener sockets differently, as they rely
|
|
|
|
* on underlying TLS sockets for networking. The TLS context
|
|
|
|
* will get passed to these underlying sockets via the call to
|
|
|
|
* isc__nm_http_set_tlsctx().
|
|
|
|
*/
|
|
|
|
isc__nm_http_set_tlsctx(listener, tlsctx);
|
|
|
|
break;
|
2022-10-18 15:36:00 +03:00
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
2022-03-31 13:47:48 +03:00
|
|
|
case isc_nm_tlslistener:
|
|
|
|
set_tlsctx_workers(listener, tlsctx);
|
|
|
|
break;
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnslistener:
|
|
|
|
isc__nm_streamdns_set_tlsctx(listener, tlsctx);
|
|
|
|
break;
|
2023-10-17 20:36:58 +03:00
|
|
|
case isc_nm_proxystreamlistener:
|
|
|
|
isc__nm_proxystream_set_tlsctx(listener, tlsctx);
|
|
|
|
break;
|
2022-03-31 13:47:48 +03:00
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
}
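/*
 * Editor's note (illustrative sketch, not part of the original source):
 * for a TLS listener, isc_nmsocket_set_tlsctx() makes every worker attach
 * its own reference to the new context, so a caller that created a fresh
 * context (e.g. after reloading certificates) can drop its local reference
 * once the call returns.  The wrapper name below is hypothetical.
 */
#if 0
static void
swap_listener_tlsctx(isc_nmsocket_t *tls_listener, isc_tlsctx_t *newctx) {
	/* Hands a per-worker reference to 'newctx' to every event loop. */
	isc_nmsocket_set_tlsctx(tls_listener, newctx);

	/* The caller's own reference is no longer needed. */
	isc_tlsctx_free(&newctx);
}
#endif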
|
|
|
|
|
2022-06-22 16:45:28 +03:00
|
|
|
void
|
|
|
|
isc_nmsocket_set_max_streams(isc_nmsocket_t *listener,
|
|
|
|
const uint32_t max_streams) {
|
|
|
|
REQUIRE(VALID_NMSOCK(listener));
|
|
|
|
switch (listener->type) {
|
|
|
|
#if HAVE_LIBNGHTTP2
|
|
|
|
case isc_nm_httplistener:
|
|
|
|
isc__nm_http_set_max_streams(listener, max_streams);
|
|
|
|
break;
|
|
|
|
#endif /* HAVE_LIBNGHTTP2 */
|
|
|
|
default:
|
|
|
|
UNUSED(max_streams);
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2022-04-22 15:59:11 +03:00
|
|
|
void
|
|
|
|
isc__nmsocket_log_tls_session_reuse(isc_nmsocket_t *sock, isc_tls_t *tls) {
|
|
|
|
const int log_level = ISC_LOG_DEBUG(1);
|
|
|
|
char client_sabuf[ISC_SOCKADDR_FORMATSIZE];
|
|
|
|
char local_sabuf[ISC_SOCKADDR_FORMATSIZE];
|
|
|
|
|
|
|
|
REQUIRE(tls != NULL);
|
|
|
|
|
2024-08-13 18:20:26 +02:00
|
|
|
if (!isc_log_wouldlog(log_level)) {
|
2022-04-22 15:59:11 +03:00
|
|
|
return;
|
|
|
|
};
|
|
|
|
|
|
|
|
isc_sockaddr_format(&sock->peer, client_sabuf, sizeof(client_sabuf));
|
|
|
|
isc_sockaddr_format(&sock->iface, local_sabuf, sizeof(local_sabuf));
|
2022-12-07 09:45:34 +01:00
|
|
|
isc__nmsocket_log(sock, log_level, "TLS %s session %s for %s on %s",
|
|
|
|
SSL_is_server(tls) ? "server" : "client",
|
|
|
|
SSL_session_reused(tls) ? "resumed" : "created",
|
|
|
|
client_sabuf, local_sabuf);
|
2022-04-22 15:59:11 +03:00
|
|
|
}
|
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
static void
|
|
|
|
isc__networker_destroy(isc__networker_t *worker) {
|
|
|
|
isc_nm_t *netmgr = worker->netmgr;
|
|
|
|
worker->netmgr = NULL;
|
|
|
|
|
2022-12-07 09:45:34 +01:00
|
|
|
isc__netmgr_log(netmgr, ISC_LOG_DEBUG(1),
|
2022-10-26 20:10:08 -07:00
|
|
|
"Destroying network manager worker on loop %p(%d)",
|
2022-12-07 09:45:34 +01:00
|
|
|
worker->loop, isc_tid());
|
2022-07-26 13:03:45 +02:00
|
|
|
|
|
|
|
isc_loop_detach(&worker->loop);
|
|
|
|
|
2023-01-04 15:57:00 +01:00
|
|
|
isc_mempool_destroy(&worker->uvreq_pool);
|
2023-09-12 19:13:45 +02:00
|
|
|
isc_mempool_destroy(&worker->nmsocket_pool);
|
2023-01-04 15:57:00 +01:00
|
|
|
|
2022-07-26 13:03:45 +02:00
|
|
|
isc_mem_putanddetach(&worker->mctx, worker->recvbuf,
|
|
|
|
ISC_NETMGR_RECVBUF_SIZE);
|
|
|
|
isc_nm_detach(&netmgr);
|
|
|
|
}
|
|
|
|
|
|
|
|
ISC_REFCOUNT_IMPL(isc__networker, isc__networker_destroy);
|
|
|
|
|
2022-12-07 09:45:34 +01:00
|
|
|
void
|
|
|
|
isc__netmgr_log(const isc_nm_t *netmgr, int level, const char *fmt, ...) {
|
|
|
|
char msgbuf[2048];
|
|
|
|
va_list ap;
|
|
|
|
|
2024-08-13 18:20:26 +02:00
|
|
|
if (!isc_log_wouldlog(level)) {
|
2022-12-07 09:45:34 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
|
|
|
|
va_end(ap);
|
|
|
|
|
2024-08-14 14:38:07 +02:00
|
|
|
isc_log_write(ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_NETMGR, level,
|
2024-08-13 18:20:26 +02:00
|
|
|
"netmgr %p: %s", netmgr, msgbuf);
|
2022-12-07 09:45:34 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nmsocket_log(const isc_nmsocket_t *sock, int level, const char *fmt, ...) {
|
|
|
|
char msgbuf[2048];
|
|
|
|
va_list ap;
|
|
|
|
|
2024-08-13 18:20:26 +02:00
|
|
|
if (!isc_log_wouldlog(level)) {
|
2022-12-07 09:45:34 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
|
|
|
|
va_end(ap);
|
|
|
|
|
2024-08-14 14:38:07 +02:00
|
|
|
isc_log_write(ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_NETMGR, level,
|
2024-08-13 18:20:26 +02:00
|
|
|
"socket %p: %s", sock, msgbuf);
|
2022-12-07 09:45:34 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc__nmhandle_log(const isc_nmhandle_t *handle, int level, const char *fmt,
|
|
|
|
...) {
|
|
|
|
char msgbuf[2048];
|
|
|
|
va_list ap;
|
|
|
|
|
2024-08-13 18:20:26 +02:00
|
|
|
if (!isc_log_wouldlog(level)) {
|
2022-12-07 09:45:34 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
vsnprintf(msgbuf, sizeof(msgbuf), fmt, ap);
|
|
|
|
va_end(ap);
|
|
|
|
|
|
|
|
isc__nmsocket_log(handle->sock, level, "handle %p: %s", handle, msgbuf);
|
|
|
|
}
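/*
 * Editor's note (illustrative, not part of the original source): the three
 * logging helpers above share one pattern: return early when the level
 * would not be logged, format the message into a stack buffer, and prefix
 * it with the object pointer.  Callers treat them as printf-style
 * functions; the wrapper below is a hypothetical example.
 */
#if 0
static void
log_read_result(isc_nmsocket_t *sock, isc_result_t eresult, size_t nbytes) {
	isc__nmsocket_log(sock, ISC_LOG_DEBUG(3),
			  "read finished with %s after %zu bytes",
			  isc_result_totext(eresult), nbytes);
}
#endif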
|
|
|
|
|
2023-03-16 12:50:04 +02:00
|
|
|
void
|
|
|
|
isc__nm_received_proxy_header_log(isc_nmhandle_t *handle,
|
|
|
|
const isc_proxy2_command_t cmd,
|
|
|
|
const int socktype,
|
|
|
|
const isc_sockaddr_t *restrict src_addr,
|
|
|
|
const isc_sockaddr_t *restrict dst_addr,
|
|
|
|
const isc_region_t *restrict tlvs) {
|
|
|
|
const int log_level = ISC_LOG_DEBUG(1);
|
|
|
|
isc_sockaddr_t real_local, real_peer;
|
|
|
|
char real_local_fmt[ISC_SOCKADDR_FORMATSIZE] = { 0 };
|
|
|
|
char real_peer_fmt[ISC_SOCKADDR_FORMATSIZE] = { 0 };
|
|
|
|
char common_msg[512] = { 0 };
|
|
|
|
const char *proto = NULL;
|
|
|
|
const char *real_addresses_msg =
|
|
|
|
"real source and destination addresses are used";
|
|
|
|
|
2024-08-13 18:20:26 +02:00
|
|
|
if (!isc_log_wouldlog(log_level)) {
|
2023-03-16 12:50:04 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isc_nmhandle_is_stream(handle)) {
|
|
|
|
proto = isc_nm_has_encryption(handle) ? "TLS" : "TCP";
|
|
|
|
} else {
|
|
|
|
proto = "UDP";
|
|
|
|
}
|
|
|
|
|
|
|
|
real_local = isc_nmhandle_real_localaddr(handle);
|
|
|
|
real_peer = isc_nmhandle_real_peeraddr(handle);
|
|
|
|
|
|
|
|
isc_sockaddr_format(&real_local, real_local_fmt,
|
|
|
|
sizeof(real_local_fmt));
|
|
|
|
|
|
|
|
isc_sockaddr_format(&real_peer, real_peer_fmt, sizeof(real_peer_fmt));
|
|
|
|
|
|
|
|
(void)snprintf(common_msg, sizeof(common_msg),
|
|
|
|
"Received a PROXYv2 header from %s on %s over %s",
|
|
|
|
real_peer_fmt, real_local_fmt, proto);
|
|
|
|
|
|
|
|
if (cmd == ISC_PROXY2_CMD_LOCAL) {
|
2024-08-14 14:38:07 +02:00
|
|
|
isc_log_write(ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_NETMGR,
|
2024-08-13 18:20:26 +02:00
|
|
|
log_level, "%s: command: LOCAL (%s)", common_msg,
|
2023-03-16 12:50:04 +02:00
|
|
|
real_addresses_msg);
|
|
|
|
return;
|
|
|
|
} else if (cmd == ISC_PROXY2_CMD_PROXY) {
|
|
|
|
const char *tlvs_msg = tlvs == NULL ? "no" : "yes";
|
|
|
|
const char *socktype_name = NULL;
|
|
|
|
const char *src_addr_msg = "(none)", *dst_addr_msg = "(none)";
|
|
|
|
char src_addr_fmt[ISC_SOCKADDR_FORMATSIZE] = { 0 };
|
|
|
|
char dst_addr_fmt[ISC_SOCKADDR_FORMATSIZE] = { 0 };
|
|
|
|
|
|
|
|
switch (socktype) {
|
|
|
|
case 0:
|
2024-08-14 14:38:07 +02:00
|
|
|
isc_log_write(ISC_LOGCATEGORY_GENERAL,
|
2023-03-16 12:50:04 +02:00
|
|
|
ISC_LOGMODULE_NETMGR, log_level,
|
|
|
|
"%s: command: PROXY (unspecified address "
|
|
|
|
"and socket type, %s)",
|
|
|
|
common_msg, real_addresses_msg);
|
|
|
|
return;
|
|
|
|
case SOCK_STREAM:
|
|
|
|
socktype_name = "SOCK_STREAM";
|
|
|
|
break;
|
|
|
|
case SOCK_DGRAM:
|
|
|
|
socktype_name = "SOCK_DGRAM";
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (src_addr) {
|
|
|
|
isc_sockaddr_format(src_addr, src_addr_fmt,
|
|
|
|
sizeof(src_addr_fmt));
|
|
|
|
src_addr_msg = src_addr_fmt;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (dst_addr) {
|
|
|
|
isc_sockaddr_format(dst_addr, dst_addr_fmt,
|
|
|
|
sizeof(dst_addr_fmt));
|
|
|
|
dst_addr_msg = dst_addr_fmt;
|
|
|
|
}
|
|
|
|
|
2024-08-14 14:38:07 +02:00
|
|
|
isc_log_write(ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_NETMGR,
|
2024-08-13 18:20:26 +02:00
|
|
|
log_level,
|
2023-03-16 12:50:04 +02:00
|
|
|
"%s: command: PROXY, socket type: %s, source: "
|
|
|
|
"%s, destination: %s, TLVs: %s",
|
|
|
|
common_msg, socktype_name, src_addr_msg,
|
|
|
|
dst_addr_msg, tlvs_msg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-10-18 15:21:10 +03:00
|
|
|
void
|
|
|
|
isc__nmhandle_set_manual_timer(isc_nmhandle_t *handle, const bool manual) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
2023-01-03 08:27:54 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
isc_nmsocket_t *sock = handle->sock;
|
2022-10-18 15:21:10 +03:00
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tcpsocket:
|
|
|
|
isc__nmhandle_tcp_set_manual_timer(handle, manual);
|
|
|
|
return;
|
2022-10-20 15:40:51 +03:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nmhandle_tls_set_manual_timer(handle, manual);
|
|
|
|
return;
|
2023-03-16 12:50:04 +02:00
|
|
|
case isc_nm_proxystreamsocket:
|
|
|
|
isc__nmhandle_proxystream_set_manual_timer(handle, manual);
|
|
|
|
return;
|
2022-10-18 15:21:10 +03:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
|
|
|
|
UNREACHABLE();
|
|
|
|
}
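/*
 * Editor's note (sketch, not part of the original source): a transport
 * layered on top of a stream handle that wants to own read-timeout
 * handling switches the handle to manual timer mode and then arms the
 * timer itself.  The helper name is hypothetical, the 30000 ms value is
 * arbitrary, and isc_nmhandle_settimeout() is assumed to be the public
 * timeout setter.
 */
#if 0
static void
take_over_read_timeouts(isc_nmhandle_t *handle) {
	/* Stop the transport from restarting the read timer on its own. */
	isc__nmhandle_set_manual_timer(handle, true);

	/* Arm the read timer explicitly (value in milliseconds). */
	isc_nmhandle_settimeout(handle, 30000);
}
#endif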
|
|
|
|
|
2022-08-03 14:46:33 +03:00
|
|
|
void
|
|
|
|
isc__nmhandle_get_selected_alpn(isc_nmhandle_t *handle,
|
|
|
|
const unsigned char **alpn,
|
|
|
|
unsigned int *alpnlen) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
2023-01-03 08:27:54 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
isc_nmsocket_t *sock = handle->sock;
|
2022-08-03 14:46:33 +03:00
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tlssocket:
|
|
|
|
isc__nmhandle_tls_get_selected_alpn(handle, alpn, alpnlen);
|
|
|
|
return;
|
2023-10-17 20:36:58 +03:00
|
|
|
case isc_nm_proxystreamsocket:
|
|
|
|
isc__nmhandle_proxystream_get_selected_alpn(handle, alpn,
|
|
|
|
alpnlen);
|
|
|
|
return;
|
2022-08-03 14:46:33 +03:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
}
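/*
 * Editor's note (illustrative sketch, not part of the original source):
 * a caller can use the selected-ALPN accessor to check which protocol was
 * negotiated on a TLS (or PROXY-over-TLS) handle; "dot" is the ALPN token
 * for DNS over TLS.  The function name below is hypothetical and assumes
 * <string.h> is available for memcmp().
 */
#if 0
static bool
handle_negotiated_dot(isc_nmhandle_t *handle) {
	const unsigned char *alpn = NULL;
	unsigned int alpnlen = 0;

	isc__nmhandle_get_selected_alpn(handle, &alpn, &alpnlen);

	return alpn != NULL && alpnlen == 3 && memcmp(alpn, "dot", 3) == 0;
}
#endif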
|
|
|
|
|
2022-08-25 22:18:15 +03:00
|
|
|
isc_result_t
|
|
|
|
isc_nmhandle_set_tcp_nodelay(isc_nmhandle_t *handle, const bool value) {
|
|
|
|
REQUIRE(VALID_NMHANDLE(handle));
|
2023-01-03 08:27:54 +01:00
|
|
|
REQUIRE(VALID_NMSOCK(handle->sock));
|
|
|
|
|
|
|
|
isc_result_t result = ISC_R_FAILURE;
|
|
|
|
isc_nmsocket_t *sock = handle->sock;
|
2022-08-25 22:18:15 +03:00
|
|
|
|
|
|
|
switch (sock->type) {
|
|
|
|
case isc_nm_tcpsocket: {
|
|
|
|
uv_os_fd_t tcp_fd = (uv_os_fd_t)-1;
|
|
|
|
(void)uv_fileno((uv_handle_t *)&sock->uv_handle.tcp, &tcp_fd);
|
|
|
|
RUNTIME_CHECK(tcp_fd != (uv_os_fd_t)-1);
|
|
|
|
result = isc__nm_socket_tcp_nodelay((uv_os_sock_t)tcp_fd,
|
|
|
|
value);
|
|
|
|
} break;
|
2022-08-25 22:37:26 +03:00
|
|
|
case isc_nm_tlssocket:
|
|
|
|
result = isc__nmhandle_tls_set_tcp_nodelay(handle, value);
|
|
|
|
break;
|
2023-03-16 12:50:04 +02:00
|
|
|
case isc_nm_proxystreamsocket:
|
|
|
|
result = isc__nmhandle_proxystream_set_tcp_nodelay(handle,
|
|
|
|
value);
|
|
|
|
break;
|
2022-08-25 22:18:15 +03:00
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
break;
|
|
|
|
};
|
|
|
|
|
2024-11-19 10:38:03 +01:00
|
|
|
return result;
|
2022-08-25 22:18:15 +03:00
|
|
|
}
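/*
 * Editor's note (sketch, not part of the original source): a server that
 * sends many small responses over one connection may want to disable
 * Nagle's algorithm on an accepted handle; the call is routed to the
 * underlying TCP socket whether or not TLS or PROXY framing sits on top.
 * The callback below is hypothetical and assumes the netmgr accept
 * callback signature.
 */
#if 0
static isc_result_t
accept_cb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
	UNUSED(cbarg);

	if (result != ISC_R_SUCCESS) {
		return result;
	}

	/* Best effort: a failure to set TCP_NODELAY is not fatal. */
	(void)isc_nmhandle_set_tcp_nodelay(handle, true);

	return ISC_R_SUCCESS;
}
#endif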
|
|
|
|
|
2023-01-31 13:30:12 -08:00
|
|
|
isc_sockaddr_t
|
|
|
|
isc_nmsocket_getaddr(isc_nmsocket_t *sock) {
|
|
|
|
REQUIRE(VALID_NMSOCK(sock));
|
2024-11-19 10:38:03 +01:00
|
|
|
return sock->iface;
|
2023-01-31 13:30:12 -08:00
|
|
|
}
|
|
|
|
|
2023-03-16 12:50:04 +02:00
|
|
|
void
|
|
|
|
isc_nm_proxyheader_info_init(isc_nm_proxyheader_info_t *restrict info,
|
|
|
|
isc_sockaddr_t *restrict src_addr,
|
|
|
|
isc_sockaddr_t *restrict dst_addr,
|
|
|
|
isc_region_t *restrict tlv_data) {
|
|
|
|
REQUIRE(info != NULL);
|
|
|
|
REQUIRE(src_addr != NULL);
|
|
|
|
REQUIRE(dst_addr != NULL);
|
|
|
|
REQUIRE(tlv_data == NULL ||
|
|
|
|
(tlv_data->length > 0 && tlv_data->base != NULL));
|
|
|
|
|
|
|
|
*info = (isc_nm_proxyheader_info_t){ .proxy_info.src_addr = *src_addr,
|
|
|
|
.proxy_info.dst_addr = *dst_addr };
|
|
|
|
if (tlv_data != NULL) {
|
|
|
|
info->proxy_info.tlv_data = *tlv_data;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
isc_nm_proxyheader_info_init_complete(isc_nm_proxyheader_info_t *restrict info,
|
|
|
|
isc_region_t *restrict header_data) {
|
|
|
|
REQUIRE(info != NULL);
|
|
|
|
REQUIRE(header_data != NULL);
|
|
|
|
REQUIRE(header_data->base != NULL &&
|
|
|
|
header_data->length >= ISC_PROXY2_HEADER_SIZE);
|
|
|
|
|
|
|
|
*info = (isc_nm_proxyheader_info_t){ .complete = true,
|
|
|
|
.complete_header = *header_data };
|
|
|
|
}
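/*
 * Editor's note (illustrative sketch, not part of the original source):
 * a PROXYv2 client fills the info structure either from explicit source
 * and destination addresses (optionally with extra TLVs) or from a
 * pre-built, complete header blob, as the two initializers above allow.
 * The wrapper below is hypothetical and advertises no TLVs.
 */
#if 0
static void
make_proxy_info(isc_nm_proxyheader_info_t *info, isc_sockaddr_t *client_addr,
		isc_sockaddr_t *server_addr) {
	/* NULL TLV region: only the address pair is advertised. */
	isc_nm_proxyheader_info_init(info, client_addr, server_addr, NULL);
}
#endif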
|
|
|
|
|
2023-01-03 08:27:54 +01:00
|
|
|
#if ISC_NETMGR_TRACE
|
2020-09-02 17:57:44 +02:00
|
|
|
/*
|
|
|
|
* Dump all active sockets in netmgr. We output to stderr
|
|
|
|
 * as the logger might already be shut down.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static const char *
|
|
|
|
nmsocket_type_totext(isc_nmsocket_type type) {
|
|
|
|
switch (type) {
|
|
|
|
case isc_nm_udpsocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_udpsocket";
|
2020-09-02 17:57:44 +02:00
|
|
|
case isc_nm_udplistener:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_udplistener";
|
2020-09-02 17:57:44 +02:00
|
|
|
case isc_nm_tcpsocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_tcpsocket";
|
2020-09-02 17:57:44 +02:00
|
|
|
case isc_nm_tcplistener:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_tcplistener";
|
2021-01-25 17:44:39 +02:00
|
|
|
case isc_nm_tlssocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_tlssocket";
|
2021-01-25 17:44:39 +02:00
|
|
|
case isc_nm_tlslistener:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_tlslistener";
|
2020-12-07 14:19:10 +02:00
|
|
|
case isc_nm_httplistener:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_httplistener";
|
refactor outgoing HTTP connection support
- style, cleanup, and removal of unnecessary code.
- combined isc_nm_http_add_endpoint() and isc_nm_http_add_doh_endpoint()
into one function, renamed isc_http_endpoint().
- moved isc_nm_http_connect_send_request() into doh_test.c as a helper
function; removed it from the public API.
- renamed isc_http2 and isc_nm_http2 types and functions to just isc_http
and isc_nm_http, for consistency with other existing names.
- shortened a number of long names.
- the caller is now responsible for determining the peer address
in isc_nm_httpconnect(); this eliminates the need to parse the URI
and the dependency on an external resolver.
- the caller is also now responsible for creating the SSL client context,
for consistency with isc_nm_tlsdnsconnect().
- added setter functions for HTTP/2 ALPN. Instead of setting up ALPN in
isc_tlsctx_createclient(), we now have a function
isc_tlsctx_enable_http2client_alpn() that can be run from
isc_nm_httpconnect().
- refactored isc_nm_httprequest() into separate read and send functions.
When isc_nm_send() or isc_nm_read() is called on an http socket, it will
be stored until a corresponding isc_nm_read() or _send() arrives; when
we have both halves of the pair the HTTP request will be initiated.
- isc_nm_httprequest() is renamed isc__nm_http_request() for use as an
internal helper function by the DoH unit test. (Eventually doh_test
should be rewritten to use read and send, and this function should
be removed.)
- added implementations of isc__nm_tls_settimeout() and
isc__nm_http_settimeout().
- increased the NGHTTP2 header block length for client connections to 128K.
- use isc_mem_t for internal memory allocations inside nghttp2, to
help track memory leaks.
- send the "Cache-Control" header in requests and responses. (Note:
currently we try to bypass HTTP caching proxies, but ideally we should
interact with them: https://tools.ietf.org/html/rfc8484#section-5.1)
2021-02-03 16:59:49 -08:00
|
|
|
case isc_nm_httpsocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_httpsocket";
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnslistener:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_streamdnslistener";
|
2022-06-20 20:30:12 +03:00
|
|
|
case isc_nm_streamdnssocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_streamdnssocket";
|
2023-03-16 12:50:04 +02:00
|
|
|
case isc_nm_proxystreamlistener:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_proxystreamlistener";
|
2023-03-16 12:50:04 +02:00
|
|
|
case isc_nm_proxystreamsocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_proxystreamsocket";
|
2023-07-12 15:25:38 +03:00
|
|
|
case isc_nm_proxyudplistener:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_proxyudplistener";
|
2023-07-12 15:25:38 +03:00
|
|
|
case isc_nm_proxyudpsocket:
|
2024-11-19 10:38:03 +01:00
|
|
|
return "isc_nm_proxyudpsocket";
|
2020-09-02 17:57:44 +02:00
|
|
|
default:
|
2021-10-11 12:50:17 +02:00
|
|
|
UNREACHABLE();
|
2020-09-02 17:57:44 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nmhandle_dump(isc_nmhandle_t *handle) {
|
2021-01-29 13:00:46 +01:00
|
|
|
fprintf(stderr, "Active handle %p, refs %" PRIuFAST32 "\n", handle,
|
2020-09-02 17:57:44 +02:00
|
|
|
isc_refcount_current(&handle->references));
|
|
|
|
fprintf(stderr, "Created by:\n");
|
2021-04-27 16:20:03 +02:00
|
|
|
isc_backtrace_symbols_fd(handle->backtrace, handle->backtrace_size,
|
|
|
|
STDERR_FILENO);
|
2020-09-02 17:57:44 +02:00
|
|
|
fprintf(stderr, "\n\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nmsocket_dump(isc_nmsocket_t *sock) {
|
|
|
|
fprintf(stderr, "\n=================\n");
|
2021-01-29 13:00:46 +01:00
|
|
|
fprintf(stderr, "Active %s socket %p, type %s, refs %" PRIuFAST32 "\n",
|
2023-03-24 13:37:19 +01:00
|
|
|
sock->client ? "client" : "server", sock,
|
2020-09-02 17:57:44 +02:00
|
|
|
nmsocket_type_totext(sock->type),
|
|
|
|
isc_refcount_current(&sock->references));
|
Refactor netmgr and add more unit tests
This is part of the work that intends to make the netmgr stable,
testable, maintainable and tested. It contains numerous changes to
the netmgr code and, unfortunately, it was not possible to split them
into smaller chunks, as the work needs to be committed as a complete
whole.
NOTE: There's quite a lot of duplicated code between udp.c, tcp.c and
tcpdns.c, and it should be a subject for refactoring in the future.
The changes that are included in this commit are listed here
(extensively, but not exclusively):
* The netmgr_test unit test was split into individual tests (udp_test,
tcp_test, tcpdns_test and the newly added tcp_quota_test).
* The udp_test and tcp_test have been extended to allow programmatic
failures from the libuv API. Unfortunately, we can't use cmocka
mock() and will_return(), so we emulate the behaviour with #define and
by including the netmgr/{udp,tcp}.c source files directly.
* The netievents that we put on the nm queue have a variable number of
members; of these, the isc_nmsocket_t and isc_nmhandle_t always
need to be attached before enqueueing the netievent_<foo> and
detached after we have called the isc_nm_async_<foo>, to ensure that
the socket (handle) doesn't disappear between scheduling the event and
actually executing it.
* Cancelling an in-flight TCP connection using libuv requires calling
uv_close() on the original uv_tcp_t handle, which breaks too many
assumptions we have in the netmgr code. Instead of using uv_timer for
TCP connection timeouts, we use a platform-specific socket option.
* Fix the synchronization between {nm,async}_{listentcp,tcpconnect}.
When isc_nm_listentcp() or isc_nm_tcpconnect() was called, it waited
for the socket to either end up with an error (that path was fine) or
to be listening or connected, using a condition variable and a mutex.
Several things could happen:
0. everything is ok
1. the waiting thread would miss the SIGNAL(), because the enqueued
event would be processed faster than we could start WAIT()ing.
In case the operation ended up with an error it would still be ok, as
the error variable would be unchanged.
2. the waiting thread would miss that sock->{connected,listening} =
`true` had been reset to `false` in tcp_{listen,connect}close_cb(), as
the connection was so short-lived that the socket was closed
before we could even start WAIT()ing.
* The tcpdns has been converted to use libuv directly. Previously,
the tcpdns protocol used the tcp protocol from netmgr; this proved to be
very complicated to understand, fix and make changes to. The new
tcpdns protocol is modeled in a way similar to the tcp netmgr protocol.
Closes: #2194, #2283, #2318, #2266, #2034, #1920
* The tcp and tcpdns no longer use isc_uv_import/isc_uv_export to
pass accepted TCP sockets between netthreads; instead (similar to
UDP) they use a per-netthread uv_loop listener. This greatly reduces the
complexity, as the socket is always run in the associated nm and uv
loops, and we are also not touching the libuv internals.
There's an unfortunate side effect, though: the new code requires
support for load-balanced sockets from the operating system for both
UDP and TCP (see #2137). If the operating system doesn't support
load-balanced sockets (either SO_REUSEPORT on Linux or SO_REUSEPORT_LB
on FreeBSD 12+), the number of netthreads is limited to 1.
* The netmgr now has two debugging #ifdefs:
1. The already existing NETMGR_TRACE prints any dangling nmsockets and
nmhandles before triggering an assertion failure. This option
reduces performance when enabled, but in theory it could be enabled
on low-performance systems.
2. A new NETMGR_TRACE_VERBOSE option has been added that enables
extensive netmgr logging, allowing the software engineer to
precisely track any attach/detach operations on the nmsockets and
nmhandles. This is not suitable for any kind of production
machine, only for debugging.
* The tlsdns netmgr protocol has been split from the tcpdns and it still
uses the old method of stacking the netmgr boxes on top of each other.
We will have to refactor the tlsdns netmgr protocol to use the same
approach: build the stack using only libuv and openssl.
* Limit, but do not assert, the tcp buffer size in tcp_alloc_cb.
Closes: #2061
2020-11-12 10:32:18 +01:00
|
|
|
fprintf(stderr,
|
2021-05-27 09:45:07 +02:00
|
|
|
"Parent %p, listener %p, server %p, statichandle = "
|
|
|
|
"%p\n",
|
2020-11-12 10:32:18 +01:00
|
|
|
sock->parent, sock->listener, sock->server, sock->statichandle);
|
Fix the streaming read callback shutdown logic
When shutting down TCP sockets, the read-callback calling logic was
flawed: it would call either one callback too few or one too many. Fix
the logic as follows:
1. When isc_nm_read() has been called but isc_nm_read_stop() hasn't on
the handle, the read callback will be called with ISC_R_CANCELED to
cancel active reading from the socket/handle.
2. When isc_nm_read() has been called and isc_nm_read_stop() has been
called on the handle, the read callback will be called with
ISC_R_SHUTTINGDOWN to signal that the dormant (not-reading) socket
is being shut down.
3. The .reading and .recv_read flags are a little bit tricky. The
.reading flag indicates whether the outer layer is reading the data
(that would be uv_tcp_t for TCP and isc_nmsocket_t (TCP) for TLSStream);
the .recv_read flag indicates whether somebody is interested in the
data read from the socket.
Usually, you would expect .reading to be false when .recv_read is
false, but it gets even more tricky with TLSStream, as the TLS
protocol might need to read from the socket even when sending data.
Fix the usage of the .recv_read and .reading flags in the TLSStream
to match their true meaning, which mostly consists of using .recv_read
everywhere and then wrapping isc_nm_read() and isc_nm_read_stop()
with the .reading flag.
4. The TLS failed-read helper has been modified to resemble the TCP code
as much as possible; clearing and re-setting the .recv_read flag in
the TCP timeout code has been fixed, and .recv_read is now cleared
when isc_nm_read_stop() has been called on the streaming socket.
5. The use of the Network Manager in the named_controlconf, isccc_ccmsg,
and isc_httpd units has been greatly simplified thanks to the improved
design.
6. More unit tests for TCP and TLS testing the shutdown conditions have
been added.
Co-authored-by: Ondřej Surý <ondrej@isc.org>
Co-authored-by: Artem Boldariev <artem@isc.org>
2023-04-13 17:27:50 +02:00
|
|
|
fprintf(stderr, "Flags:%s%s%s%s%s\n", sock->active ? " active" : "",
|
2023-03-24 13:37:19 +01:00
|
|
|
sock->closing ? " closing" : "",
|
|
|
|
sock->destroying ? " destroying" : "",
|
|
|
|
sock->connecting ? " connecting" : "",
|
|
|
|
sock->accepting ? " accepting" : "");
|
2020-09-02 17:57:44 +02:00
|
|
|
fprintf(stderr, "Created by:\n");
|
2021-04-27 16:20:03 +02:00
|
|
|
isc_backtrace_symbols_fd(sock->backtrace, sock->backtrace_size,
|
|
|
|
STDERR_FILENO);
|
2020-09-02 17:57:44 +02:00
|
|
|
fprintf(stderr, "\n");
|
2020-10-21 12:52:09 +02:00
|
|
|
|
2025-03-20 22:25:56 -07:00
|
|
|
ISC_LIST_FOREACH (sock->active_handles, handle, active_link) {
|
2020-10-21 12:52:09 +02:00
|
|
|
static bool first = true;
|
|
|
|
if (first) {
|
|
|
|
fprintf(stderr, "Active handles:\n");
|
|
|
|
first = false;
|
|
|
|
}
|
2020-09-02 17:57:44 +02:00
|
|
|
nmhandle_dump(handle);
|
|
|
|
}
|
2020-10-21 12:52:09 +02:00
|
|
|
|
2020-09-02 17:57:44 +02:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2023-01-03 08:27:54 +01:00
|
|
|
isc__nm_dump_active(isc__networker_t *worker) {
|
|
|
|
bool first = true;
|
2020-10-21 12:52:09 +02:00
|
|
|
|
2025-03-20 22:25:56 -07:00
|
|
|
ISC_LIST_FOREACH (worker->active_sockets, sock, active_link) {
|
2020-10-21 12:52:09 +02:00
|
|
|
if (first) {
|
|
|
|
fprintf(stderr, "Outstanding sockets\n");
|
|
|
|
first = false;
|
|
|
|
}
|
2020-09-02 17:57:44 +02:00
|
|
|
nmsocket_dump(sock);
|
|
|
|
}
|
|
|
|
}
|
2023-09-26 15:37:48 +03:00
|
|
|
|
|
|
|
void
|
|
|
|
isc__nm_dump_active_manager(isc_nm_t *netmgr) {
|
|
|
|
size_t i = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < netmgr->nloops; i++) {
|
|
|
|
isc__networker_t *worker = &netmgr->workers[i];
|
|
|
|
|
|
|
|
if (!ISC_LIST_EMPTY(worker->active_sockets)) {
|
|
|
|
fprintf(stderr, "Worker #%zu (%p)\n", i, worker);
|
|
|
|
isc__nm_dump_active(worker);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-09-02 17:57:44 +02:00
|
|
|
#endif
|