2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-31 22:45:39 +00:00

Merge branch 'ondrej-netmgr-simplify-uv_close' into 'main'

Reorder the uv_close() calls to close the socket immediately

See merge request isc-projects/bind9!6704
This commit is contained in:
Ondřej Surý
2022-09-19 12:43:39 +00:00
6 changed files with 188 additions and 77 deletions

46
doc/dev/libuv.md Normal file
View File

@@ -0,0 +1,46 @@
<!--
Copyright (C) Internet Systems Consortium, Inc. ("ISC")
SPDX-License-Identifier: MPL-2.0
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, you can obtain one at https://mozilla.org/MPL/2.0/.
See the COPYRIGHT file distributed with this work for additional
information regarding copyright ownership.
-->
## Libuv Notes
This document describes various notes related to the use of the libuv library.
### Queueing Events onto the ``uv_loop_t``
The upstream documentation on [the I/O
loop](http://docs.libuv.org/en/v1.x/design.html#the-i-o-loop) describes the
order in which the various handles are processed. However, it does not describe
the order in which the loop processes the events in the same buckets, and
because it is counterintuitive, it is described here.
When scheduling events of the same class (e.g. ``uv_*_start()`` or
``uv_close()``), the events are executed in LIFO order (i.e. it's a stack,
not a queue). The reasoning for the upstream design choice is described in [the
upstream issue](https://github.com/libuv/libuv/issues/3582).
What does this mean in practice? For example, when closing the handles:
uv_close(&handle1, callback1);
uv_close(&handle2, callback2);
The ``callback2()`` will be called before the ``callback1()``, so if they are
using the same resource, the resource can be freed in the ``callback1()`` and
not in the ``callback2()``.
The same applies, for example, to ``uv_idle_t``: if you want ``action1()`` to
execute before ``action2()``, the valid code would be:
uv_idle_start(&idle2, action2);
uv_idle_start(&idle1, action1);
which is really counterintuitive.

View File

@@ -665,6 +665,8 @@ nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree FLARG) {
"\n",
sock, isc_refcount_current(&sock->references));
isc_refcount_destroy(&sock->references);
isc__nm_decstats(sock, STATID_ACTIVE);
atomic_store(&sock->destroying, true);
@@ -675,7 +677,9 @@ nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree FLARG) {
* so we can clean up and free the children.
*/
for (size_t i = 0; i < sock->nchildren; i++) {
if (!atomic_load(&sock->children[i].destroying)) {
REQUIRE(!atomic_load(&sock->children[i].destroying));
if (isc_refcount_decrement(
&sock->children[i].references)) {
nmsocket_cleanup(&sock->children[i],
false FLARG_PASS);
}

View File

@@ -62,6 +62,8 @@ static isc_result_t
tcp_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req);
static void
tcp_connect_cb(uv_connect_t *uvreq, int status);
static void
tcp_stop_cb(uv_handle_t *handle);
static void
tcp_connection_cb(uv_stream_t *server, int status);
@@ -683,7 +685,20 @@ isc__nm_async_tcpstop(isc__networker_t *worker, isc__netievent_t *ev0) {
RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing,
&(bool){ false }, true));
tcp_close_direct(sock);
/*
* The order of the close operation is important here, the uv_close()
* gets scheduled in the reverse order, so we need to close the timer
* last, so it's gone by the time we destroy the socket
*/
/* 2. close the listening socket */
isc__nmsocket_clearcb(sock);
isc__nm_stop_reading(sock);
uv_close(&sock->uv_handle.handle, tcp_stop_cb);
/* 1. close the read timer */
isc__nmsocket_timer_stop(sock);
uv_close(&sock->read_timer, NULL);
(void)atomic_fetch_sub(&sock->parent->rchildren, 1);
@@ -1217,25 +1232,12 @@ tcp_close_cb(uv_handle_t *handle) {
tcp_close_sock(sock);
}
static void
read_timer_close_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
uv_handle_set_data(handle, NULL);
if (sock->parent) {
uv_close(&sock->uv_handle.handle, tcp_stop_cb);
} else if (uv_is_closing(&sock->uv_handle.handle)) {
tcp_close_sock(sock);
} else {
uv_close(&sock->uv_handle.handle, tcp_close_cb);
}
}
static void
tcp_close_direct(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
REQUIRE(sock->parent == NULL);
if (sock->server != NULL) {
REQUIRE(VALID_NMSOCK(sock->server));
@@ -1251,12 +1253,31 @@ tcp_close_direct(isc_nmsocket_t *sock) {
isc_quota_detach(&sock->quota);
}
isc__nmsocket_clearcb(sock);
isc__nmsocket_timer_stop(sock);
isc__nm_stop_reading(sock);
/*
* The order of the close operation is important here, the uv_close()
* gets scheduled in the reverse order, so we need to close the timer
* last, so it's gone by the time we destroy the socket
*/
if (!uv_is_closing(&sock->uv_handle.handle)) {
/* Normal order of operation */
/* 2. close the socket + destroy the socket in callback */
isc__nmsocket_clearcb(sock);
isc__nm_stop_reading(sock);
uv_close(&sock->uv_handle.handle, tcp_close_cb);
/* 1. close the timer */
isc__nmsocket_timer_stop(sock);
uv_close((uv_handle_t *)&sock->read_timer, NULL);
} else {
/* The socket was already closed elsewhere */
/* 1. close the timer + destroy the socket in callback */
isc__nmsocket_timer_stop(sock);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
uv_close((uv_handle_t *)&sock->read_timer, read_timer_close_cb);
uv_close((uv_handle_t *)&sock->read_timer, tcp_close_cb);
}
}
void

View File

@@ -63,6 +63,9 @@ tcpdns_connect_cb(uv_connect_t *uvreq, int status);
static void
tcpdns_connection_cb(uv_stream_t *server, int status);
static void
tcpdns_stop_cb(uv_handle_t *handle);
static void
tcpdns_close_cb(uv_handle_t *uvhandle);
@@ -646,7 +649,20 @@ isc__nm_async_tcpdnsstop(isc__networker_t *worker, isc__netievent_t *ev0) {
RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing,
&(bool){ false }, true));
tcpdns_close_direct(sock);
/*
* The order of the close operation is important here, the uv_close()
* gets scheduled in the reverse order, so we need to close the timer
* last, so it's gone by the time we destroy the socket
*/
/* 2. close the listening socket */
isc__nmsocket_clearcb(sock);
isc__nm_stop_reading(sock);
uv_close(&sock->uv_handle.handle, tcpdns_stop_cb);
/* 1. close the read timer */
isc__nmsocket_timer_stop(sock);
uv_close(&sock->read_timer, NULL);
(void)atomic_fetch_sub(&sock->parent->rchildren, 1);
@@ -1270,22 +1286,6 @@ tcpdns_close_cb(uv_handle_t *handle) {
tcpdns_close_sock(sock);
}
static void
read_timer_close_cb(uv_handle_t *timer) {
isc_nmsocket_t *sock = uv_handle_get_data(timer);
uv_handle_set_data(timer, NULL);
REQUIRE(VALID_NMSOCK(sock));
if (sock->parent) {
uv_close(&sock->uv_handle.handle, tcpdns_stop_cb);
} else if (uv_is_closing(&sock->uv_handle.handle)) {
tcpdns_close_sock(sock);
} else {
uv_close(&sock->uv_handle.handle, tcpdns_close_cb);
}
}
static void
tcpdns_close_direct(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
@@ -1300,12 +1300,30 @@ tcpdns_close_direct(isc_nmsocket_t *sock) {
isc_nmhandle_detach(&sock->recv_handle);
}
isc__nmsocket_clearcb(sock);
isc__nmsocket_timer_stop(sock);
isc__nm_stop_reading(sock);
/*
* The order of the close operation is important here, the uv_close()
* gets scheduled in the reverse order, so we need to close the timer
* last, so it's gone by the time we destroy the socket
*/
if (!uv_is_closing(&sock->uv_handle.handle)) {
/* Normal order of operation */
/* 2. close the socket + destroy the socket in callback */
isc__nmsocket_clearcb(sock);
isc__nm_stop_reading(sock);
uv_close(&sock->uv_handle.handle, tcpdns_close_cb);
/* 1. close the timer */
uv_close((uv_handle_t *)&sock->read_timer, NULL);
} else {
/* The socket was already closed elsewhere */
/* 1. close the timer + destroy the socket in callback */
isc__nmsocket_timer_stop(sock);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
uv_close((uv_handle_t *)&sock->read_timer, read_timer_close_cb);
uv_close((uv_handle_t *)&sock->read_timer, tcpdns_close_cb);
}
}
void

View File

@@ -56,6 +56,9 @@ tlsdns_connect_cb(uv_connect_t *uvreq, int status);
static void
tlsdns_connection_cb(uv_stream_t *server, int status);
static void
tlsdns_stop_cb(uv_handle_t *handle);
static void
tlsdns_close_cb(uv_handle_t *uvhandle);
@@ -769,7 +772,20 @@ isc__nm_async_tlsdnsstop(isc__networker_t *worker, isc__netievent_t *ev0) {
RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing,
&(bool){ false }, true));
tlsdns_close_direct(sock);
/*
* The order of the close operation is important here, the uv_close()
* gets scheduled in the reverse order, so we need to close the timer
* last, so it's gone by the time we destroy the socket
*/
/* 2. close the listening socket */
isc__nmsocket_clearcb(sock);
isc__nm_stop_reading(sock);
uv_close(&sock->uv_handle.handle, tlsdns_stop_cb);
/* 1. close the read timer */
isc__nmsocket_timer_stop(sock);
uv_close(&sock->read_timer, NULL);
(void)atomic_fetch_sub(&sock->parent->rchildren, 1);
@@ -1908,27 +1924,12 @@ tlsdns_close_cb(uv_handle_t *handle) {
tlsdns_close_sock(sock);
}
static void
read_timer_close_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
uv_handle_set_data(handle, NULL);
REQUIRE(VALID_NMSOCK(sock));
if (sock->parent) {
uv_close(&sock->uv_handle.handle, tlsdns_stop_cb);
} else if (uv_is_closing(&sock->uv_handle.handle)) {
tlsdns_close_sock(sock);
} else {
uv_close(&sock->uv_handle.handle, tlsdns_close_cb);
}
}
static void
tlsdns_close_direct(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tid == isc_tid());
REQUIRE(atomic_load(&sock->closing));
REQUIRE(sock->parent == NULL);
REQUIRE(sock->tls.pending_req == NULL);
@@ -1940,12 +1941,31 @@ tlsdns_close_direct(isc_nmsocket_t *sock) {
isc_nmhandle_detach(&sock->recv_handle);
}
isc__nmsocket_clearcb(sock);
isc__nmsocket_timer_stop(sock);
isc__nm_stop_reading(sock);
/*
* The order of the close operation is important here, the uv_close()
* gets scheduled in the reverse order, so we need to close the timer
* last, so it's gone by the time we destroy the socket
*/
if (!uv_is_closing(&sock->uv_handle.handle)) {
/* Normal order of operation */
/* 2. close the socket + destroy the socket in callback */
isc__nmsocket_clearcb(sock);
isc__nm_stop_reading(sock);
uv_close(&sock->uv_handle.handle, tlsdns_close_cb);
/* 1. close the timer */
isc__nmsocket_timer_stop(sock);
uv_close((uv_handle_t *)&sock->read_timer, NULL);
} else {
/* The socket was already closed elsewhere */
/* 1. close the timer + destroy the socket in callback */
isc__nmsocket_timer_stop(sock);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
uv_close((uv_handle_t *)&sock->read_timer, read_timer_close_cb);
uv_close((uv_handle_t *)&sock->read_timer, tlsdns_close_cb);
}
}
void

View File

@@ -65,9 +65,6 @@ udp_send_cb(uv_udp_send_t *req, int status);
static void
udp_close_cb(uv_handle_t *handle);
static void
read_timer_close_cb(uv_handle_t *handle);
static uv_os_sock_t
isc__nm_udp_lb_socket(isc_nm_t *mgr, sa_family_t sa_family) {
isc_result_t result;
@@ -1008,14 +1005,6 @@ udp_close_cb(uv_handle_t *handle) {
}
}
static void
read_timer_close_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
uv_handle_set_data(handle, NULL);
uv_close(&sock->uv_handle.handle, udp_close_cb);
}
void
isc__nm_udp_close(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
@@ -1031,7 +1020,20 @@ isc__nm_udp_close(isc_nmsocket_t *sock) {
isc__nmsocket_timer_stop(sock);
isc__nm_stop_reading(sock);
uv_close((uv_handle_t *)&sock->read_timer, read_timer_close_cb);
/*
* The order of the close operation is important here, the uv_close()
* gets scheduled in the reverse order, so we need to close the timer
* last, so it's gone by the time we destroy the socket
*/
/* 2. close the listening socket */
isc__nmsocket_clearcb(sock);
isc__nm_stop_reading(sock);
uv_close(&sock->uv_handle.handle, udp_close_cb);
/* 1. close the read timer */
isc__nmsocket_timer_stop(sock);
uv_close((uv_handle_t *)&sock->read_timer, NULL);
}
void