2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-31 06:25:31 +00:00

Fix TCPDNS and TLSDNS timers

After the TCPDNS refactoring the initial and idle timers were broken:
only the tcp-initial-timeout was applied, and it covered the whole TCP
connection.

This broke any TCP connection that took longer than tcp-initial-timeout,
most often this would affect large zone AXFRs.

This commit changes the timeout logic in this way:

  * On TCP connection accept, the tcp-initial-timeout is applied
    and the timer is started
  * When we are processing and/or sending any DNS message the timer is
    stopped
  * When we stop processing all DNS messages, the tcp-idle-timeout
    is applied and the timer is started again

This commit is contained in:
Ondřej Surý
2021-03-16 09:03:02 +01:00
parent 64cff61c02
commit caa5b6548a
10 changed files with 538 additions and 315 deletions

View File

@@ -86,9 +86,6 @@ stop_tcp_parent(isc_nmsocket_t *sock);
static void
stop_tcp_child(isc_nmsocket_t *sock);
static void
start_sock_timer(isc_nmsocket_t *sock);
static void
start_reading(isc_nmsocket_t *sock);
@@ -719,6 +716,11 @@ destroy:
}
}
/*
 * Public entry point for reporting a failed read on a TCP socket.
 * Thin wrapper delegating to the module-local failed_read_cb() with
 * the given result code (e.g. ISC_R_TIMEDOUT, ISC_R_EOF).
 */
void
isc__nm_tcp_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result) {
failed_read_cb(sock, result);
}
static void
failed_send_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
isc_result_t eresult) {
@@ -726,7 +728,7 @@ failed_send_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
REQUIRE(VALID_UVREQ(req));
if (req->cb.send != NULL) {
isc__nm_sendcb(sock, req, eresult);
isc__nm_sendcb(sock, req, eresult, true);
} else {
isc__nm_uvreq_put(&req, sock);
}
@@ -744,35 +746,6 @@ get_read_req(isc_nmsocket_t *sock) {
return req;
}
/*
 * libuv timer callback fired when the socket's read timer expires.
 * Recovers the socket from the timer's user data, sanity-checks that
 * we are on the socket's own event-loop thread and still reading, and
 * then fails the pending read with ISC_R_TIMEDOUT.
 */
static void
readtimeout_cb(uv_timer_t *timer) {
isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)timer);
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tid == isc_nm_tid());
REQUIRE(sock->reading);
/*
* Timeout; stop reading and process whatever we have.
*/
failed_read_cb(sock, ISC_R_TIMEDOUT);
}
/*
 * (Re)arm the socket's read timer as a one-shot timeout (libuv
 * repeat=0) of sock->read_timeout milliseconds, firing readtimeout_cb.
 * A read_timeout of 0 disables the timer entirely.  Calling
 * uv_timer_start() on an already-active timer restarts it, which is
 * how isc__nm_tcp_settimeout() refreshes a running timer.
 */
static void
start_sock_timer(isc_nmsocket_t *sock) {
if (sock->read_timeout > 0) {
int r = uv_timer_start(&sock->timer, readtimeout_cb,
sock->read_timeout, 0);
REQUIRE(r == 0);
}
}
/*
 * Disarm the socket's read timer.  uv_timer_stop() is a no-op on a
 * timer that is not active, so this is safe to call unconditionally.
 */
static void
stop_sock_timer(isc_nmsocket_t *sock) {
int r = uv_timer_stop(&sock->timer);
REQUIRE(r == 0);
}
static void
start_reading(isc_nmsocket_t *sock) {
if (sock->reading) {
@@ -782,8 +755,6 @@ start_reading(isc_nmsocket_t *sock) {
int r = uv_read_start(&sock->uv_handle.stream, tcp_alloc_cb, read_cb);
REQUIRE(r == 0);
sock->reading = true;
start_sock_timer(sock);
}
static void
@@ -796,7 +767,7 @@ stop_reading(isc_nmsocket_t *sock) {
REQUIRE(r == 0);
sock->reading = false;
stop_sock_timer(sock);
isc__nmsocket_timer_stop(sock);
}
void
@@ -879,6 +850,7 @@ isc__nm_async_tcpstartread(isc__networker_t *worker, isc__netievent_t *ev0) {
}
start_reading(sock);
isc__nmsocket_timer_start(sock);
}
void
@@ -997,7 +969,7 @@ read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) {
/* The readcb could have paused the reading */
if (sock->reading) {
/* The timer will be updated */
start_sock_timer(sock);
isc__nmsocket_timer_restart(sock);
}
free:
@@ -1199,7 +1171,7 @@ tcp_send_cb(uv_write_t *req, int status) {
return;
}
isc__nm_sendcb(sock, uvreq, ISC_R_SUCCESS);
isc__nm_sendcb(sock, uvreq, ISC_R_SUCCESS, false);
}
/*
@@ -1479,20 +1451,6 @@ isc__nm_async_tcpcancel(isc__networker_t *worker, isc__netievent_t *ev0) {
failed_read_cb(sock, ISC_R_EOF);
}
/*
 * Set the read timeout (in milliseconds) on the socket underlying
 * 'handle'.  If the read timer is currently active, restart it so the
 * new value takes effect immediately rather than on the next arm.
 */
void
isc__nm_tcp_settimeout(isc_nmhandle_t *handle, uint32_t timeout) {
isc_nmsocket_t *sock = NULL;
REQUIRE(VALID_NMHANDLE(handle));
sock = handle->sock;
/* NOTE(review): no tid check here — assumes callers serialize access
 * to read_timeout or cross-thread writes are tolerated; confirm. */
sock->read_timeout = timeout;
if (uv_is_active((uv_handle_t *)&sock->timer)) {
start_sock_timer(sock);
}
}
int_fast32_t
isc__nm_tcp_listener_nactive(isc_nmsocket_t *listener) {
int_fast32_t nactive;