2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-09-01 15:05:23 +00:00

Make it possible to recover from connect timeouts

Similarly to the read timeout, it's now possible to recover from an
ISC_R_TIMEDOUT event by restarting the timer from the connect callback.

The change here also fixes platforms that are missing the socket() option
to set the TCP connection timeout, by moving the timeout code into user
space.  On platforms that support setting the connect timeout via a
socket option, the timeout has been hardcoded to 2 minutes (the maximum
value of tcp-initial-timeout).
This commit is contained in:
Ondřej Surý
2021-03-30 09:25:09 +02:00
parent 33c00c281f
commit 5a87c7372c
7 changed files with 323 additions and 157 deletions

View File

@@ -1058,7 +1058,7 @@ http_call_connect_cb(isc_nmsocket_t *sock, isc_result_t result) {
req->handle = isc__nmhandle_get(sock, &sock->peer, &sock->iface->addr); req->handle = isc__nmhandle_get(sock, &sock->peer, &sock->iface->addr);
isc__nmsocket_clearcb(sock); isc__nmsocket_clearcb(sock);
isc__nm_connectcb_force_async(sock, req, result); isc__nm_connectcb(sock, req, result, true);
} }
static void static void

View File

@@ -1173,11 +1173,7 @@ isc__nmsocket_timer_running(isc_nmsocket_t *sock);
void void
isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq, isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
isc_result_t eresult); isc_result_t eresult, bool async);
void
isc__nm_connectcb_force_async(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
isc_result_t eresult);
void void
isc__nm_async_connectcb(isc__networker_t *worker, isc__netievent_t *ev0); isc__nm_async_connectcb(isc__networker_t *worker, isc__netievent_t *ev0);
@@ -1896,4 +1892,7 @@ isc__nm_failed_connect_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
void void
isc__nm_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result); isc__nm_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result);
void
isc__nmsocket_connecttimeout_cb(uv_timer_t *timer);
#define STREAM_CLIENTS_PER_CONN 23 #define STREAM_CLIENTS_PER_CONN 23

View File

@@ -827,7 +827,6 @@ NETIEVENT_SOCKET_REQ_DEF(tcpconnect);
NETIEVENT_SOCKET_REQ_DEF(tcpsend); NETIEVENT_SOCKET_REQ_DEF(tcpsend);
NETIEVENT_SOCKET_REQ_DEF(tlssend); NETIEVENT_SOCKET_REQ_DEF(tlssend);
NETIEVENT_SOCKET_REQ_DEF(udpconnect); NETIEVENT_SOCKET_REQ_DEF(udpconnect);
NETIEVENT_SOCKET_REQ_RESULT_DEF(connectcb); NETIEVENT_SOCKET_REQ_RESULT_DEF(connectcb);
NETIEVENT_SOCKET_REQ_RESULT_DEF(readcb); NETIEVENT_SOCKET_REQ_RESULT_DEF(readcb);
NETIEVENT_SOCKET_REQ_RESULT_DEF(sendcb); NETIEVENT_SOCKET_REQ_RESULT_DEF(sendcb);
@@ -1022,6 +1021,9 @@ nmsocket_maybe_destroy(isc_nmsocket_t *sock FLARG) {
int active_handles; int active_handles;
bool destroy = false; bool destroy = false;
NETMGR_TRACE_LOG("%s():%p->references = %" PRIuFAST32 "\n", __func__,
sock, isc_refcount_current(&sock->references));
if (sock->parent != NULL) { if (sock->parent != NULL) {
/* /*
* This is a child socket and cannot be destroyed except * This is a child socket and cannot be destroyed except
@@ -1519,6 +1521,9 @@ isc__nmhandle_detach(isc_nmhandle_t **handlep FLARG) {
} }
} }
static void
isc__nmsocket_shutdown(isc_nmsocket_t *sock);
static void static void
nmhandle_detach_cb(isc_nmhandle_t **handlep FLARG) { nmhandle_detach_cb(isc_nmhandle_t **handlep FLARG) {
isc_nmsocket_t *sock = NULL; isc_nmsocket_t *sock = NULL;
@@ -1664,11 +1669,12 @@ isc__nm_failed_connect_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
REQUIRE(atomic_load(&sock->connecting)); REQUIRE(atomic_load(&sock->connecting));
REQUIRE(req->cb.connect != NULL); REQUIRE(req->cb.connect != NULL);
isc__nmsocket_timer_stop(sock);
atomic_store(&sock->connecting, false); atomic_store(&sock->connecting, false);
isc__nmsocket_clearcb(sock); isc__nmsocket_clearcb(sock);
isc__nm_connectcb(sock, req, eresult, true);
isc__nm_connectcb(sock, req, eresult);
isc__nmsocket_prep_destroy(sock); isc__nmsocket_prep_destroy(sock);
} }
@@ -1695,6 +1701,32 @@ isc__nm_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result) {
} }
} }
void
isc__nmsocket_connecttimeout_cb(uv_timer_t *timer) {
uv_connect_t *uvreq = uv_handle_get_data((uv_handle_t *)timer);
isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)uvreq->handle);
isc__nm_uvreq_t *req = uv_handle_get_data((uv_handle_t *)uvreq);
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tid == isc_nm_tid());
REQUIRE(atomic_load(&sock->connecting));
REQUIRE(atomic_load(&sock->client));
REQUIRE(VALID_UVREQ(req));
REQUIRE(VALID_NMHANDLE(req->handle));
isc__nmsocket_timer_stop(sock);
/* Call the connect callback directly */
req->cb.connect(req->handle, ISC_R_TIMEDOUT, req->cbarg);
/* Timer is not running, cleanup and shutdown everything */
if (!isc__nmsocket_timer_running(sock)) {
isc__nmsocket_clearcb(sock);
isc__nmsocket_shutdown(sock);
atomic_store(&sock->connecting, false);
}
}
static void static void
isc__nmsocket_readtimeout_cb(uv_timer_t *timer) { isc__nmsocket_readtimeout_cb(uv_timer_t *timer) {
isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)timer); isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)timer);
@@ -1704,6 +1736,8 @@ isc__nmsocket_readtimeout_cb(uv_timer_t *timer) {
REQUIRE(sock->reading); REQUIRE(sock->reading);
if (atomic_load(&sock->client)) { if (atomic_load(&sock->client)) {
uv_timer_stop(timer);
if (sock->recv_cb != NULL) { if (sock->recv_cb != NULL) {
isc__nm_uvreq_t *req = isc__nm_get_read_req(sock, NULL); isc__nm_uvreq_t *req = isc__nm_get_read_req(sock, NULL);
isc__nm_readcb(sock, req, ISC_R_TIMEDOUT); isc__nm_readcb(sock, req, ISC_R_TIMEDOUT);
@@ -1720,14 +1754,28 @@ isc__nmsocket_readtimeout_cb(uv_timer_t *timer) {
void void
isc__nmsocket_timer_restart(isc_nmsocket_t *sock) { isc__nmsocket_timer_restart(isc_nmsocket_t *sock) {
int r = 0;
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
if (sock->read_timeout == 0) { if (atomic_load(&sock->connecting)) {
return; if (sock->connect_timeout == 0) {
return;
}
r = uv_timer_start(&sock->timer,
isc__nmsocket_connecttimeout_cb,
sock->connect_timeout + 10, 0);
} else {
if (sock->read_timeout == 0) {
return;
}
r = uv_timer_start(&sock->timer, isc__nmsocket_readtimeout_cb,
sock->read_timeout, 0);
} }
int r = uv_timer_start(&sock->timer, isc__nmsocket_readtimeout_cb,
sock->read_timeout, 0);
RUNTIME_CHECK(r == 0); RUNTIME_CHECK(r == 0);
} }
@@ -2263,36 +2311,24 @@ isc_nm_stoplistening(isc_nmsocket_t *sock) {
} }
} }
static void void
nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq, isc_result_t eresult, isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
bool force_async) { isc_result_t eresult, bool async) {
isc__netievent_connectcb_t *ievent = NULL;
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
REQUIRE(VALID_UVREQ(uvreq)); REQUIRE(VALID_UVREQ(uvreq));
REQUIRE(VALID_NMHANDLE(uvreq->handle)); REQUIRE(VALID_NMHANDLE(uvreq->handle));
ievent = isc__nm_get_netievent_connectcb(sock->mgr, sock, uvreq, if (!async) {
eresult); isc__netievent_connectcb_t ievent = { .sock = sock,
if (force_async) { .req = uvreq,
isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], .result = eresult };
(isc__netievent_t *)ievent); isc__nm_async_connectcb(NULL, (isc__netievent_t *)&ievent);
} else { return;
isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
(isc__netievent_t *)ievent);
} }
} isc__netievent_connectcb_t *ievent = isc__nm_get_netievent_connectcb(
sock->mgr, sock, uvreq, eresult);
void isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq, (isc__netievent_t *)ievent);
isc_result_t eresult) {
nm_connectcb(sock, uvreq, eresult, false);
}
void
isc__nm_connectcb_force_async(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq,
isc_result_t eresult) {
nm_connectcb(sock, uvreq, eresult, true);
} }
void void
@@ -2426,22 +2462,7 @@ isc__nm_async_detach(isc__networker_t *worker, isc__netievent_t *ev0) {
} }
static void static void
shutdown_walk_cb(uv_handle_t *handle, void *arg) { isc__nmsocket_shutdown(isc_nmsocket_t *sock) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
UNUSED(arg);
if (uv_is_closing(handle)) {
return;
}
switch (handle->type) {
case UV_UDP:
case UV_TCP:
break;
default:
return;
}
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
switch (sock->type) { switch (sock->type) {
case isc_nm_udpsocket: case isc_nm_udpsocket:
@@ -2467,6 +2488,26 @@ shutdown_walk_cb(uv_handle_t *handle, void *arg) {
} }
} }
static void
shutdown_walk_cb(uv_handle_t *handle, void *arg) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
UNUSED(arg);
if (uv_is_closing(handle)) {
return;
}
switch (handle->type) {
case UV_UDP:
case UV_TCP:
break;
default:
return;
}
isc__nmsocket_shutdown(sock);
}
void void
isc__nm_async_shutdown(isc__networker_t *worker, isc__netievent_t *ev0) { isc__nm_async_shutdown(isc__networker_t *worker, isc__netievent_t *ev0) {
UNUSED(ev0); UNUSED(ev0);

View File

@@ -132,23 +132,6 @@ failed_accept_cb(isc_nmsocket_t *sock, isc_result_t eresult) {
} }
} }
static void
failed_connect_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
isc_result_t eresult) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(VALID_UVREQ(req));
REQUIRE(sock->tid == isc_nm_tid());
REQUIRE(atomic_load(&sock->connecting));
REQUIRE(req->cb.connect != NULL);
atomic_store(&sock->connecting, false);
isc__nmsocket_clearcb(sock);
isc__nm_connectcb(sock, req, eresult);
isc__nmsocket_prep_destroy(sock);
}
static isc_result_t static isc_result_t
tcp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { tcp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
isc__networker_t *worker = NULL; isc__networker_t *worker = NULL;
@@ -161,21 +144,20 @@ tcp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
REQUIRE(isc__nm_in_netthread()); REQUIRE(isc__nm_in_netthread());
REQUIRE(sock->tid == isc_nm_tid()); REQUIRE(sock->tid == isc_nm_tid());
result = isc__nm_socket_connectiontimeout(sock->fd,
sock->connect_timeout);
RUNTIME_CHECK(result == ISC_R_SUCCESS);
worker = &sock->mgr->workers[sock->tid]; worker = &sock->mgr->workers[sock->tid];
atomic_store(&sock->connecting, true); atomic_store(&sock->connecting, true);
/* 2 minute timeout */
result = isc__nm_socket_connectiontimeout(sock->fd, 120 * 1000);
RUNTIME_CHECK(result == ISC_R_SUCCESS);
r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp); r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp);
RUNTIME_CHECK(r == 0); RUNTIME_CHECK(r == 0);
uv_handle_set_data(&sock->uv_handle.handle, sock); uv_handle_set_data(&sock->uv_handle.handle, sock);
r = uv_timer_init(&worker->loop, &sock->timer); r = uv_timer_init(&worker->loop, &sock->timer);
RUNTIME_CHECK(r == 0); RUNTIME_CHECK(r == 0);
uv_handle_set_data((uv_handle_t *)&sock->timer, sock);
r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd); r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd);
if (r != 0) { if (r != 0) {
@@ -204,6 +186,9 @@ tcp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
} }
isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CONNECT]); isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CONNECT]);
uv_handle_set_data((uv_handle_t *)&sock->timer, &req->uv_req.connect);
isc__nmsocket_timer_start(sock);
atomic_store(&sock->connected, true); atomic_store(&sock->connected, true);
done: done:
@@ -262,24 +247,37 @@ tcp_connect_cb(uv_connect_t *uvreq, int status) {
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tid == isc_nm_tid()); REQUIRE(sock->tid == isc_nm_tid());
REQUIRE(atomic_load(&sock->connecting));
isc__nmsocket_timer_stop(sock);
uv_handle_set_data((uv_handle_t *)&sock->timer, sock);
req = uv_handle_get_data((uv_handle_t *)uvreq); req = uv_handle_get_data((uv_handle_t *)uvreq);
REQUIRE(VALID_UVREQ(req)); REQUIRE(VALID_UVREQ(req));
REQUIRE(VALID_NMHANDLE(req->handle)); REQUIRE(VALID_NMHANDLE(req->handle));
/* Socket was closed midflight by isc__nm_tcp_shutdown() */ if (!atomic_load(&sock->connecting)) {
if (!isc__nmsocket_active(sock)) { /*
* The connect was cancelled from timeout; just clean up
* the req.
*/
isc__nm_uvreq_put(&req, sock);
return;
} else if (!isc__nmsocket_active(sock)) {
/* Socket was closed midflight by isc__nm_tcp_shutdown() */
result = ISC_R_CANCELED; result = ISC_R_CANCELED;
goto error; goto error;
} } else if (status == UV_ETIMEDOUT) {
/* Timeout status code here indicates hard error */
if (status != 0) { result = ISC_R_TIMEDOUT;
goto error;
} else if (status != 0) {
result = isc__nm_uverr2result(status); result = isc__nm_uverr2result(status);
goto error; goto error;
} }
uv_handle_set_data((uv_handle_t *)&sock->timer, sock);
isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CONNECT]); isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CONNECT]);
r = uv_tcp_getpeername(&sock->uv_handle.tcp, (struct sockaddr *)&ss, r = uv_tcp_getpeername(&sock->uv_handle.tcp, (struct sockaddr *)&ss,
&(int){ sizeof(ss) }); &(int){ sizeof(ss) });
@@ -293,12 +291,12 @@ tcp_connect_cb(uv_connect_t *uvreq, int status) {
result = isc_sockaddr_fromsockaddr(&sock->peer, (struct sockaddr *)&ss); result = isc_sockaddr_fromsockaddr(&sock->peer, (struct sockaddr *)&ss);
RUNTIME_CHECK(result == ISC_R_SUCCESS); RUNTIME_CHECK(result == ISC_R_SUCCESS);
isc__nm_connectcb(sock, req, ISC_R_SUCCESS); isc__nm_connectcb(sock, req, ISC_R_SUCCESS, false);
return; return;
error: error:
failed_connect_cb(sock, req, result); isc__nm_failed_connect_cb(sock, req, result);
} }
isc_result_t isc_result_t
@@ -1225,10 +1223,7 @@ tcp_stop_cb(uv_handle_t *handle) {
} }
static void static void
tcp_close_cb(uv_handle_t *handle) { tcp_close_sock(isc_nmsocket_t *sock) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
uv_handle_set_data(handle, NULL);
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tid == isc_nm_tid()); REQUIRE(sock->tid == isc_nm_tid());
REQUIRE(atomic_load(&sock->closing)); REQUIRE(atomic_load(&sock->closing));
@@ -1250,6 +1245,14 @@ tcp_close_cb(uv_handle_t *handle) {
isc__nmsocket_prep_destroy(sock); isc__nmsocket_prep_destroy(sock);
} }
static void
tcp_close_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
uv_handle_set_data(handle, NULL);
tcp_close_sock(sock);
}
static void static void
timer_close_cb(uv_handle_t *handle) { timer_close_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle); isc_nmsocket_t *sock = uv_handle_get_data(handle);
@@ -1257,6 +1260,8 @@ timer_close_cb(uv_handle_t *handle) {
if (sock->parent) { if (sock->parent) {
uv_close(&sock->uv_handle.handle, tcp_stop_cb); uv_close(&sock->uv_handle.handle, tcp_stop_cb);
} else if (uv_is_closing(&sock->uv_handle.handle)) {
tcp_close_sock(sock);
} else { } else {
uv_close(&sock->uv_handle.handle, tcp_close_cb); uv_close(&sock->uv_handle.handle, tcp_close_cb);
} }
@@ -1374,6 +1379,19 @@ isc__nm_async_tcpclose(isc__networker_t *worker, isc__netievent_t *ev0) {
tcp_close_direct(sock); tcp_close_direct(sock);
} }
static void
tcp_close_connect_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(isc__nm_in_netthread());
REQUIRE(sock->tid == isc_nm_tid());
isc__nmsocket_prep_destroy(sock);
isc__nmsocket_detach(&sock);
}
void void
isc__nm_tcp_shutdown(isc_nmsocket_t *sock) { isc__nm_tcp_shutdown(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
@@ -1388,7 +1406,14 @@ isc__nm_tcp_shutdown(isc_nmsocket_t *sock) {
return; return;
} }
if (atomic_load(&sock->connecting) || sock->accepting) { if (sock->accepting) {
return;
}
if (atomic_load(&sock->connecting)) {
isc_nmsocket_t *tsock = NULL;
isc__nmsocket_attach(sock, &tsock);
uv_close(&sock->uv_handle.handle, tcp_close_connect_cb);
return; return;
} }

View File

@@ -137,6 +137,9 @@ tcpdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
} }
isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CONNECT]); isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CONNECT]);
uv_handle_set_data((uv_handle_t *)&sock->timer, &req->uv_req.connect);
isc__nmsocket_timer_start(sock);
atomic_store(&sock->connected, true); atomic_store(&sock->connected, true);
done: done:
@@ -193,20 +196,31 @@ tcpdns_connect_cb(uv_connect_t *uvreq, int status) {
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tid == isc_nm_tid()); REQUIRE(sock->tid == isc_nm_tid());
REQUIRE(atomic_load(&sock->connecting));
isc__nmsocket_timer_stop(sock);
uv_handle_set_data((uv_handle_t *)&sock->timer, sock);
req = uv_handle_get_data((uv_handle_t *)uvreq); req = uv_handle_get_data((uv_handle_t *)uvreq);
REQUIRE(VALID_UVREQ(req)); REQUIRE(VALID_UVREQ(req));
REQUIRE(VALID_NMHANDLE(req->handle)); REQUIRE(VALID_NMHANDLE(req->handle));
/* Socket was closed midflight by isc__nm_tcpdns_shutdown() */ if (!atomic_load(&sock->connecting)) {
if (!isc__nmsocket_active(sock)) { /*
* The connect was cancelled from timeout; just clean up
* the req.
*/
isc__nm_uvreq_put(&req, sock);
return;
} else if (!isc__nmsocket_active(sock)) {
/* Socket was closed midflight by isc__nm_tcpdns_shutdown() */
result = ISC_R_CANCELED; result = ISC_R_CANCELED;
goto error; goto error;
} } else if (status == UV_ETIMEDOUT) {
/* Timeout status code here indicates hard error */
if (status != 0) { result = ISC_R_CANCELED;
goto error;
} else if (status != 0) {
result = isc__nm_uverr2result(status); result = isc__nm_uverr2result(status);
goto error; goto error;
} }
@@ -224,7 +238,7 @@ tcpdns_connect_cb(uv_connect_t *uvreq, int status) {
result = isc_sockaddr_fromsockaddr(&sock->peer, (struct sockaddr *)&ss); result = isc_sockaddr_fromsockaddr(&sock->peer, (struct sockaddr *)&ss);
RUNTIME_CHECK(result == ISC_R_SUCCESS); RUNTIME_CHECK(result == ISC_R_SUCCESS);
isc__nm_connectcb(sock, req, ISC_R_SUCCESS); isc__nm_connectcb(sock, req, ISC_R_SUCCESS, false);
return; return;
@@ -267,7 +281,7 @@ isc_nm_tcpdnsconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
sock->fd = fd; sock->fd = fd;
atomic_init(&sock->client, true); atomic_init(&sock->client, true);
result = isc__nm_socket_connectiontimeout(fd, timeout); result = isc__nm_socket_connectiontimeout(fd, 120 * 1000); /* 2 mins */
RUNTIME_CHECK(result == ISC_R_SUCCESS); RUNTIME_CHECK(result == ISC_R_SUCCESS);
req = isc__nm_uvreq_get(mgr, sock); req = isc__nm_uvreq_get(mgr, sock);
@@ -1167,11 +1181,7 @@ tcpdns_stop_cb(uv_handle_t *handle) {
} }
static void static void
tcpdns_close_cb(uv_handle_t *handle) { tcpdns_close_sock(isc_nmsocket_t *sock) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
uv_handle_set_data(handle, NULL);
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tid == isc_nm_tid()); REQUIRE(sock->tid == isc_nm_tid());
REQUIRE(atomic_load(&sock->closing)); REQUIRE(atomic_load(&sock->closing));
@@ -1194,12 +1204,25 @@ tcpdns_close_cb(uv_handle_t *handle) {
} }
static void static void
timer_close_cb(uv_handle_t *handle) { tcpdns_close_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle); isc_nmsocket_t *sock = uv_handle_get_data(handle);
uv_handle_set_data(handle, NULL); uv_handle_set_data(handle, NULL);
tcpdns_close_sock(sock);
}
static void
timer_close_cb(uv_handle_t *timer) {
isc_nmsocket_t *sock = uv_handle_get_data(timer);
uv_handle_set_data(timer, NULL);
REQUIRE(VALID_NMSOCK(sock));
if (sock->parent) { if (sock->parent) {
uv_close(&sock->uv_handle.handle, tcpdns_stop_cb); uv_close(&sock->uv_handle.handle, tcpdns_stop_cb);
} else if (uv_is_closing(&sock->uv_handle.handle)) {
tcpdns_close_sock(sock);
} else { } else {
uv_close(&sock->uv_handle.handle, tcpdns_close_cb); uv_close(&sock->uv_handle.handle, tcpdns_close_cb);
} }
@@ -1271,6 +1294,7 @@ tcpdns_close_direct(isc_nmsocket_t *sock) {
isc__nmsocket_timer_stop(sock); isc__nmsocket_timer_stop(sock);
isc__nm_stop_reading(sock); isc__nm_stop_reading(sock);
uv_close((uv_handle_t *)&sock->timer, timer_close_cb); uv_close((uv_handle_t *)&sock->timer, timer_close_cb);
} }
@@ -1313,6 +1337,19 @@ isc__nm_async_tcpdnsclose(isc__networker_t *worker, isc__netievent_t *ev0) {
tcpdns_close_direct(sock); tcpdns_close_direct(sock);
} }
static void
tcpdns_close_connect_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(isc__nm_in_netthread());
REQUIRE(sock->tid == isc_nm_tid());
isc__nmsocket_prep_destroy(sock);
isc__nmsocket_detach(&sock);
}
void void
isc__nm_tcpdns_shutdown(isc_nmsocket_t *sock) { isc__nm_tcpdns_shutdown(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
@@ -1327,7 +1364,14 @@ isc__nm_tcpdns_shutdown(isc_nmsocket_t *sock) {
return; return;
} }
if (atomic_load(&sock->connecting) || sock->accepting) { if (sock->accepting) {
return;
}
if (atomic_load(&sock->connecting)) {
isc_nmsocket_t *tsock = NULL;
isc__nmsocket_attach(sock, &tsock);
uv_close(&sock->uv_handle.handle, tcpdns_close_connect_cb);
return; return;
} }

View File

@@ -149,6 +149,9 @@ tlsdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
} }
isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CONNECT]); isc__nm_incstats(sock->mgr, sock->statsindex[STATID_CONNECT]);
uv_handle_set_data((uv_handle_t *)&sock->timer, &req->uv_req.connect);
isc__nmsocket_timer_start(sock);
atomic_store(&sock->connected, true); atomic_store(&sock->connected, true);
done: done:
@@ -205,20 +208,31 @@ tlsdns_connect_cb(uv_connect_t *uvreq, int status) {
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tid == isc_nm_tid()); REQUIRE(sock->tid == isc_nm_tid());
REQUIRE(atomic_load(&sock->connecting));
isc__nmsocket_timer_stop(sock);
uv_handle_set_data((uv_handle_t *)&sock->timer, sock);
req = uv_handle_get_data((uv_handle_t *)uvreq); req = uv_handle_get_data((uv_handle_t *)uvreq);
REQUIRE(VALID_UVREQ(req)); REQUIRE(VALID_UVREQ(req));
REQUIRE(VALID_NMHANDLE(req->handle)); REQUIRE(VALID_NMHANDLE(req->handle));
/* Socket was closed midflight by isc__nm_tlsdns_shutdown() */ if (!atomic_load(&sock->connecting)) {
if (!isc__nmsocket_active(sock)) { /*
* The connect was cancelled from timeout; just clean up
* the req.
*/
isc__nm_uvreq_put(&req, sock);
return;
} else if (!isc__nmsocket_active(sock)) {
/* Socket was closed midflight by isc__nm_tcpdns_shutdown() */
result = ISC_R_CANCELED; result = ISC_R_CANCELED;
goto error; goto error;
} } else if (status == UV_ETIMEDOUT) {
/* Timeout status code here indicates hard error */
if (status != 0) { result = ISC_R_CANCELED;
goto error;
} else if (status != 0) {
result = isc__nm_uverr2result(status); result = isc__nm_uverr2result(status);
goto error; goto error;
} }
@@ -248,10 +262,11 @@ tlsdns_connect_cb(uv_connect_t *uvreq, int status) {
#if HAVE_SSL_SET0_RBIO && HAVE_SSL_SET0_WBIO #if HAVE_SSL_SET0_RBIO && HAVE_SSL_SET0_WBIO
/* /*
* Note that if the rbio and wbio are the same then SSL_set0_rbio() and * Note that if the rbio and wbio are the same then
* SSL_set0_wbio() each take ownership of one reference. Therefore it * SSL_set0_rbio() and SSL_set0_wbio() each take ownership of
* may be necessary to increment the number of references available * one reference. Therefore it may be necessary to increment the
* using BIO_up_ref(3) before calling the set0 functions. * number of references available using BIO_up_ref(3) before
* calling the set0 functions.
*/ */
SSL_set0_rbio(sock->tls.tls, sock->tls.ssl_rbio); SSL_set0_rbio(sock->tls.tls, sock->tls.ssl_rbio);
SSL_set0_wbio(sock->tls.tls, sock->tls.ssl_wbio); SSL_set0_wbio(sock->tls.tls, sock->tls.ssl_wbio);
@@ -299,8 +314,8 @@ isc_nm_tlsdnsconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
sa_family = peer->addr.type.sa.sa_family; sa_family = peer->addr.type.sa.sa_family;
/* /*
* The socket() call can fail spuriously on FreeBSD 12, so we need to * The socket() call can fail spuriously on FreeBSD 12, so we
* handle the failure early and gracefully. * need to handle the failure early and gracefully.
*/ */
result = isc__nm_socket(sa_family, SOCK_STREAM, 0, &fd); result = isc__nm_socket(sa_family, SOCK_STREAM, 0, &fd);
if (result != ISC_R_SUCCESS) { if (result != ISC_R_SUCCESS) {
@@ -318,7 +333,7 @@ isc_nm_tlsdnsconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
atomic_init(&sock->client, true); atomic_init(&sock->client, true);
result = isc__nm_socket_connectiontimeout(fd, timeout); result = isc__nm_socket_connectiontimeout(fd, 120 * 1000); /* 2 mins */
RUNTIME_CHECK(result == ISC_R_SUCCESS); RUNTIME_CHECK(result == ISC_R_SUCCESS);
req = isc__nm_uvreq_get(mgr, sock); req = isc__nm_uvreq_get(mgr, sock);
@@ -552,8 +567,9 @@ isc__nm_async_tlsdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) {
#endif #endif
/* /*
* The callback will run in the same thread uv_listen() was called * The callback will run in the same thread uv_listen() was
* from, so a race with tlsdns_connection_cb() isn't possible. * called from, so a race with tlsdns_connection_cb() isn't
* possible.
*/ */
r = uv_listen((uv_stream_t *)&sock->uv_handle.tcp, sock->backlog, r = uv_listen((uv_stream_t *)&sock->uv_handle.tcp, sock->backlog,
tlsdns_connection_cb); tlsdns_connection_cb);
@@ -742,7 +758,8 @@ isc__nm_async_tlsdnsstop(isc__networker_t *worker, isc__netievent_t *ev0) {
} }
/* /*
* If network manager is interlocked, re-enqueue the event for later. * If network manager is interlocked, re-enqueue the event for
* later.
*/ */
if (!isc__nm_acquire_interlocked(sock->mgr)) { if (!isc__nm_acquire_interlocked(sock->mgr)) {
enqueue_stoplistening(sock); enqueue_stoplistening(sock);
@@ -780,8 +797,8 @@ isc__nm_tlsdns_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result) {
destroy: destroy:
isc__nmsocket_prep_destroy(sock); isc__nmsocket_prep_destroy(sock);
/* We need to detach from quota after the read callback function had a /* We need to detach from quota after the read callback function
* chance to be executed. */ * had a chance to be executed. */
if (sock->quota) { if (sock->quota) {
isc_quota_detach(&sock->quota); isc_quota_detach(&sock->quota);
} }
@@ -813,10 +830,11 @@ isc__nm_tlsdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
ievent = isc__nm_get_netievent_tlsdnsread(sock->mgr, sock); ievent = isc__nm_get_netievent_tlsdnsread(sock->mgr, sock);
/* /*
* This MUST be done asynchronously, no matter which thread we're * This MUST be done asynchronously, no matter which thread
* in. The callback function for isc_nm_read() often calls * we're in. The callback function for isc_nm_read() often calls
* isc_nm_read() again; if we tried to do that synchronously * isc_nm_read() again; if we tried to do that synchronously
* we'd clash in processbuffer() and grow the stack indefinitely. * we'd clash in processbuffer() and grow the stack
* indefinitely.
*/ */
isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
(isc__netievent_t *)ievent); (isc__netievent_t *)ievent);
@@ -892,7 +910,8 @@ isc__nm_tlsdns_processbuffer(isc_nmsocket_t *sock) {
/* /*
* We need to launch the resume_processing after the buffer has * We need to launch the resume_processing after the buffer has
* been consumed, thus we need to delay the detaching the handle. * been consumed, thus we need to delay the detaching the
* handle.
*/ */
isc_nmhandle_attach(req->handle, &handle); isc_nmhandle_attach(req->handle, &handle);
@@ -905,15 +924,16 @@ isc__nm_tlsdns_processbuffer(isc_nmsocket_t *sock) {
req->uvbuf.len = len; req->uvbuf.len = len;
/* /*
* If isc__nm_tlsdns_read() was called, it will be satisfied by single * If isc__nm_tlsdns_read() was called, it will be satisfied by
* DNS message in the next call. * single DNS message in the next call.
*/ */
sock->recv_read = false; sock->recv_read = false;
/* /*
* The assertion failure here means that there's a errnoneous extra * The assertion failure here means that there's a errnoneous
* nmhandle detach happening in the callback and resume_processing gets * extra nmhandle detach happening in the callback and
* called while we are still processing the buffer. * resume_processing gets called while we are still processing
* the buffer.
*/ */
REQUIRE(sock->processing == false); REQUIRE(sock->processing == false);
sock->processing = true; sock->processing = true;
@@ -958,10 +978,12 @@ tls_cycle_input(isc_nmsocket_t *sock) {
sock->buf + sock->buf_len, sock->buf + sock->buf_len,
sock->buf_size - sock->buf_len, &len); sock->buf_size - sock->buf_len, &len);
if (rv != 1) { if (rv != 1) {
/* Process what's in the buffer so far */ /* Process what's in the buffer so far
*/
isc__nm_process_sock_buffer(sock); isc__nm_process_sock_buffer(sock);
/* FIXME: Should we call failed_read_cb()? */ /* FIXME: Should we call
* failed_read_cb()? */
break; break;
} }
@@ -1032,7 +1054,7 @@ tls_cycle_input(isc_nmsocket_t *sock) {
atomic_store(&sock->connecting, false); atomic_store(&sock->connecting, false);
isc__nm_connectcb(sock, req, ISC_R_SUCCESS); isc__nm_connectcb(sock, req, ISC_R_SUCCESS, true);
} }
async_tlsdns_cycle(sock); async_tlsdns_cycle(sock);
} }
@@ -1141,7 +1163,8 @@ tls_cycle_output(isc_nmsocket_t *sock) {
req->uvbuf.len - err); req->uvbuf.len - err);
req->uvbuf.len = req->uvbuf.len - err; req->uvbuf.len = req->uvbuf.len - err;
} else if (err == UV_ENOSYS || err == UV_EAGAIN) { } else if (err == UV_ENOSYS || err == UV_EAGAIN) {
/* uv_try_write is not supported, send asynchronously */ /* uv_try_write is not supported, send
* asynchronously */
} else { } else {
result = isc__nm_uverr2result(err); result = isc__nm_uverr2result(err);
isc__nm_uvreq_put(&req, sock); isc__nm_uvreq_put(&req, sock);
@@ -1301,7 +1324,8 @@ quota_accept_cb(isc_quota_t *quota, void *sock0) {
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
/* /*
* Create a tlsdnsaccept event and pass it using the async channel. * Create a tlsdnsaccept event and pass it using the async
* channel.
*/ */
isc__netievent_tlsdnsaccept_t *ievent = isc__netievent_tlsdnsaccept_t *ievent =
@@ -1418,8 +1442,8 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) {
} }
/* /*
* The handle will be either detached on acceptcb failure or in the * The handle will be either detached on acceptcb failure or in
* readcb. * the readcb.
*/ */
handle = isc__nmhandle_get(csock, NULL, &local); handle = isc__nmhandle_get(csock, NULL, &local);
@@ -1444,10 +1468,11 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) {
#if HAVE_SSL_SET0_RBIO && HAVE_SSL_SET0_WBIO #if HAVE_SSL_SET0_RBIO && HAVE_SSL_SET0_WBIO
/* /*
* Note that if the rbio and wbio are the same then SSL_set0_rbio() and * Note that if the rbio and wbio are the same then
* SSL_set0_wbio() each take ownership of one reference. Therefore it * SSL_set0_rbio() and SSL_set0_wbio() each take ownership of
* may be necessary to increment the number of references available * one reference. Therefore it may be necessary to increment the
* using BIO_up_ref(3) before calling the set0 functions. * number of references available using BIO_up_ref(3) before
* calling the set0 functions.
*/ */
SSL_set0_rbio(csock->tls.tls, csock->tls.ssl_rbio); SSL_set0_rbio(csock->tls.tls, csock->tls.ssl_rbio);
SSL_set0_wbio(csock->tls.tls, csock->tls.ssl_wbio); SSL_set0_wbio(csock->tls.tls, csock->tls.ssl_wbio);
@@ -1468,15 +1493,15 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) {
csock->closehandle_cb = isc__nm_resume_processing; csock->closehandle_cb = isc__nm_resume_processing;
/* /*
* We need to keep the handle alive until we fail to read or connection * We need to keep the handle alive until we fail to read or
* is closed by the other side, it will be detached via * connection is closed by the other side, it will be detached
* prep_destroy()->tlsdns_close_direct(). * via prep_destroy()->tlsdns_close_direct().
*/ */
isc_nmhandle_attach(handle, &csock->recv_handle); isc_nmhandle_attach(handle, &csock->recv_handle);
/* /*
* The initial timer has been set, update the read timeout for the next * The initial timer has been set, update the read timeout for
* reads. * the next reads.
*/ */
csock->read_timeout = (atomic_load(&csock->keepalive) csock->read_timeout = (atomic_load(&csock->keepalive)
? atomic_load(&csock->mgr->keepalive) ? atomic_load(&csock->mgr->keepalive)
@@ -1593,8 +1618,8 @@ tlsdns_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
} }
/* /*
* There's no SSL_writev(), so we need to use a local buffer to assemble * There's no SSL_writev(), so we need to use a local buffer to
* the whole message * assemble the whole message
*/ */
worker = &sock->mgr->workers[sock->tid]; worker = &sock->mgr->workers[sock->tid];
sendlen = req->uvbuf.len + sizeof(uint16_t); sendlen = req->uvbuf.len + sizeof(uint16_t);
@@ -1664,10 +1689,7 @@ tlsdns_stop_cb(uv_handle_t *handle) {
} }
static void static void
tlsdns_close_cb(uv_handle_t *handle) { tlsdns_close_sock(isc_nmsocket_t *sock) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
uv_handle_set_data(handle, NULL);
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->tid == isc_nm_tid()); REQUIRE(sock->tid == isc_nm_tid());
REQUIRE(atomic_load(&sock->closing)); REQUIRE(atomic_load(&sock->closing));
@@ -1698,14 +1720,25 @@ tlsdns_close_cb(uv_handle_t *handle) {
isc__nmsocket_prep_destroy(sock); isc__nmsocket_prep_destroy(sock);
} }
/*
 * uv_close() completion callback for the TCP handle of a TLSDNS
 * socket.  Clears the socket pointer stored on the uv handle (so a
 * stale handle can never reach the socket again) and hands off to
 * tlsdns_close_sock() for the remaining teardown.
 */
static void
tlsdns_close_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
/* Detach the back-pointer before teardown frees/reuses the socket. */
uv_handle_set_data(handle, NULL);
tlsdns_close_sock(sock);
}
static void static void
timer_close_cb(uv_handle_t *handle) { timer_close_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle); isc_nmsocket_t *sock = uv_handle_get_data(handle);
uv_handle_set_data(handle, NULL); uv_handle_set_data(handle, NULL);
REQUIRE(VALID_NMSOCK(sock));
if (sock->parent) { if (sock->parent) {
uv_close(&sock->uv_handle.handle, tlsdns_stop_cb); uv_close(&sock->uv_handle.handle, tlsdns_stop_cb);
} else if (uv_is_closing(&sock->uv_handle.handle)) {
tlsdns_close_sock(sock);
} else { } else {
uv_close(&sock->uv_handle.handle, tlsdns_close_cb); uv_close(&sock->uv_handle.handle, tlsdns_close_cb);
} }
@@ -1798,7 +1831,8 @@ isc__nm_tlsdns_close(isc_nmsocket_t *sock) {
tlsdns_close_direct(sock); tlsdns_close_direct(sock);
} else { } else {
/* /*
* We need to create an event and pass it using async channel * We need to create an event and pass it using async
* channel
*/ */
isc__netievent_tlsdnsclose_t *ievent = isc__netievent_tlsdnsclose_t *ievent =
isc__nm_get_netievent_tlsdnsclose(sock->mgr, sock); isc__nm_get_netievent_tlsdnsclose(sock->mgr, sock);
@@ -1822,6 +1856,19 @@ isc__nm_async_tlsdnsclose(isc__networker_t *worker, isc__netievent_t *ev0) {
tlsdns_close_direct(sock); tlsdns_close_direct(sock);
} }
/*
 * uv_close() completion callback used when a TLSDNS socket is shut
 * down while still in the connecting state (see the
 * atomic_load(&sock->connecting) branch in isc__nm_tlsdns_shutdown(),
 * which attaches an extra reference before calling uv_close() with
 * this callback).  Runs on the socket's network thread: prepares the
 * socket for destruction and then drops that extra reference.
 */
static void
tlsdns_close_connect_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = uv_handle_get_data(handle);
/* Must run on the owning netmgr worker thread. */
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(isc__nm_in_netthread());
REQUIRE(sock->tid == isc_nm_tid());
isc__nmsocket_prep_destroy(sock);
/* Releases the reference taken by the shutdown path before uv_close(). */
isc__nmsocket_detach(&sock);
}
void void
isc__nm_tlsdns_shutdown(isc_nmsocket_t *sock) { isc__nm_tlsdns_shutdown(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_NMSOCK(sock));
@@ -1836,13 +1883,23 @@ isc__nm_tlsdns_shutdown(isc_nmsocket_t *sock) {
return; return;
} }
if (sock->accepting) {
return;
}
if (sock->tls.pending_req != NULL) { if (sock->tls.pending_req != NULL) {
isc__nm_uvreq_t *req = sock->tls.pending_req; isc__nm_uvreq_t *req = sock->tls.pending_req;
sock->tls.pending_req = NULL; sock->tls.pending_req = NULL;
isc__nm_failed_connect_cb(sock, req, ISC_R_CANCELED); isc__nm_failed_connect_cb(sock, req, ISC_R_CANCELED);
return;
} }
if (atomic_load(&sock->connecting) || sock->accepting) { if (atomic_load(&sock->connecting)) {
isc_nmsocket_t *tsock = NULL;
isc__nmsocket_attach(sock, &tsock);
uv_close(&sock->uv_handle.handle, tlsdns_close_connect_cb);
return; return;
} }

View File

@@ -699,7 +699,7 @@ isc__nm_async_udpconnect(isc__networker_t *worker, isc__netievent_t *ev0) {
* The callback has to be called after the socket has been * The callback has to be called after the socket has been
* initialized * initialized
*/ */
isc__nm_connectcb(sock, req, ISC_R_SUCCESS); isc__nm_connectcb(sock, req, ISC_R_SUCCESS, true);
} }
/* /*