2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-30 22:15:20 +00:00

Fix a race in tcpdns close with uv_close on timer

stop timers before closing

netmgr: tcpdns_close needs to be asynchronous, it manipulates sock->timer
This commit is contained in:
Witold Kręcicki
2019-12-06 22:25:52 +01:00
parent 23ab349bbd
commit 3e66b7ba1c
4 changed files with 60 additions and 26 deletions

View File

@@ -122,6 +122,7 @@ typedef enum isc__netievent_type {
netievent_udpstoplisten, netievent_udpstoplisten,
netievent_tcpstoplisten, netievent_tcpstoplisten,
netievent_tcpclose, netievent_tcpclose,
netievent_tcpdnsclose,
netievent_prio = 0xff, /* event type values higher than this netievent_prio = 0xff, /* event type values higher than this
* will be treated as high-priority * will be treated as high-priority
* events, which can be processed * events, which can be processed
@@ -194,6 +195,7 @@ typedef isc__netievent__socket_t isc__netievent_udpstoplisten_t;
typedef isc__netievent__socket_t isc__netievent_tcpstoplisten_t; typedef isc__netievent__socket_t isc__netievent_tcpstoplisten_t;
typedef isc__netievent__socket_t isc__netievent_tcpstopchildlisten_t; typedef isc__netievent__socket_t isc__netievent_tcpstopchildlisten_t;
typedef isc__netievent__socket_t isc__netievent_tcpclose_t; typedef isc__netievent__socket_t isc__netievent_tcpclose_t;
typedef isc__netievent__socket_t isc__netievent_tcpdnsclose_t;
typedef isc__netievent__socket_t isc__netievent_startread_t; typedef isc__netievent__socket_t isc__netievent_startread_t;
typedef isc__netievent__socket_t isc__netievent_pauseread_t; typedef isc__netievent__socket_t isc__netievent_pauseread_t;
typedef isc__netievent__socket_t isc__netievent_closecb_t; typedef isc__netievent__socket_t isc__netievent_closecb_t;
@@ -644,6 +646,9 @@ isc__nm_tcpdns_close(isc_nmsocket_t *sock);
* Close a TCPDNS socket. * Close a TCPDNS socket.
*/ */
void
isc__nm_async_tcpdnsclose(isc__networker_t *worker, isc__netievent_t *ievent0);
#define isc__nm_uverr2result(x) \ #define isc__nm_uverr2result(x) \
isc___nm_uverr2result(x, true, __FILE__, __LINE__) isc___nm_uverr2result(x, true, __FILE__, __LINE__)
isc_result_t isc_result_t

View File

@@ -550,6 +550,9 @@ process_queue(isc__networker_t *worker, isc_queue_t *queue) {
case netievent_tcpclose: case netievent_tcpclose:
isc__nm_async_tcpclose(worker, ievent); isc__nm_async_tcpclose(worker, ievent);
break; break;
case netievent_tcpdnsclose:
isc__nm_async_tcpdnsclose(worker, ievent);
break;
case netievent_closecb: case netievent_closecb:
isc__nm_async_closecb(worker, ievent); isc__nm_async_closecb(worker, ievent);
break; break;
@@ -675,8 +678,9 @@ nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree) {
sock->pquota = NULL; sock->pquota = NULL;
if (sock->timer_initialized) { if (sock->timer_initialized) {
uv_close((uv_handle_t *)&sock->timer, NULL);
sock->timer_initialized = false; sock->timer_initialized = false;
uv_timer_stop(&sock->timer);
uv_close((uv_handle_t *)&sock->timer, NULL);
} }
isc_astack_destroy(sock->inactivehandles); isc_astack_destroy(sock->inactivehandles);
@@ -1022,6 +1026,7 @@ isc_nmhandle_unref(isc_nmhandle_t *handle) {
isc_nmsocket_t *sock = NULL; isc_nmsocket_t *sock = NULL;
size_t handlenum; size_t handlenum;
bool reuse = false; bool reuse = false;
bool do_close = true;
int refs; int refs;
REQUIRE(VALID_NMHANDLE(handle)); REQUIRE(VALID_NMHANDLE(handle));
@@ -1039,28 +1044,28 @@ isc_nmhandle_unref(isc_nmhandle_t *handle) {
handle->doreset(handle->opaque); handle->doreset(handle->opaque);
} }
/* /*
* The handle is closed. If the socket has a callback configured * The handle is closed. If the socket has a callback configured
* for that (e.g., to perform cleanup after request processing), * for that (e.g., to perform cleanup after request processing),
* call it now. * call it now, or schedule it to run asynchronously.
*/ */
bool do_close = true;
if (sock->closehandle_cb != NULL) { if (sock->closehandle_cb != NULL) {
if (sock->tid == isc_nm_tid()) { if (sock->tid == isc_nm_tid()) {
sock->closehandle_cb(sock); sock->closehandle_cb(sock);
} else { } else {
isc__netievent_closecb_t * event = isc__netievent_closecb_t *event =
isc__nm_get_ievent(sock->mgr, isc__nm_get_ievent(sock->mgr,
netievent_closecb); netievent_closecb);
isc_nmsocket_attach(sock, &event->sock); isc_nmsocket_attach(sock, &event->sock);
isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
(isc__netievent_t *) event); (isc__netievent_t *) event);
/* /*
* If we do this asynchronously then the async event * If we're doing this asynchronously, then the
* will clean the socket, so clean up the handle from * async event will take care of closing the
* socket and exit. * socket, so we can clean up the handle
* from the socket, but skip calling
* nmsocket_maybe_destroy()
*/ */
do_close = false; do_close = false;
} }
@@ -1068,9 +1073,8 @@ isc_nmhandle_unref(isc_nmhandle_t *handle) {
/* /*
* We do all of this under lock to avoid races with socket * We do all of this under lock to avoid races with socket
* destruction. * destruction. We have to do this now, because at this point the
* We have to do this now otherwise we might race - at this point * socket is either unused or still attached to event->sock.
* the socket is either unused or attached to event->sock.
*/ */
LOCK(&sock->lock); LOCK(&sock->lock);
@@ -1092,15 +1096,8 @@ isc_nmhandle_unref(isc_nmhandle_t *handle) {
} }
UNLOCK(&sock->lock); UNLOCK(&sock->lock);
/* Close callback will clean everything up */ if (do_close && atomic_load(&sock->ah) == 0 &&
if (!do_close) { !atomic_load(&sock->active) && !atomic_load(&sock->destroying))
return;
}
if (atomic_load(&sock->ah) == 0 &&
!atomic_load(&sock->active) &&
!atomic_load(&sock->destroying))
{ {
nmsocket_maybe_destroy(sock); nmsocket_maybe_destroy(sock);
} }

View File

@@ -1003,8 +1003,9 @@ tcp_close_direct(isc_nmsocket_t *sock) {
} }
} }
if (sock->timer_initialized) { if (sock->timer_initialized) {
uv_close((uv_handle_t *)&sock->timer, timer_close_cb);
sock->timer_initialized = false; sock->timer_initialized = false;
uv_timer_stop(&sock->timer);
uv_close((uv_handle_t *)&sock->timer, timer_close_cb);
} else { } else {
isc_nmsocket_detach(&sock->server); isc_nmsocket_detach(&sock->server);
uv_close(&sock->uv_handle.handle, tcp_close_cb); uv_close(&sock->uv_handle.handle, tcp_close_cb);

View File

@@ -79,7 +79,6 @@ static void
timer_close_cb(uv_handle_t *handle) { timer_close_cb(uv_handle_t *handle) {
isc_nmsocket_t *sock = (isc_nmsocket_t *) uv_handle_get_data(handle); isc_nmsocket_t *sock = (isc_nmsocket_t *) uv_handle_get_data(handle);
INSIST(VALID_NMSOCK(sock)); INSIST(VALID_NMSOCK(sock));
sock->timer_initialized = false;
atomic_store(&sock->closed, true); atomic_store(&sock->closed, true);
isc_nmsocket_detach(&sock); isc_nmsocket_detach(&sock);
} }
@@ -489,10 +488,42 @@ isc__nm_tcpdns_send(isc_nmhandle_t *handle, isc_region_t *region,
} }
void static void
isc__nm_tcpdns_close(isc_nmsocket_t *sock) { tcpdns_close_direct(isc_nmsocket_t *sock) {
if (sock->outer != NULL) { if (sock->outer != NULL) {
isc_nmsocket_detach(&sock->outer); isc_nmsocket_detach(&sock->outer);
} }
/* We don't need atomics here, it's all in a single network thread */
if (sock->timer_initialized) {
sock->timer_initialized = false;
uv_timer_stop(&sock->timer);
uv_close((uv_handle_t *) &sock->timer, timer_close_cb); uv_close((uv_handle_t *) &sock->timer, timer_close_cb);
}
}
void
isc__nm_tcpdns_close(isc_nmsocket_t *sock) {
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_tcpdnssocket);
if (sock->tid == isc_nm_tid()) {
tcpdns_close_direct(sock);
} else {
isc__netievent_tcpdnsclose_t *ievent =
isc__nm_get_ievent(sock->mgr, netievent_tcpdnsclose);
ievent->sock = sock;
isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
(isc__netievent_t *) ievent);
}
}
void
isc__nm_async_tcpdnsclose(isc__networker_t *worker, isc__netievent_t *ievent0) {
isc__netievent_tcpdnsclose_t *ievent =
(isc__netievent_tcpdnsclose_t *) ievent0;
REQUIRE(worker->id == ievent->sock->tid);
tcpdns_close_direct(ievent->sock);
} }