mirror of https://gitlab.isc.org/isc-projects/bind9
synced 2025-09-01 15:05:23 +00:00
Fix a shutdown race in netmgr udp
We need to mark the socket as inactive early (and synchronously) in the stoplistening process; otherwise we might destroy the callback argument before we actually stop listening, and call the callback on bad memory.
committed by Evan Hunt
parent 3704c4fff2
commit 1cf65cd882
@@ -833,10 +833,13 @@ nmsocket_maybe_destroy(isc_nmsocket_t *sock) {
 	if (active_handles == 0 || sock->tcphandle != NULL) {
 		destroy = true;
 	}
-	UNLOCK(&sock->lock);
 
 	if (destroy) {
+		atomic_store(&sock->destroying, true);
+		UNLOCK(&sock->lock);
 		nmsocket_cleanup(sock, true);
+	} else {
+		UNLOCK(&sock->lock);
 	}
 }
 
@@ -210,19 +210,6 @@ static void
 stoplistening(isc_nmsocket_t *sock) {
 	REQUIRE(sock->type == isc_nm_udplistener);
 
-	/*
-	 * Socket is already closing; there's nothing to do.
-	 */
-	if (!isc__nmsocket_active(sock)) {
-		return;
-	}
-
-	/*
-	 * Mark it inactive now so that all sends will be ignored
-	 * and we won't try to stop listening again.
-	 */
-	atomic_store(&sock->active, false);
-
 	for (int i = 0; i < sock->nchildren; i++) {
 		isc__netievent_udpstop_t *event = NULL;
 
@@ -256,6 +243,18 @@ isc__nm_udp_stoplistening(isc_nmsocket_t *sock) {
 	REQUIRE(VALID_NMSOCK(sock));
 	REQUIRE(sock->type == isc_nm_udplistener);
 
+	/*
+	 * Socket is already closing; there's nothing to do.
+	 */
+	if (!isc__nmsocket_active(sock)) {
+		return;
+	}
+	/*
+	 * Mark it inactive now so that all sends will be ignored
+	 * and we won't try to stop listening again.
+	 */
+	atomic_store(&sock->active, false);
+
 	/*
 	 * If the manager is interlocked, re-enqueue this as an asynchronous
 	 * event. Otherwise, go ahead and stop listening right away.
@@ -336,10 +335,17 @@ udp_recv_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf,
 #endif
 
 	/*
-	 * If addr == NULL that's the end of stream - we can
-	 * free the buffer and bail.
+	 * Three reasons to return now without processing:
+	 * - If addr == NULL that's the end of stream - we can
+	 *   free the buffer and bail.
+	 * - If we're simulating a firewall blocking UDP packets
+	 *   bigger than 'maxudp' bytes for testing purposes.
+	 * - If the socket is no longer active.
 	 */
-	if (addr == NULL) {
+	maxudp = atomic_load(&sock->mgr->maxudp);
+	if ((addr == NULL) || (maxudp != 0 && (uint32_t)nrecv > maxudp) ||
+	    (!isc__nmsocket_active(sock)))
+	{
 		if (free_buf) {
 			isc__nm_free_uvbuf(sock, buf);
 		}
@@ -347,16 +353,6 @@ udp_recv_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf,
 		return;
 	}
 
-	/*
-	 * Simulate a firewall blocking UDP packets bigger than
-	 * 'maxudp' bytes.
-	 */
-	maxudp = atomic_load(&sock->mgr->maxudp);
-	if (maxudp != 0 && (uint32_t)nrecv > maxudp) {
-		isc__nmsocket_detach(&sock);
-		return;
-	}
-
 	result = isc_sockaddr_fromsockaddr(&sockaddr, addr);
 	RUNTIME_CHECK(result == ISC_R_SUCCESS);
 	nmhandle = isc__nmhandle_get(sock, &sockaddr, NULL);
@@ -398,7 +394,7 @@ isc__nm_udp_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
 	uint32_t maxudp = atomic_load(&sock->mgr->maxudp);
 
 	/*
-	 * Simulate a firewall blocking UDP packets bigger than
+	 * We're simulating a firewall blocking UDP packets bigger than
 	 * 'maxudp' bytes, for testing purposes.
 	 *
 	 * The client would ordinarily have unreferenced the handle
@@ -522,6 +518,9 @@ udp_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
 	REQUIRE(sock->tid == isc_nm_tid());
 	REQUIRE(sock->type == isc_nm_udpsocket);
 
+	if (!isc__nmsocket_active(sock)) {
+		return (ISC_R_CANCELED);
+	}
 	isc_nmhandle_ref(req->handle);
 	rv = uv_udp_send(&req->uv_req.udp_send, &sock->uv_handle.udp,
 			 &req->uvbuf, 1, &peer->type.sa, udp_send_cb);