Mirror of https://gitlab.isc.org/isc-projects/bind9, synced 2025-08-31 06:25:31 +00:00

Use barriers for netmgr synchronization

The netmgr listening, stoplistening, pausing and resuming functions
now use barriers for synchronization, which makes the code much simpler.

isc/barrier.h defines isc_barrier macros as a front-end for uv_barrier
on platforms where that works, and pthread_barrier where it doesn't
(including TSAN builds).
This commit is contained in:
Ondřej Surý
2021-05-05 11:51:39 +02:00
committed by Evan Hunt
parent 2eae7813b6
commit 4c8f6ebeb1
14 changed files with 589 additions and 328 deletions

View File

@@ -13,6 +13,7 @@
#include <uv.h>
#include <isc/atomic.h>
#include <isc/barrier.h>
#include <isc/buffer.h>
#include <isc/condition.h>
#include <isc/errno.h>
@@ -77,18 +78,44 @@ isc__nm_udp_lb_socket(sa_family_t sa_family) {
return (sock);
}
static void
start_udp_child(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nmsocket_t *sock,
uv_os_sock_t fd, int tid) {
isc_nmsocket_t *csock;
isc__netievent_udplisten_t *ievent = NULL;
csock = &sock->children[tid];
isc__nmsocket_init(csock, mgr, isc_nm_udpsocket, iface);
csock->parent = sock;
csock->iface = sock->iface;
csock->reading = true;
csock->recv_cb = sock->recv_cb;
csock->recv_cbarg = sock->recv_cbarg;
csock->extrahandlesize = sock->extrahandlesize;
csock->tid = tid;
#if HAVE_SO_REUSEPORT_LB || defined(WIN32)
UNUSED(fd);
csock->fd = isc__nm_udp_lb_socket(iface->addr.type.sa.sa_family);
#else
csock->fd = dup(fd);
#endif
REQUIRE(csock->fd >= 0);
ievent = isc__nm_get_netievent_udplisten(mgr, csock);
isc__nm_maybe_enqueue_ievent(&mgr->workers[tid],
(isc__netievent_t *)ievent);
}
isc_result_t
isc_nm_listenudp(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
void *cbarg, size_t extrahandlesize, isc_nmsocket_t **sockp) {
isc_result_t result = ISC_R_SUCCESS;
isc_nmsocket_t *sock = NULL;
sa_family_t sa_family = iface->addr.type.sa.sa_family;
size_t children_size = 0;
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
uv_os_sock_t fd = -1;
#endif
REQUIRE(VALID_NM(mgr));
uv_os_sock_t fd = -1;
/*
* We are creating mgr->nworkers duplicated sockets, one
@@ -97,7 +124,7 @@ isc_nm_listenudp(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
sock = isc_mem_get(mgr->mctx, sizeof(isc_nmsocket_t));
isc__nmsocket_init(sock, mgr, isc_nm_udplistener, iface);
sock->rchildren = 0;
atomic_init(&sock->rchildren, 0);
#if defined(WIN32)
sock->nchildren = 1;
#else
@@ -111,37 +138,29 @@ isc_nm_listenudp(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
sock->recv_cb = cb;
sock->recv_cbarg = cbarg;
sock->extrahandlesize = extrahandlesize;
sock->result = ISC_R_DEFAULT;
sock->tid = isc_random_uniform(sock->nchildren);
sock->result = ISC_R_UNSET;
if (isc__nm_in_netthread()) {
sock->tid = isc_nm_tid();
} else {
sock->tid = isc_random_uniform(sock->nchildren);
}
sock->fd = -1;
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
fd = isc__nm_udp_lb_socket(sa_family);
fd = isc__nm_udp_lb_socket(iface->addr.type.sa.sa_family);
#endif
isc_barrier_init(&sock->startlistening, sock->nchildren);
for (size_t i = 0; i < sock->nchildren; i++) {
isc__netievent_udplisten_t *ievent = NULL;
isc_nmsocket_t *csock = &sock->children[i];
if ((int)i == isc_nm_tid()) {
continue;
}
start_udp_child(mgr, iface, sock, fd, i);
}
isc__nmsocket_init(csock, mgr, isc_nm_udpsocket, iface);
csock->parent = sock;
csock->iface = sock->iface;
csock->reading = true;
csock->recv_cb = cb;
csock->recv_cbarg = cbarg;
csock->extrahandlesize = sock->extrahandlesize;
csock->tid = i;
#if HAVE_SO_REUSEPORT_LB || defined(WIN32)
csock->fd = isc__nm_udp_lb_socket(sa_family);
#else
csock->fd = dup(fd);
#endif
REQUIRE(csock->fd >= 0);
ievent = isc__nm_get_netievent_udplisten(mgr, csock);
isc__nm_maybe_enqueue_ievent(&mgr->workers[i],
(isc__netievent_t *)ievent);
if (isc__nm_in_netthread()) {
start_udp_child(mgr, iface, sock, fd, isc_nm_tid());
}
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
@@ -149,21 +168,21 @@ isc_nm_listenudp(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
#endif
LOCK(&sock->lock);
while (sock->rchildren != sock->nchildren) {
while (atomic_load(&sock->rchildren) != sock->nchildren) {
WAIT(&sock->cond, &sock->lock);
}
result = sock->result;
atomic_store(&sock->active, true);
BROADCAST(&sock->scond);
UNLOCK(&sock->lock);
INSIST(result != ISC_R_DEFAULT);
INSIST(result != ISC_R_UNSET);
if (result == ISC_R_SUCCESS) {
REQUIRE(sock->rchildren == sock->nchildren);
REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren);
*sockp = sock;
} else {
atomic_store(&sock->active, false);
isc__nm_udp_stoplistening(sock);
isc_nm_stoplistening(sock);
isc_nmsocket_close(&sock);
}
@@ -181,7 +200,7 @@ isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) {
int r, uv_bind_flags = 0;
int uv_init_flags = 0;
sa_family_t sa_family;
isc_result_t result = ISC_R_DEFAULT;
isc_result_t result = ISC_R_UNSET;
REQUIRE(VALID_NMSOCK(ievent->sock));
REQUIRE(ievent->sock->tid == isc_nm_tid());
@@ -269,16 +288,14 @@ isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) {
done:
result = isc__nm_uverr2result(r);
sock->parent->rchildren += 1;
if (sock->parent->result == ISC_R_DEFAULT) {
atomic_fetch_add(&sock->parent->rchildren, 1);
if (sock->parent->result == ISC_R_UNSET) {
sock->parent->result = result;
}
SIGNAL(&sock->parent->cond);
if (!atomic_load(&sock->parent->active)) {
WAIT(&sock->parent->scond, &sock->parent->lock);
}
INSIST(atomic_load(&sock->parent->active));
UNLOCK(&sock->parent->lock);
isc_barrier_wait(&sock->parent->startlistening);
}
static void
@@ -300,7 +317,14 @@ isc__nm_udp_stoplistening(isc_nmsocket_t *sock) {
ISC_UNREACHABLE();
}
enqueue_stoplistening(sock);
if (!isc__nm_in_netthread()) {
enqueue_stoplistening(sock);
} else if (!isc__nm_acquire_interlocked(sock->mgr)) {
enqueue_stoplistening(sock);
} else {
stop_udp_parent(sock);
isc__nm_drop_interlocked(sock->mgr);
}
}
/*
@@ -324,7 +348,12 @@ isc__nm_async_udpstop(isc__networker_t *worker, isc__netievent_t *ev0) {
/*
* If network manager is paused, re-enqueue the event for later.
*/
stop_udp_parent(sock);
if (!isc__nm_acquire_interlocked(sock->mgr)) {
enqueue_stoplistening(sock);
} else {
stop_udp_parent(sock);
isc__nm_drop_interlocked(sock->mgr);
}
}
/*
@@ -590,7 +619,7 @@ static isc_result_t
udp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) {
isc__networker_t *worker = NULL;
int uv_bind_flags = UV_UDP_REUSEADDR;
isc_result_t result = ISC_R_DEFAULT;
isc_result_t result = ISC_R_UNSET;
int tries = 3;
int r;
@@ -733,7 +762,7 @@ isc_nm_udpconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
sock->read_timeout = timeout;
sock->extrahandlesize = extrahandlesize;
sock->peer = peer->addr;
sock->result = ISC_R_DEFAULT;
sock->result = ISC_R_UNSET;
atomic_init(&sock->client, true);
req = isc__nm_uvreq_get(mgr, sock);
@@ -782,7 +811,7 @@ isc_nm_udpconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
(isc__netievent_t *)event);
}
LOCK(&sock->lock);
while (sock->result == ISC_R_DEFAULT) {
while (sock->result == ISC_R_UNSET) {
WAIT(&sock->cond, &sock->lock);
}
atomic_store(&sock->active, true);
@@ -970,8 +999,6 @@ stop_udp_child(isc_nmsocket_t *sock) {
REQUIRE(sock->type == isc_nm_udpsocket);
REQUIRE(sock->tid == isc_nm_tid());
bool last_child = false;
if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
true)) {
return;
@@ -979,33 +1006,41 @@ stop_udp_child(isc_nmsocket_t *sock) {
udp_close_direct(sock);
LOCK(&sock->parent->lock);
sock->parent->rchildren -= 1;
last_child = (sock->parent->rchildren == 0);
UNLOCK(&sock->parent->lock);
atomic_fetch_sub(&sock->parent->rchildren, 1);
if (last_child) {
atomic_store(&sock->parent->closed, true);
isc__nmsocket_prep_destroy(sock->parent);
}
isc_barrier_wait(&sock->parent->stoplistening);
}
static void
stop_udp_parent(isc_nmsocket_t *sock) {
isc_nmsocket_t *csock = NULL;
REQUIRE(VALID_NMSOCK(sock));
REQUIRE(sock->type == isc_nm_udplistener);
isc_barrier_init(&sock->stoplistening, sock->nchildren);
for (size_t i = 0; i < sock->nchildren; i++) {
isc__netievent_udpstop_t *ievent = NULL;
isc_nmsocket_t *csock = &sock->children[i];
csock = &sock->children[i];
REQUIRE(VALID_NMSOCK(csock));
atomic_store(&csock->active, false);
if ((int)i == isc_nm_tid()) {
/*
* We need to schedule closing the other sockets first
*/
continue;
}
ievent = isc__nm_get_netievent_udpstop(sock->mgr, csock);
isc__nm_enqueue_ievent(&sock->mgr->workers[i],
(isc__netievent_t *)ievent);
atomic_store(&csock->active, false);
enqueue_stoplistening(csock);
}
csock = &sock->children[isc_nm_tid()];
atomic_store(&csock->active, false);
stop_udp_child(csock);
atomic_store(&sock->closed, true);
isc__nmsocket_prep_destroy(sock);
}
static void