2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-31 06:25:31 +00:00

Distribute queries among threads even on platforms without lb sockets

On platforms without a load-balancing socket, all the queries would be
handled by a single thread.  Currently, the support for load-balanced
sockets is present in Linux with SO_REUSEPORT and FreeBSD 12 with
SO_REUSEPORT_LB.

This commit adds a workaround for such platforms that:

1. sets up a single shared listening socket for all listening nmthreads
   for the UDP, TCP and TCPDNS netmgr transports

2. calls uv_udp_bind/uv_tcp_bind on the underlying socket just once and
   for the rest of the nmthreads only copies the internal libuv flags
   (should be just UV_HANDLE_BOUND and optionally UV_HANDLE_IPV6)

3. starts reading on the UDP socket or listening on the TCP socket

The load distribution among the nmthreads is uneven, but it's still
better than utilizing just one thread for processing all the incoming
queries.
This commit is contained in:
Ondřej Surý
2020-12-02 15:37:18 +01:00
parent d0f5407825
commit 1d066e4bc5
6 changed files with 182 additions and 125 deletions

View File

@@ -77,27 +77,26 @@ inactive(isc_nmsocket_t *sock) {
(sock->server != NULL && !isc__nmsocket_active(sock->server)));
}
static isc_result_t
isc__nm_udp_lb_socket(sa_family_t sa_family, uv_os_sock_t *sockp) {
static uv_os_sock_t
isc__nm_udp_lb_socket(sa_family_t sa_family) {
isc_result_t result;
uv_os_sock_t sock;
result = isc__nm_socket(sa_family, SOCK_DGRAM, 0, &sock);
REQUIRE(result == ISC_R_SUCCESS);
RUNTIME_CHECK(result == ISC_R_SUCCESS);
(void)isc__nm_socket_incoming_cpu(sock);
(void)isc__nm_socket_dontfrag(sock, sa_family);
result = isc__nm_socket_reuse(sock);
REQUIRE(result == ISC_R_SUCCESS);
RUNTIME_CHECK(result == ISC_R_SUCCESS);
#if HAVE_SO_REUSEPORT_LB
result = isc__nm_socket_reuse_lb(sock);
REQUIRE(result == ISC_R_SUCCESS || result == ISC_R_NOTIMPLEMENTED);
RUNTIME_CHECK(result == ISC_R_SUCCESS);
#endif
*sockp = sock;
return (result);
return (sock);
}
isc_result_t
@@ -107,6 +106,7 @@ isc_nm_listenudp(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
isc_nmsocket_t *sock = NULL;
sa_family_t sa_family = iface->addr.type.sa.sa_family;
size_t children_size = 0;
uv_os_sock_t fd = -1;
REQUIRE(VALID_NM(mgr));
@@ -118,7 +118,12 @@ isc_nm_listenudp(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
isc__nmsocket_init(sock, mgr, isc_nm_udplistener, iface);
sock->rchildren = 0;
#if defined(WIN32)
sock->nchildren = 1;
#else
sock->nchildren = mgr->nworkers;
#endif
children_size = sock->nchildren * sizeof(sock->children[0]);
sock->children = isc_mem_get(mgr->mctx, children_size);
memset(sock->children, 0, children_size);
@@ -130,6 +135,10 @@ isc_nm_listenudp(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
sock->tid = isc_random_uniform(mgr->nworkers);
sock->fd = -1;
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
fd = isc__nm_udp_lb_socket(sa_family);
#endif
for (size_t i = 0; i < mgr->nworkers; i++) {
isc__netievent_udplisten_t *ievent = NULL;
isc_nmsocket_t *csock = &sock->children[i];
@@ -143,7 +152,11 @@ isc_nm_listenudp(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
csock->extrahandlesize = sock->extrahandlesize;
csock->tid = i;
(void)isc__nm_udp_lb_socket(sa_family, &csock->fd);
#if HAVE_SO_REUSEPORT_LB || defined(WIN32)
csock->fd = isc__nm_udp_lb_socket(sa_family);
#else
csock->fd = dup(fd);
#endif
REQUIRE(csock->fd >= 0);
ievent = isc__nm_get_netievent_udplisten(mgr, csock);
@@ -151,6 +164,10 @@ isc_nm_listenudp(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
(isc__netievent_t *)ievent);
}
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
isc__nm_closesocket(fd);
#endif
LOCK(&sock->lock);
while (sock->rchildren != mgr->nworkers) {
WAIT(&sock->cond, &sock->lock);
@@ -205,11 +222,12 @@ udp_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf) {
void
isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) {
isc__netievent_udplisten_t *ievent = (isc__netievent_udplisten_t *)ev0;
isc_nmiface_t *iface;
isc_nmiface_t *iface = NULL;
isc_nmsocket_t *sock = NULL;
int r, uv_bind_flags = 0;
int uv_init_flags = 0;
sa_family_t sa_family;
isc_result_t result = ISC_R_DEFAULT;
REQUIRE(VALID_NMSOCK(ievent->sock));
REQUIRE(ievent->sock->tid == isc_nm_tid());
@@ -237,6 +255,8 @@ isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) {
RUNTIME_CHECK(r == 0);
uv_handle_set_data((uv_handle_t *)&sock->timer, sock);
LOCK(&sock->parent->lock);
r = uv_udp_open(&sock->uv_handle.udp, sock->fd);
if (r < 0) {
isc__nm_closesocket(sock->fd);
@@ -249,13 +269,33 @@ isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) {
uv_bind_flags |= UV_UDP_IPV6ONLY;
}
#if HAVE_SO_REUSEPORT_LB || WIN32
r = isc_uv_udp_freebind(&sock->uv_handle.udp,
&sock->parent->iface->addr.type.sa,
uv_bind_flags);
if (r < 0 && r != UV_EINVAL) {
if (r < 0) {
isc__nm_incstats(sock->mgr, sock->statsindex[STATID_BINDFAIL]);
goto failure;
}
#else
if (sock->parent->fd == -1) {
/* This thread is first, bind the socket */
r = isc_uv_udp_freebind(&sock->uv_handle.udp,
&sock->parent->iface->addr.type.sa,
uv_bind_flags);
if (r < 0) {
isc__nm_incstats(sock->mgr,
sock->statsindex[STATID_BINDFAIL]);
goto failure;
}
sock->parent->uv_handle.udp.flags = sock->uv_handle.udp.flags;
sock->parent->fd = sock->fd;
} else {
/* The socket is already bound, just copy the flags */
sock->uv_handle.udp.flags = sock->parent->uv_handle.udp.flags;
}
#endif
#ifdef ISC_RECV_BUFFER_SIZE
uv_recv_buffer_size(&sock->uv_handle.handle,
&(int){ ISC_RECV_BUFFER_SIZE });
@@ -272,24 +312,11 @@ isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) {
atomic_store(&sock->listening, true);
LOCK(&sock->parent->lock);
sock->parent->rchildren += 1;
if (sock->parent->result == ISC_R_DEFAULT) {
sock->parent->result = ISC_R_SUCCESS;
}
SIGNAL(&sock->parent->cond);
if (!atomic_load(&sock->parent->active)) {
WAIT(&sock->parent->scond, &sock->parent->lock);
}
INSIST(atomic_load(&sock->parent->active));
UNLOCK(&sock->parent->lock);
return;
failure:
LOCK(&sock->parent->lock);
result = isc__nm_uverr2result(r);
sock->parent->rchildren += 1;
if (sock->parent->result == ISC_R_DEFAULT) {
sock->parent->result = isc__nm_uverr2result(r);
sock->parent->result = result;
}
SIGNAL(&sock->parent->cond);
if (!atomic_load(&sock->parent->active)) {
@@ -359,7 +386,7 @@ static void
udp_recv_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf,
const struct sockaddr *addr, unsigned flags) {
isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)handle);
isc__nm_uvreq_t *req;
isc__nm_uvreq_t *req = NULL;
uint32_t maxudp;
bool free_buf;
isc_sockaddr_t sockaddr;
@@ -1224,7 +1251,7 @@ isc__nm_udp_cancelread(isc_nmhandle_t *handle) {
void
isc__nm_async_udpcancel(isc__networker_t *worker, isc__netievent_t *ev0) {
isc__netievent_udpcancel_t *ievent = (isc__netievent_udpcancel_t *)ev0;
isc_nmsocket_t *sock;
isc_nmsocket_t *sock = NULL;
UNUSED(worker);