mirror of
https://gitlab.isc.org/isc-projects/bind9
synced 2025-08-30 22:15:20 +00:00
Distribute queries among threads even on platforms without lb sockets
On platforms without load-balancing sockets, all the queries would be handled by a single thread. Currently, the support for load-balanced sockets is present on Linux with SO_REUSEPORT and FreeBSD 12 with SO_REUSEPORT_LB. This commit adds a workaround for such platforms that: 1. sets up a single shared listening socket for all listening nmthreads for the UDP, TCP and TCPDNS netmgr transports 2. calls uv_udp_bind/uv_tcp_bind on the underlying socket just once and for the rest of the nmthreads only copies the internal libuv flags (should be just UV_HANDLE_BOUND and optionally UV_HANDLE_IPV6) 3. starts reading on the UDP socket or listening on the TCP socket. The load distribution among the nmthreads is uneven, but it's still better than utilizing just one thread for processing all the incoming queries.
This commit is contained in:
@@ -421,27 +421,27 @@ isc_nm_tcpdnsconnect(isc_nm_t *mgr, isc_nmiface_t *local, isc_nmiface_t *peer,
|
||||
return (result);
|
||||
}
|
||||
|
||||
static isc_result_t
|
||||
isc__nm_tcpdns_lb_socket(sa_family_t sa_family, uv_os_sock_t *sockp) {
|
||||
static uv_os_sock_t
|
||||
isc__nm_tcpdns_lb_socket(sa_family_t sa_family) {
|
||||
isc_result_t result;
|
||||
uv_os_sock_t sock;
|
||||
|
||||
result = isc__nm_socket(sa_family, SOCK_STREAM, 0, &sock);
|
||||
REQUIRE(result == ISC_R_SUCCESS);
|
||||
RUNTIME_CHECK(result == ISC_R_SUCCESS);
|
||||
|
||||
(void)isc__nm_socket_incoming_cpu(sock);
|
||||
|
||||
/* FIXME: set mss */
|
||||
|
||||
result = isc__nm_socket_reuse(sock);
|
||||
REQUIRE(result == ISC_R_SUCCESS || result == ISC_R_NOTIMPLEMENTED);
|
||||
RUNTIME_CHECK(result == ISC_R_SUCCESS);
|
||||
|
||||
#if HAVE_SO_REUSEPORT_LB
|
||||
result = isc__nm_socket_reuse_lb(sock);
|
||||
REQUIRE(result == ISC_R_SUCCESS || result == ISC_R_NOTIMPLEMENTED);
|
||||
RUNTIME_CHECK(result == ISC_R_SUCCESS);
|
||||
#endif
|
||||
|
||||
*sockp = sock;
|
||||
|
||||
return (result);
|
||||
return (sock);
|
||||
}
|
||||
|
||||
isc_result_t
|
||||
@@ -454,6 +454,7 @@ isc_nm_listentcpdns(isc_nm_t *mgr, isc_nmiface_t *iface,
|
||||
isc_nmsocket_t *sock = NULL;
|
||||
sa_family_t sa_family = iface->addr.type.sa.sa_family;
|
||||
size_t children_size = 0;
|
||||
uv_os_sock_t fd = -1;
|
||||
|
||||
REQUIRE(VALID_NM(mgr));
|
||||
|
||||
@@ -461,7 +462,11 @@ isc_nm_listentcpdns(isc_nm_t *mgr, isc_nmiface_t *iface,
|
||||
isc__nmsocket_init(sock, mgr, isc_nm_tcpdnslistener, iface);
|
||||
|
||||
sock->rchildren = 0;
|
||||
#if defined(WIN32)
|
||||
sock->nchildren = 1;
|
||||
#else
|
||||
sock->nchildren = mgr->nworkers;
|
||||
#endif
|
||||
children_size = sock->nchildren * sizeof(sock->children[0]);
|
||||
sock->children = isc_mem_get(mgr->mctx, children_size);
|
||||
memset(sock->children, 0, children_size);
|
||||
@@ -470,6 +475,10 @@ isc_nm_listentcpdns(isc_nm_t *mgr, isc_nmiface_t *iface,
|
||||
sock->tid = isc_random_uniform(mgr->nworkers);
|
||||
sock->fd = -1;
|
||||
|
||||
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
|
||||
fd = isc__nm_tcpdns_lb_socket(sa_family);
|
||||
#endif
|
||||
|
||||
for (size_t i = 0; i < mgr->nworkers; i++) {
|
||||
isc__netievent_tcpdnslisten_t *ievent = NULL;
|
||||
isc_nmsocket_t *csock = &sock->children[i];
|
||||
@@ -490,9 +499,11 @@ isc_nm_listentcpdns(isc_nm_t *mgr, isc_nmiface_t *iface,
|
||||
csock->pquota = quota;
|
||||
isc_quota_cb_init(&csock->quotacb, quota_accept_cb, csock);
|
||||
|
||||
result = isc__nm_tcpdns_lb_socket(sa_family, &csock->fd);
|
||||
REQUIRE(result == ISC_R_SUCCESS ||
|
||||
result == ISC_R_NOTIMPLEMENTED);
|
||||
#if HAVE_SO_REUSEPORT_LB || defined(WIN32)
|
||||
csock->fd = isc__nm_tcpdns_lb_socket(sa_family);
|
||||
#else
|
||||
csock->fd = dup(fd);
|
||||
#endif
|
||||
REQUIRE(csock->fd >= 0);
|
||||
|
||||
ievent = isc__nm_get_netievent_tcpdnslisten(mgr, csock);
|
||||
@@ -500,6 +511,10 @@ isc_nm_listentcpdns(isc_nm_t *mgr, isc_nmiface_t *iface,
|
||||
(isc__netievent_t *)ievent);
|
||||
}
|
||||
|
||||
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
|
||||
isc__nm_closesocket(fd);
|
||||
#endif
|
||||
|
||||
LOCK(&sock->lock);
|
||||
while (sock->rchildren != mgr->nworkers) {
|
||||
WAIT(&sock->cond, &sock->lock);
|
||||
@@ -526,11 +541,12 @@ void
|
||||
isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) {
|
||||
isc__netievent_tcpdnslisten_t *ievent =
|
||||
(isc__netievent_tcpdnslisten_t *)ev0;
|
||||
isc_nmiface_t *iface;
|
||||
isc_nmiface_t *iface = NULL;
|
||||
sa_family_t sa_family;
|
||||
int r;
|
||||
int flags = 0;
|
||||
isc_nmsocket_t *sock = NULL;
|
||||
isc_result_t result = ISC_R_DEFAULT;
|
||||
|
||||
REQUIRE(VALID_NMSOCK(ievent->sock));
|
||||
REQUIRE(ievent->sock->tid == isc_nm_tid());
|
||||
@@ -557,6 +573,8 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) {
|
||||
RUNTIME_CHECK(r == 0);
|
||||
uv_handle_set_data((uv_handle_t *)&sock->timer, sock);
|
||||
|
||||
LOCK(&sock->parent->lock);
|
||||
|
||||
r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd);
|
||||
if (r < 0) {
|
||||
isc__nm_closesocket(sock->fd);
|
||||
@@ -569,12 +587,29 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) {
|
||||
flags = UV_TCP_IPV6ONLY;
|
||||
}
|
||||
|
||||
#if HAVE_SO_REUSEPORT_LB || defined(WIN32)
|
||||
r = isc_uv_tcp_freebind(&sock->uv_handle.tcp,
|
||||
&sock->iface->addr.type.sa, flags);
|
||||
if (r < 0 && r != UV_EINVAL) {
|
||||
if (r < 0) {
|
||||
isc__nm_incstats(sock->mgr, sock->statsindex[STATID_BINDFAIL]);
|
||||
goto failure;
|
||||
}
|
||||
#else
|
||||
if (sock->parent->fd == -1) {
|
||||
r = isc_uv_tcp_freebind(&sock->uv_handle.tcp,
|
||||
&sock->iface->addr.type.sa, flags);
|
||||
if (r < 0) {
|
||||
isc__nm_incstats(sock->mgr,
|
||||
sock->statsindex[STATID_BINDFAIL]);
|
||||
goto failure;
|
||||
}
|
||||
sock->parent->uv_handle.tcp.flags = sock->uv_handle.tcp.flags;
|
||||
sock->parent->fd = sock->fd;
|
||||
} else {
|
||||
/* The socket is already bound, just copy the flags */
|
||||
sock->uv_handle.tcp.flags = sock->parent->uv_handle.tcp.flags;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The callback will run in the same thread uv_listen() was called
|
||||
@@ -582,7 +617,7 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) {
|
||||
*/
|
||||
r = uv_listen((uv_stream_t *)&sock->uv_handle.tcp, sock->backlog,
|
||||
tcpdns_connection_cb);
|
||||
if (r < 0) {
|
||||
if (r != 0) {
|
||||
isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL,
|
||||
ISC_LOGMODULE_NETMGR, ISC_LOG_ERROR,
|
||||
"uv_listen failed: %s",
|
||||
@@ -593,27 +628,15 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) {
|
||||
|
||||
atomic_store(&sock->listening, true);
|
||||
|
||||
LOCK(&sock->parent->lock);
|
||||
sock->parent->rchildren += 1;
|
||||
if (sock->parent->result == ISC_R_DEFAULT) {
|
||||
sock->parent->result = ISC_R_SUCCESS;
|
||||
}
|
||||
SIGNAL(&sock->parent->cond);
|
||||
if (!atomic_load(&sock->parent->active)) {
|
||||
WAIT(&sock->parent->scond, &sock->parent->lock);
|
||||
}
|
||||
INSIST(atomic_load(&sock->parent->active));
|
||||
UNLOCK(&sock->parent->lock);
|
||||
|
||||
return;
|
||||
|
||||
failure:
|
||||
sock->pquota = NULL;
|
||||
result = isc__nm_uverr2result(r);
|
||||
if (result != ISC_R_SUCCESS) {
|
||||
sock->pquota = NULL;
|
||||
}
|
||||
|
||||
LOCK(&sock->parent->lock);
|
||||
sock->parent->rchildren += 1;
|
||||
if (sock->parent->result == ISC_R_DEFAULT) {
|
||||
sock->parent->result = isc__nm_uverr2result(r);
|
||||
sock->parent->result = result;
|
||||
}
|
||||
SIGNAL(&sock->parent->cond);
|
||||
if (!atomic_load(&sock->parent->active)) {
|
||||
@@ -921,7 +944,7 @@ isc__nm_async_tcpdnsread(isc__networker_t *worker, isc__netievent_t *ev0) {
|
||||
static isc_result_t
|
||||
processbuffer(isc_nmsocket_t *sock) {
|
||||
size_t len;
|
||||
isc__nm_uvreq_t *req;
|
||||
isc__nm_uvreq_t *req = NULL;
|
||||
isc_nmhandle_t *handle = NULL;
|
||||
|
||||
REQUIRE(VALID_NMSOCK(sock));
|
||||
@@ -1049,14 +1072,15 @@ free:
|
||||
static void
|
||||
quota_accept_cb(isc_quota_t *quota, void *sock0) {
|
||||
isc_nmsocket_t *sock = (isc_nmsocket_t *)sock0;
|
||||
isc__netievent_tcpdnsaccept_t *ievent = NULL;
|
||||
|
||||
REQUIRE(VALID_NMSOCK(sock));
|
||||
|
||||
/*
|
||||
* Create a tcpdnsaccept event and pass it using the async channel.
|
||||
*/
|
||||
ievent = isc__nm_get_netievent_tcpdnsaccept(sock->mgr, sock, quota);
|
||||
|
||||
isc__netievent_tcpdnsaccept_t *ievent =
|
||||
isc__nm_get_netievent_tcpdnsaccept(sock->mgr, sock, quota);
|
||||
isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid],
|
||||
(isc__netievent_t *)ievent);
|
||||
}
|
||||
@@ -1068,15 +1092,14 @@ void
|
||||
isc__nm_async_tcpdnsaccept(isc__networker_t *worker, isc__netievent_t *ev0) {
|
||||
isc__netievent_tcpdnsaccept_t *ievent =
|
||||
(isc__netievent_tcpdnsaccept_t *)ev0;
|
||||
isc_nmsocket_t *sock = ievent->sock;
|
||||
isc_result_t result;
|
||||
|
||||
UNUSED(worker);
|
||||
|
||||
REQUIRE(VALID_NMSOCK(sock));
|
||||
REQUIRE(sock->tid == isc_nm_tid());
|
||||
REQUIRE(VALID_NMSOCK(ievent->sock));
|
||||
REQUIRE(ievent->sock->tid == isc_nm_tid());
|
||||
|
||||
result = accept_connection(sock, ievent->quota);
|
||||
result = accept_connection(ievent->sock, ievent->quota);
|
||||
if (result != ISC_R_SUCCESS && result != ISC_R_NOCONN) {
|
||||
if ((result != ISC_R_QUOTA && result != ISC_R_SOFTQUOTA) ||
|
||||
can_log_tcpdns_quota())
|
||||
@@ -1098,7 +1121,7 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) {
|
||||
struct sockaddr_storage peer_ss;
|
||||
struct sockaddr_storage local_ss;
|
||||
isc_sockaddr_t local;
|
||||
isc_nmhandle_t *handle;
|
||||
isc_nmhandle_t *handle = NULL;
|
||||
|
||||
REQUIRE(VALID_NMSOCK(ssock));
|
||||
REQUIRE(ssock->tid == isc_nm_tid());
|
||||
|
Reference in New Issue
Block a user