/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

#include <inttypes.h>
#include <unistd.h>
#include <uv.h>

#include <isc/atomic.h>
#include <isc/buffer.h>
#include <isc/condition.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/netmgr.h>
#include <isc/print.h>
#include <isc/quota.h>
#include <isc/random.h>
#include <isc/refcount.h>
#include <isc/region.h>
#include <isc/result.h>
#include <isc/sockaddr.h>
#include <isc/stats.h>
#include <isc/thread.h>
#include <isc/util.h>

#include "netmgr-int.h"
#include "uv-compat.h"

/*%
 * How many isc_nmhandles and isc_nm_uvreqs will we be
 * caching for reuse in a socket.
 */
#define ISC_NM_HANDLES_STACK_SIZE 600
#define ISC_NM_REQS_STACK_SIZE	  600

/*%
 * Shortcut index arrays to get access to statistics counters.
 */

static const isc_statscounter_t udp4statsindex[] = {
	isc_sockstatscounter_udp4open,
	isc_sockstatscounter_udp4openfail,
	isc_sockstatscounter_udp4close,
	isc_sockstatscounter_udp4bindfail,
	isc_sockstatscounter_udp4connectfail,
	isc_sockstatscounter_udp4connect,
	-1,
	-1,
	isc_sockstatscounter_udp4sendfail,
	isc_sockstatscounter_udp4recvfail,
	isc_sockstatscounter_udp4active
};

static const isc_statscounter_t udp6statsindex[] = {
	isc_sockstatscounter_udp6open,
	isc_sockstatscounter_udp6openfail,
	isc_sockstatscounter_udp6close,
	isc_sockstatscounter_udp6bindfail,
	isc_sockstatscounter_udp6connectfail,
	isc_sockstatscounter_udp6connect,
	-1,
	-1,
	isc_sockstatscounter_udp6sendfail,
	isc_sockstatscounter_udp6recvfail,
	isc_sockstatscounter_udp6active
};

static const isc_statscounter_t tcp4statsindex[] = {
	isc_sockstatscounter_tcp4open, isc_sockstatscounter_tcp4openfail,
	isc_sockstatscounter_tcp4close, isc_sockstatscounter_tcp4bindfail,
	isc_sockstatscounter_tcp4connectfail, isc_sockstatscounter_tcp4connect,
	isc_sockstatscounter_tcp4acceptfail, isc_sockstatscounter_tcp4accept,
	isc_sockstatscounter_tcp4sendfail, isc_sockstatscounter_tcp4recvfail,
	isc_sockstatscounter_tcp4active
};

static const isc_statscounter_t tcp6statsindex[] = {
	isc_sockstatscounter_tcp6open, isc_sockstatscounter_tcp6openfail,
	isc_sockstatscounter_tcp6close, isc_sockstatscounter_tcp6bindfail,
	isc_sockstatscounter_tcp6connectfail, isc_sockstatscounter_tcp6connect,
	isc_sockstatscounter_tcp6acceptfail, isc_sockstatscounter_tcp6accept,
	isc_sockstatscounter_tcp6sendfail, isc_sockstatscounter_tcp6recvfail,
	isc_sockstatscounter_tcp6active
};

#if 0
/* XXX: not currently used */
static const isc_statscounter_t unixstatsindex[] = {
	isc_sockstatscounter_unixopen,
	isc_sockstatscounter_unixopenfail,
	isc_sockstatscounter_unixclose,
	isc_sockstatscounter_unixbindfail,
	isc_sockstatscounter_unixconnectfail,
	isc_sockstatscounter_unixconnect,
	isc_sockstatscounter_unixacceptfail,
	isc_sockstatscounter_unixaccept,
	isc_sockstatscounter_unixsendfail,
	isc_sockstatscounter_unixrecvfail,
	isc_sockstatscounter_unixactive
};
#endif /* if 0 */

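/*
 * All of these arrays use the same slot order (open, openfail, close,
 * bindfail, connectfail, connect, acceptfail, accept, sendfail,
 * recvfail, active), so a socket can be counted generically through its
 * statsindex pointer, e.g.:
 *
 *	isc__nm_incstats(sock->mgr, sock->statsindex[STATID_ACTIVE]);
 *
 * The -1 entries in the UDP arrays are the accept slots, which have no
 * UDP equivalent; isc__nm_incstats()/isc__nm_decstats() REQUIRE a
 * counter id that is not -1, so those slots must never be used for UDP.
 */
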
/*
 * libuv is not thread safe, but has mechanisms to pass messages
 * between threads. Each socket is owned by a thread. For UDP
 * sockets we have a set of sockets for each interface and we can
 * choose a sibling and send the message directly. For TCP, or if
 * we're calling from a non-networking thread, we need to pass the
 * request using async_cb.
 */

static thread_local int isc__nm_tid_v = ISC_NETMGR_TID_UNKNOWN;

static void
nmsocket_maybe_destroy(isc_nmsocket_t *sock);
static void
nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle);
static isc_threadresult_t
nm_thread(isc_threadarg_t worker0);
static void
async_cb(uv_async_t *handle);
static void
process_queue(isc__networker_t *worker, isc_queue_t *queue);

int
isc_nm_tid(void) {
	return (isc__nm_tid_v);
}

bool
isc__nm_in_netthread(void) {
	return (isc__nm_tid_v >= 0);
}

isc_nm_t *
isc_nm_start(isc_mem_t *mctx, uint32_t workers) {
	isc_nm_t *mgr = NULL;
	char name[32];

	mgr = isc_mem_get(mctx, sizeof(*mgr));
	*mgr = (isc_nm_t){ .nworkers = workers };

	isc_mem_attach(mctx, &mgr->mctx);
	isc_mutex_init(&mgr->lock);
	isc_condition_init(&mgr->wkstatecond);
	isc_refcount_init(&mgr->references, 1);
	atomic_init(&mgr->workers_running, 0);
	atomic_init(&mgr->workers_paused, 0);
	atomic_init(&mgr->maxudp, 0);
	atomic_init(&mgr->paused, false);
	atomic_init(&mgr->interlocked, false);

	/*
	 * Default TCP timeout values.
	 * May be updated by isc_nm_tcp_settimeouts().
	 */
	mgr->init = 30000;
	mgr->idle = 30000;
	mgr->keepalive = 30000;
	mgr->advertised = 30000;

	isc_mutex_init(&mgr->reqlock);
	isc_mempool_create(mgr->mctx, sizeof(isc__nm_uvreq_t), &mgr->reqpool);
	isc_mempool_setname(mgr->reqpool, "nm_reqpool");
	isc_mempool_setfreemax(mgr->reqpool, 4096);
	isc_mempool_associatelock(mgr->reqpool, &mgr->reqlock);
	isc_mempool_setfillcount(mgr->reqpool, 32);

	isc_mutex_init(&mgr->evlock);
	isc_mempool_create(mgr->mctx, sizeof(isc__netievent_storage_t),
			   &mgr->evpool);
	isc_mempool_setname(mgr->evpool, "nm_evpool");
	isc_mempool_setfreemax(mgr->evpool, 4096);
	isc_mempool_associatelock(mgr->evpool, &mgr->evlock);
	isc_mempool_setfillcount(mgr->evpool, 32);

	mgr->workers = isc_mem_get(mctx, workers * sizeof(isc__networker_t));
	for (size_t i = 0; i < workers; i++) {
		int r;
		isc__networker_t *worker = &mgr->workers[i];
		*worker = (isc__networker_t){
			.mgr = mgr,
			.id = i,
		};

		r = uv_loop_init(&worker->loop);
		RUNTIME_CHECK(r == 0);

		worker->loop.data = &mgr->workers[i];

		r = uv_async_init(&worker->loop, &worker->async, async_cb);
		RUNTIME_CHECK(r == 0);

		isc_mutex_init(&worker->lock);
		isc_condition_init(&worker->cond);

		worker->ievents = isc_queue_new(mgr->mctx, 128);
		worker->ievents_prio = isc_queue_new(mgr->mctx, 128);
		worker->recvbuf = isc_mem_get(mctx, ISC_NETMGR_RECVBUF_SIZE);

		/*
		 * We need to do this here and not in nm_thread to avoid a
		 * race - we could exit isc_nm_start, launch nm_destroy,
		 * and nm_thread would still not be up.
		 */
		atomic_fetch_add_explicit(&mgr->workers_running, 1,
					  memory_order_relaxed);
		isc_thread_create(nm_thread, &mgr->workers[i], &worker->thread);

		snprintf(name, sizeof(name), "isc-net-%04zu", i);
		isc_thread_setname(worker->thread, name);
	}

	mgr->magic = NM_MAGIC;
	return (mgr);
}

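/*
 * A minimal usage sketch (error handling and listener setup omitted;
 * the listen/read/send entry points live in the per-protocol files,
 * not here):
 *
 *	isc_nm_t *netmgr = isc_nm_start(mctx, 4);
 *
 *	... create listeners, isc_nm_send() on handles, etc. ...
 *
 *	isc_nm_destroy(&netmgr);
 *
 * Because isc_nm_start() bumps workers_running before spawning each
 * thread, an immediate isc_nm_destroy() cannot race with workers that
 * have not yet started (see the comment in the loop above).
 */
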
/*
 * Free the resources of the network manager.
 */
static void
nm_destroy(isc_nm_t **mgr0) {
	REQUIRE(VALID_NM(*mgr0));
	REQUIRE(!isc__nm_in_netthread());

	isc_nm_t *mgr = *mgr0;
	*mgr0 = NULL;

	isc_refcount_destroy(&mgr->references);

	mgr->magic = 0;

	for (size_t i = 0; i < mgr->nworkers; i++) {
		isc__netievent_t *event = NULL;

		LOCK(&mgr->workers[i].lock);
		mgr->workers[i].finished = true;
		UNLOCK(&mgr->workers[i].lock);
		event = isc__nm_get_ievent(mgr, netievent_stop);
		isc__nm_enqueue_ievent(&mgr->workers[i], event);
	}

	LOCK(&mgr->lock);
	while (atomic_load(&mgr->workers_running) > 0) {
		WAIT(&mgr->wkstatecond, &mgr->lock);
	}
	UNLOCK(&mgr->lock);

	for (size_t i = 0; i < mgr->nworkers; i++) {
		isc__networker_t *worker = &mgr->workers[i];
		isc__netievent_t *ievent = NULL;
		int r;

		/* Empty the async event queues */
		while ((ievent = (isc__netievent_t *)isc_queue_dequeue(
				worker->ievents)) != NULL)
		{
			isc_mempool_put(mgr->evpool, ievent);
		}

		while ((ievent = (isc__netievent_t *)isc_queue_dequeue(
				worker->ievents_prio)) != NULL)
		{
			isc_mempool_put(mgr->evpool, ievent);
		}

		r = uv_loop_close(&worker->loop);
		INSIST(r == 0);

		isc_queue_destroy(worker->ievents);
		isc_queue_destroy(worker->ievents_prio);
		isc_mutex_destroy(&worker->lock);
		isc_condition_destroy(&worker->cond);

		isc_mem_put(mgr->mctx, worker->recvbuf,
			    ISC_NETMGR_RECVBUF_SIZE);
		isc_thread_join(worker->thread, NULL);
	}

	if (mgr->stats != NULL) {
		isc_stats_detach(&mgr->stats);
	}

	isc_condition_destroy(&mgr->wkstatecond);
	isc_mutex_destroy(&mgr->lock);

	isc_mempool_destroy(&mgr->evpool);
	isc_mutex_destroy(&mgr->evlock);

	isc_mempool_destroy(&mgr->reqpool);
	isc_mutex_destroy(&mgr->reqlock);

	isc_mem_put(mgr->mctx, mgr->workers,
		    mgr->nworkers * sizeof(isc__networker_t));
	isc_mem_putanddetach(&mgr->mctx, mgr, sizeof(*mgr));
}

void
isc_nm_pause(isc_nm_t *mgr) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(!isc__nm_in_netthread());

	atomic_store(&mgr->paused, true);
	isc__nm_acquire_interlocked_force(mgr);

	for (size_t i = 0; i < mgr->nworkers; i++) {
		isc__netievent_t *event = NULL;

		LOCK(&mgr->workers[i].lock);
		mgr->workers[i].paused = true;
		UNLOCK(&mgr->workers[i].lock);

		/*
		 * We have to issue a stop, otherwise the uv_run loop will
		 * run indefinitely!
		 */
		event = isc__nm_get_ievent(mgr, netievent_stop);
		isc__nm_enqueue_ievent(&mgr->workers[i], event);
	}

	LOCK(&mgr->lock);
	while (atomic_load_relaxed(&mgr->workers_paused) !=
	       atomic_load_relaxed(&mgr->workers_running))
	{
		WAIT(&mgr->wkstatecond, &mgr->lock);
	}
	UNLOCK(&mgr->lock);
}

void
isc_nm_resume(isc_nm_t *mgr) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(!isc__nm_in_netthread());

	for (size_t i = 0; i < mgr->nworkers; i++) {
		LOCK(&mgr->workers[i].lock);
		mgr->workers[i].paused = false;
		SIGNAL(&mgr->workers[i].cond);
		UNLOCK(&mgr->workers[i].lock);
	}
	isc__nm_drop_interlocked(mgr);

	/*
	 * We're not waiting for all the workers to come back to life;
	 * they eventually will, we don't care.
	 */
}

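/*
 * Pause/resume protocol in brief: isc_nm_pause() acquires the manager
 * interlock, marks every worker paused and stops its uv loop, then
 * blocks until workers_paused == workers_running. nm_thread() parks each
 * paused worker on its condition variable, still draining ievents_prio.
 * isc_nm_resume() clears the flags, signals the workers, and drops the
 * interlock without waiting for the workers to wake up again.
 */
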
void
isc_nm_attach(isc_nm_t *mgr, isc_nm_t **dst) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(dst != NULL && *dst == NULL);

	isc_refcount_increment(&mgr->references);

	*dst = mgr;
}

void
isc_nm_detach(isc_nm_t **mgr0) {
	isc_nm_t *mgr = NULL;

	REQUIRE(mgr0 != NULL);
	REQUIRE(VALID_NM(*mgr0));

	mgr = *mgr0;
	*mgr0 = NULL;

	if (isc_refcount_decrement(&mgr->references) == 1) {
		nm_destroy(&mgr);
	}
}

void
isc_nm_closedown(isc_nm_t *mgr) {
	REQUIRE(VALID_NM(mgr));

	atomic_store(&mgr->closing, true);
	for (size_t i = 0; i < mgr->nworkers; i++) {
		isc__netievent_t *event = NULL;
		event = isc__nm_get_ievent(mgr, netievent_shutdown);
		isc__nm_enqueue_ievent(&mgr->workers[i], event);
	}
}

void
isc_nm_destroy(isc_nm_t **mgr0) {
	isc_nm_t *mgr = NULL;

	REQUIRE(mgr0 != NULL);
	REQUIRE(VALID_NM(*mgr0));

	mgr = *mgr0;

	/*
	 * Close active connections.
	 */
	isc_nm_closedown(mgr);

	/*
	 * Wait for the manager to be dereferenced elsewhere.
	 */
	while (isc_refcount_current(&mgr->references) > 1) {
		/*
		 * Sometimes libuv gets stuck; pausing and unpausing the
		 * netmgr makes every worker go over all the events in
		 * its async queue, and since this is done only on
		 * shutdown it doesn't cost us anything.
		 */
		isc_nm_pause(mgr);
		isc_nm_resume(mgr);
#ifdef WIN32
		_sleep(10);
#else /* ifdef WIN32 */
		usleep(10000);
#endif /* ifdef WIN32 */
	}

	/*
	 * Detach the final reference.
	 */
	isc_nm_detach(mgr0);
}

void
isc_nm_maxudp(isc_nm_t *mgr, uint32_t maxudp) {
	REQUIRE(VALID_NM(mgr));

	atomic_store(&mgr->maxudp, maxudp);
}

void
isc_nm_tcp_settimeouts(isc_nm_t *mgr, uint32_t init, uint32_t idle,
		       uint32_t keepalive, uint32_t advertised) {
	REQUIRE(VALID_NM(mgr));

	mgr->init = init * 100;
	mgr->idle = idle * 100;
	mgr->keepalive = keepalive * 100;
	mgr->advertised = advertised * 100;
}

void
isc_nm_tcp_gettimeouts(isc_nm_t *mgr, uint32_t *initial, uint32_t *idle,
		       uint32_t *keepalive, uint32_t *advertised) {
	REQUIRE(VALID_NM(mgr));

	if (initial != NULL) {
		*initial = mgr->init / 100;
	}

	if (idle != NULL) {
		*idle = mgr->idle / 100;
	}

	if (keepalive != NULL) {
		*keepalive = mgr->keepalive / 100;
	}

	if (advertised != NULL) {
		*advertised = mgr->advertised / 100;
	}
}

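/*
 * A note on units: the setters/getters above exchange values in tenths
 * of a second, while mgr->init et al. are stored in milliseconds (hence
 * the "* 100" and "/ 100"); the 30000 ms defaults in isc_nm_start()
 * therefore correspond to settimeouts() arguments of 300.
 */
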
/*
 * nm_thread is a single worker thread that runs the uv_run event loop
 * until asked to stop.
 */
static isc_threadresult_t
nm_thread(isc_threadarg_t worker0) {
	isc__networker_t *worker = (isc__networker_t *)worker0;

	isc__nm_tid_v = worker->id;
	isc_thread_setaffinity(isc__nm_tid_v);

	while (true) {
		int r = uv_run(&worker->loop, UV_RUN_DEFAULT);
		bool pausing = false;

		/*
		 * uv_run() stopped: either the worker is being asked to
		 * pause, or there was nothing to do. In the first case
		 * we wait on the condition variable; in the latter we
		 * nap briefly below and try again.
		 */
		LOCK(&worker->lock);
		while (worker->paused) {
			LOCK(&worker->mgr->lock);
			if (!pausing) {
				atomic_fetch_add_explicit(
					&worker->mgr->workers_paused, 1,
					memory_order_acquire);
				pausing = true;
			}

			SIGNAL(&worker->mgr->wkstatecond);
			UNLOCK(&worker->mgr->lock);

			WAIT(&worker->cond, &worker->lock);

			/* Process priority events */
			process_queue(worker, worker->ievents_prio);
		}
		if (pausing) {
			uint32_t wp = atomic_fetch_sub_explicit(
				&worker->mgr->workers_paused, 1,
				memory_order_release);
			if (wp == 1) {
				atomic_store(&worker->mgr->paused, false);
			}
		}
		bool finished = worker->finished;
		UNLOCK(&worker->lock);

		if (finished) {
			/*
			 * We need to launch the loop one more time
			 * in UV_RUN_NOWAIT mode to make sure that
			 * worker->async is closed, so that we can
			 * close the loop cleanly. We don't care
			 * about the callback, as in this case we can
			 * be certain that uv_run() will eat the event.
			 *
			 * XXX: We may need to take steps here to ensure
			 * that all netmgr handles are freed.
			 */
			uv_close((uv_handle_t *)&worker->async, NULL);
			uv_run(&worker->loop, UV_RUN_NOWAIT);
			break;
		}

		if (r == 0) {
			/*
			 * XXX: uv_run() in UV_RUN_DEFAULT mode returns
			 * zero if there are still active uv_handles.
			 * This shouldn't happen, but if it does, we just
			 * keep checking until they're done. We nap for a
			 * tenth of a second on each loop so as not to burn
			 * CPU. (We could do a conditional wait instead, but
			 * it seems like overkill for this case.)
			 */
#ifdef WIN32
			_sleep(100);
#else /* ifdef WIN32 */
			usleep(100000);
#endif /* ifdef WIN32 */
		}

		/*
		 * Empty the async queue.
		 */
		process_queue(worker, worker->ievents_prio);
		process_queue(worker, worker->ievents);
	}

	LOCK(&worker->mgr->lock);
	atomic_fetch_sub_explicit(&worker->mgr->workers_running, 1,
				  memory_order_relaxed);
	SIGNAL(&worker->mgr->wkstatecond);
	UNLOCK(&worker->mgr->lock);

	return ((isc_threadresult_t)0);
}

/*
 * async_cb() is a universal callback for 'async' events sent to the event
 * loop. It's the only way to safely pass data to the libuv event loop. We
 * use a single async event and lockless queues of 'isc__netievent_t'
 * structures passed from other threads.
 */
static void
async_cb(uv_async_t *handle) {
	isc__networker_t *worker = (isc__networker_t *)handle->loop->data;
	process_queue(worker, worker->ievents_prio);
	process_queue(worker, worker->ievents);
}

static void
process_queue(isc__networker_t *worker, isc_queue_t *queue) {
	isc__netievent_t *ievent = NULL;

	while ((ievent = (isc__netievent_t *)isc_queue_dequeue(queue)) != NULL)
	{
		switch (ievent->type) {
		case netievent_stop:
			uv_stop(&worker->loop);
			isc_mempool_put(worker->mgr->evpool, ievent);
			return;
		case netievent_udplisten:
			isc__nm_async_udplisten(worker, ievent);
			break;
		case netievent_udpstop:
			isc__nm_async_udpstop(worker, ievent);
			break;
		case netievent_udpsend:
			isc__nm_async_udpsend(worker, ievent);
			break;
		case netievent_tcpconnect:
			isc__nm_async_tcpconnect(worker, ievent);
			break;
		case netievent_tcplisten:
			isc__nm_async_tcplisten(worker, ievent);
			break;
		case netievent_tcpchildaccept:
			isc__nm_async_tcpchildaccept(worker, ievent);
			break;
		case netievent_tcpaccept:
			isc__nm_async_tcpaccept(worker, ievent);
			break;
		case netievent_tcpstartread:
			isc__nm_async_tcp_startread(worker, ievent);
			break;
		case netievent_tcppauseread:
			isc__nm_async_tcp_pauseread(worker, ievent);
			break;
		case netievent_tcpsend:
			isc__nm_async_tcpsend(worker, ievent);
			break;
		case netievent_tcpstop:
			isc__nm_async_tcpstop(worker, ievent);
			break;
		case netievent_tcpclose:
			isc__nm_async_tcpclose(worker, ievent);
			break;
		case netievent_tcpdnsclose:
			isc__nm_async_tcpdnsclose(worker, ievent);
			break;
		case netievent_closecb:
			isc__nm_async_closecb(worker, ievent);
			break;
		case netievent_shutdown:
			isc__nm_async_shutdown(worker, ievent);
			break;
		default:
			INSIST(0);
			ISC_UNREACHABLE();
		}

		isc__nm_put_ievent(worker->mgr, ievent);
	}
}

void *
isc__nm_get_ievent(isc_nm_t *mgr, isc__netievent_type type) {
	isc__netievent_storage_t *event = isc_mempool_get(mgr->evpool);

	*event = (isc__netievent_storage_t){ .ni.type = type };
	return (event);
}

void
isc__nm_put_ievent(isc_nm_t *mgr, void *ievent) {
	isc_mempool_put(mgr->evpool, ievent);
}

void
isc__nm_enqueue_ievent(isc__networker_t *worker, isc__netievent_t *event) {
	if (event->type > netievent_prio) {
		/*
		 * We need to make sure this signal will be delivered and
		 * the queue will be processed.
		 */
		LOCK(&worker->lock);
		isc_queue_enqueue(worker->ievents_prio, (uintptr_t)event);
		SIGNAL(&worker->cond);
		UNLOCK(&worker->lock);
	} else {
		isc_queue_enqueue(worker->ievents, (uintptr_t)event);
	}
	uv_async_send(&worker->async);
}

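/*
 * A note on ordering: event types numerically above the netievent_prio
 * marker (per the enum in netmgr-int.h) are treated as priority events
 * here. They go onto ievents_prio, which nm_thread() keeps draining even
 * while a worker is paused; everything else waits on ievents until the
 * loop is running normally again.
 */
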
bool
isc__nmsocket_active(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));
	if (sock->parent != NULL) {
		return (atomic_load(&sock->parent->active));
	}

	return (atomic_load(&sock->active));
}

void
isc__nmsocket_attach(isc_nmsocket_t *sock, isc_nmsocket_t **target) {
	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(target != NULL && *target == NULL);

	if (sock->parent != NULL) {
		INSIST(sock->parent->parent == NULL); /* sanity check */
		isc_refcount_increment0(&sock->parent->references);
	} else {
		isc_refcount_increment0(&sock->references);
	}

	*target = sock;
}

/*
 * Free all resources inside a socket (including its children if any).
 */
static void
nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree) {
	isc_nmhandle_t *handle = NULL;
	isc__nm_uvreq_t *uvreq = NULL;

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(!isc__nmsocket_active(sock));

	atomic_store(&sock->destroying, true);

	if (sock->parent == NULL && sock->children != NULL) {
		/*
		 * We shouldn't be here unless there are no active handles,
		 * so we can clean up and free the children.
		 */
		for (int i = 0; i < sock->nchildren; i++) {
			if (!atomic_load(&sock->children[i].destroying)) {
				nmsocket_cleanup(&sock->children[i], false);
			}
		}

		/*
		 * This was a parent socket; free the children.
		 */
		isc_mem_put(sock->mgr->mctx, sock->children,
			    sock->nchildren * sizeof(*sock));
		sock->children = NULL;
		sock->nchildren = 0;
	}
	if (sock->statsindex != NULL) {
		isc__nm_decstats(sock->mgr, sock->statsindex[STATID_ACTIVE]);
	}

	sock->tcphandle = NULL;

	if (sock->outerhandle != NULL) {
		isc_nmhandle_unref(sock->outerhandle);
		sock->outerhandle = NULL;
	}

	if (sock->outer != NULL) {
		isc__nmsocket_detach(&sock->outer);
	}

	while ((handle = isc_astack_pop(sock->inactivehandles)) != NULL) {
		nmhandle_free(sock, handle);
	}

	if (sock->buf != NULL) {
		isc_mem_free(sock->mgr->mctx, sock->buf);
	}

	if (sock->quota != NULL) {
		isc_quota_detach(&sock->quota);
	}

	sock->pquota = NULL;

	if (sock->timer_initialized) {
		sock->timer_initialized = false;
		/* We might be in the timer callback */
		if (!uv_is_closing((uv_handle_t *)&sock->timer)) {
			uv_timer_stop(&sock->timer);
			uv_close((uv_handle_t *)&sock->timer, NULL);
		}
	}

	isc_astack_destroy(sock->inactivehandles);

	while ((uvreq = isc_astack_pop(sock->inactivereqs)) != NULL) {
		isc_mempool_put(sock->mgr->reqpool, uvreq);
	}

	isc_astack_destroy(sock->inactivereqs);
	sock->magic = 0;

	isc_mem_free(sock->mgr->mctx, sock->ah_frees);
	isc_mem_free(sock->mgr->mctx, sock->ah_handles);
	isc_mutex_destroy(&sock->lock);
	isc_condition_destroy(&sock->cond);

	if (dofree) {
		isc_nm_t *mgr = sock->mgr;
		isc_mem_put(mgr->mctx, sock, sizeof(*sock));
		isc_nm_detach(&mgr);
	} else {
		isc_nm_detach(&sock->mgr);
	}
}

static void
nmsocket_maybe_destroy(isc_nmsocket_t *sock) {
	int active_handles;
	bool destroy = false;

	if (sock->parent != NULL) {
		/*
		 * This is a child socket and cannot be destroyed except
		 * as a side effect of destroying the parent, so let's go
		 * see if the parent is ready to be destroyed.
		 */
		nmsocket_maybe_destroy(sock->parent);
		return;
	}

	/*
	 * This is a parent socket (or a standalone). See whether the
	 * children have active handles before deciding whether to
	 * accept destruction.
	 */
	LOCK(&sock->lock);
	if (atomic_load(&sock->active) || atomic_load(&sock->destroying) ||
	    !atomic_load(&sock->closed) || atomic_load(&sock->references) != 0)
	{
		UNLOCK(&sock->lock);
		return;
	}

	active_handles = atomic_load(&sock->ah);
	if (sock->children != NULL) {
		for (int i = 0; i < sock->nchildren; i++) {
			LOCK(&sock->children[i].lock);
			active_handles += atomic_load(&sock->children[i].ah);
			UNLOCK(&sock->children[i].lock);
		}
	}

	if (active_handles == 0 || sock->tcphandle != NULL) {
		destroy = true;
	}
	UNLOCK(&sock->lock);

	if (destroy) {
		nmsocket_cleanup(sock, true);
	}
}

void
isc__nmsocket_prep_destroy(isc_nmsocket_t *sock) {
	REQUIRE(sock->parent == NULL);

	/*
	 * The final external reference to the socket is gone. We can try
	 * destroying the socket, but we have to wait for all the inflight
	 * handles to finish first.
	 */
	atomic_store(&sock->active, false);

	/*
	 * If the socket has children, they'll need to be marked inactive
	 * so they can be cleaned up too.
	 */
	if (sock->children != NULL) {
		for (int i = 0; i < sock->nchildren; i++) {
			atomic_store(&sock->children[i].active, false);
		}
	}

	/*
	 * If we're here then we already stopped listening; otherwise
	 * we'd have a hanging reference from the listening process.
	 *
	 * If it's a regular socket we may need to close it.
	 */
	if (!atomic_load(&sock->closed)) {
		switch (sock->type) {
		case isc_nm_tcpsocket:
			isc__nm_tcp_close(sock);
			break;
		case isc_nm_tcpdnssocket:
			isc__nm_tcpdns_close(sock);
			break;
		default:
			break;
		}
	}

	nmsocket_maybe_destroy(sock);
}

void
isc__nmsocket_detach(isc_nmsocket_t **sockp) {
	REQUIRE(sockp != NULL && *sockp != NULL);
	REQUIRE(VALID_NMSOCK(*sockp));

	isc_nmsocket_t *sock = *sockp, *rsock = NULL;
	*sockp = NULL;

	/*
	 * If the socket is a part of a set (a child socket) we are
	 * counting references for the whole set at the parent.
	 */
	if (sock->parent != NULL) {
		rsock = sock->parent;
		INSIST(rsock->parent == NULL); /* sanity check */
	} else {
		rsock = sock;
	}

	if (isc_refcount_decrement(&rsock->references) == 1) {
		isc__nmsocket_prep_destroy(rsock);
	}
}

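/*
 * Reference counting in brief: child sockets in a set share the parent's
 * counter, so attach/detach always act on the parent when there is one.
 * Dropping the last reference only *prepares* destruction; the actual
 * teardown is deferred by nmsocket_maybe_destroy() until no handles
 * remain in flight.
 */
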
void
isc_nmsocket_close(isc_nmsocket_t **sockp) {
	REQUIRE(sockp != NULL);
	REQUIRE(VALID_NMSOCK(*sockp));
	REQUIRE((*sockp)->type == isc_nm_udplistener ||
		(*sockp)->type == isc_nm_tcplistener ||
		(*sockp)->type == isc_nm_tcpdnslistener);

	isc__nmsocket_detach(sockp);
}

void
isc__nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type,
		   isc_nmiface_t *iface) {
	uint16_t family;

	REQUIRE(sock != NULL);
	REQUIRE(mgr != NULL);
	REQUIRE(iface != NULL);

	family = iface->addr.type.sa.sa_family;

	*sock = (isc_nmsocket_t){ .type = type,
				  .iface = iface,
				  .fd = -1,
				  .ah_size = 32,
				  .inactivehandles = isc_astack_new(
					  mgr->mctx, ISC_NM_HANDLES_STACK_SIZE),
				  .inactivereqs = isc_astack_new(
					  mgr->mctx, ISC_NM_REQS_STACK_SIZE) };

	isc_nm_attach(mgr, &sock->mgr);
	sock->uv_handle.handle.data = sock;

	sock->ah_frees = isc_mem_allocate(mgr->mctx,
					  sock->ah_size * sizeof(size_t));
	sock->ah_handles = isc_mem_allocate(
		mgr->mctx, sock->ah_size * sizeof(isc_nmhandle_t *));
	ISC_LINK_INIT(&sock->quotacb, link);
	for (size_t i = 0; i < 32; i++) {
		sock->ah_frees[i] = i;
		sock->ah_handles[i] = NULL;
	}

	switch (type) {
	case isc_nm_udpsocket:
	case isc_nm_udplistener:
		if (family == AF_INET) {
			sock->statsindex = udp4statsindex;
		} else {
			sock->statsindex = udp6statsindex;
		}
		isc__nm_incstats(sock->mgr, sock->statsindex[STATID_ACTIVE]);
		break;
	case isc_nm_tcpsocket:
	case isc_nm_tcplistener:
		if (family == AF_INET) {
			sock->statsindex = tcp4statsindex;
		} else {
			sock->statsindex = tcp6statsindex;
		}
		isc__nm_incstats(sock->mgr, sock->statsindex[STATID_ACTIVE]);
		break;
	default:
		break;
	}

	isc_mutex_init(&sock->lock);
	isc_condition_init(&sock->cond);
	isc_refcount_init(&sock->references, 1);

	atomic_init(&sock->active, true);
	atomic_init(&sock->sequential, false);
	atomic_init(&sock->overlimit, false);
	atomic_init(&sock->processing, false);
	atomic_init(&sock->readpaused, false);

	sock->magic = NMSOCK_MAGIC;
}

void
isc__nm_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf) {
	isc_nmsocket_t *sock = uv_handle_get_data(handle);
	isc__networker_t *worker = NULL;

	REQUIRE(VALID_NMSOCK(sock));
	REQUIRE(isc__nm_in_netthread());
	REQUIRE(size <= ISC_NETMGR_RECVBUF_SIZE);

	worker = &sock->mgr->workers[sock->tid];
	INSIST(!worker->recvbuf_inuse);

	buf->base = worker->recvbuf;
	worker->recvbuf_inuse = true;
	buf->len = ISC_NETMGR_RECVBUF_SIZE;
}

void
isc__nm_free_uvbuf(isc_nmsocket_t *sock, const uv_buf_t *buf) {
	isc__networker_t *worker = NULL;

	REQUIRE(VALID_NMSOCK(sock));
	if (buf->base == NULL) {
		/* Empty buffer: might happen in case of error. */
		return;
	}
	worker = &sock->mgr->workers[sock->tid];

	REQUIRE(worker->recvbuf_inuse);
	if (buf->base > worker->recvbuf &&
	    buf->base <= worker->recvbuf + ISC_NETMGR_RECVBUF_SIZE)
	{
		/* Can happen in case of out-of-order recvmmsg in libuv1.36 */
		return;
	}
	REQUIRE(buf->base == worker->recvbuf);
	worker->recvbuf_inuse = false;
}

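/*
 * The two callbacks above implement a single receive buffer per worker:
 * each worker runs one uv loop on one thread, so at most one read
 * callback can be outstanding at a time, and the recvbuf_inuse flag is
 * enough to catch any accidental overlap.
 */
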
static isc_nmhandle_t *
alloc_handle(isc_nmsocket_t *sock) {
	isc_nmhandle_t *handle =
		isc_mem_get(sock->mgr->mctx,
			    sizeof(isc_nmhandle_t) + sock->extrahandlesize);

	*handle = (isc_nmhandle_t){ .magic = NMHANDLE_MAGIC };
	isc_refcount_init(&handle->references, 1);

	return (handle);
}

isc_nmhandle_t *
isc__nmhandle_get(isc_nmsocket_t *sock, isc_sockaddr_t *peer,
		  isc_sockaddr_t *local) {
	isc_nmhandle_t *handle = NULL;
	size_t handlenum;
	int pos;

	REQUIRE(VALID_NMSOCK(sock));

	handle = isc_astack_pop(sock->inactivehandles);

	if (handle == NULL) {
		handle = alloc_handle(sock);
	} else {
		INSIST(VALID_NMHANDLE(handle));
		isc_refcount_increment0(&handle->references);
	}

	isc__nmsocket_attach(sock, &handle->sock);

	if (peer != NULL) {
		memcpy(&handle->peer, peer, sizeof(isc_sockaddr_t));
	} else {
		memcpy(&handle->peer, &sock->peer, sizeof(isc_sockaddr_t));
	}

	if (local != NULL) {
		memcpy(&handle->local, local, sizeof(isc_sockaddr_t));
	} else if (sock->iface != NULL) {
		memcpy(&handle->local, &sock->iface->addr,
		       sizeof(isc_sockaddr_t));
	} else {
		INSIST(0);
		ISC_UNREACHABLE();
	}

	LOCK(&sock->lock);
	/* We need to add this handle to the list of active handles */
	if ((size_t)atomic_load(&sock->ah) == sock->ah_size) {
		sock->ah_frees =
			isc_mem_reallocate(sock->mgr->mctx, sock->ah_frees,
					   sock->ah_size * 2 * sizeof(size_t));
		sock->ah_handles = isc_mem_reallocate(
			sock->mgr->mctx, sock->ah_handles,
			sock->ah_size * 2 * sizeof(isc_nmhandle_t *));

		for (size_t i = sock->ah_size; i < sock->ah_size * 2; i++) {
			sock->ah_frees[i] = i;
			sock->ah_handles[i] = NULL;
		}

		sock->ah_size *= 2;
	}

	handlenum = atomic_fetch_add(&sock->ah, 1);
	pos = sock->ah_frees[handlenum];

	INSIST(sock->ah_handles[pos] == NULL);
	sock->ah_handles[pos] = handle;
	handle->ah_pos = pos;
	UNLOCK(&sock->lock);

	if (sock->type == isc_nm_tcpsocket) {
		INSIST(sock->tcphandle == NULL);
		sock->tcphandle = handle;
	}

	return (handle);
}

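/*
 * The bookkeeping above is a free-list indexed by the active-handle
 * count: ah_frees[0..ah-1] have been consumed and ah_frees[ah..] hold
 * slot numbers that are still free. Taking a handle consumes
 * ah_frees[ah] (and ah grows); nmhandle_deactivate() returns a slot by
 * storing handle->ah_pos back into ah_frees[--ah]. For example, with
 * ah == 2 after slot 1 was freed last, the next handle reuses slot
 * ah_frees[2] == 1.
 */
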
void
isc_nmhandle_ref(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	isc_refcount_increment(&handle->references);
}

bool
isc_nmhandle_is_stream(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	return (handle->sock->type == isc_nm_tcpsocket ||
		handle->sock->type == isc_nm_tcpdnssocket);
}

static void
nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
	size_t extra = sock->extrahandlesize;

	isc_refcount_destroy(&handle->references);

	if (handle->dofree != NULL) {
		handle->dofree(handle->opaque);
	}

	*handle = (isc_nmhandle_t){ .magic = 0 };

	isc_mem_put(sock->mgr->mctx, handle, sizeof(isc_nmhandle_t) + extra);
}

static void
nmhandle_deactivate(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
	size_t handlenum;
	bool reuse = false;

	/*
	 * We do all of this under lock to avoid races with socket
	 * destruction. We have to do this now, because at this point the
	 * socket is either unused or still attached to event->sock.
	 */
	LOCK(&sock->lock);

	INSIST(sock->ah_handles[handle->ah_pos] == handle);
	INSIST(sock->ah_size > handle->ah_pos);
	INSIST(atomic_load(&sock->ah) > 0);

	sock->ah_handles[handle->ah_pos] = NULL;
	handlenum = atomic_fetch_sub(&sock->ah, 1) - 1;
	sock->ah_frees[handlenum] = handle->ah_pos;
	handle->ah_pos = 0;
	if (atomic_load(&sock->active)) {
		reuse = isc_astack_trypush(sock->inactivehandles, handle);
	}
	if (!reuse) {
		nmhandle_free(sock, handle);
	}
	UNLOCK(&sock->lock);
}

void
isc_nmhandle_unref(isc_nmhandle_t *handle) {
	isc_nmsocket_t *sock = NULL;

	REQUIRE(VALID_NMHANDLE(handle));

	if (isc_refcount_decrement(&handle->references) > 1) {
		return;
	}
	/* We need an acquire memory barrier here */
	(void)isc_refcount_current(&handle->references);

	sock = handle->sock;
	handle->sock = NULL;

	if (handle->doreset != NULL) {
		handle->doreset(handle->opaque);
	}

	nmhandle_deactivate(sock, handle);

	/*
	 * The handle is gone now. If the socket has a callback configured
	 * for that (e.g., to perform cleanup after request processing),
	 * call it now, or schedule it to run asynchronously.
	 */
	if (sock->closehandle_cb != NULL) {
		if (sock->tid == isc_nm_tid()) {
			sock->closehandle_cb(sock);
		} else {
			isc__netievent_closecb_t *event = isc__nm_get_ievent(
				sock->mgr, netievent_closecb);
			/*
			 * The socket will be finally detached by the closecb
			 * event handler.
			 */
			isc__nmsocket_attach(sock, &event->sock);
			isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid],
					       (isc__netievent_t *)event);
		}
	}

	isc__nmsocket_detach(&sock);
}

void *
isc_nmhandle_getdata(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	return (handle->opaque);
}

void
isc_nmhandle_setdata(isc_nmhandle_t *handle, void *arg,
		     isc_nm_opaquecb_t doreset, isc_nm_opaquecb_t dofree) {
	REQUIRE(VALID_NMHANDLE(handle));

	handle->opaque = arg;
	handle->doreset = doreset;
	handle->dofree = dofree;
}

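/*
 * Callback semantics for the opaque data set above, as implemented in
 * this file: doreset() runs in isc_nmhandle_unref() just before the
 * handle is deactivated (and possibly cached for reuse), while dofree()
 * runs in nmhandle_free() when the handle memory is actually released.
 */
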
void *
isc_nmhandle_getextra(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	return (handle->extra);
}

isc_sockaddr_t
isc_nmhandle_peeraddr(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	return (handle->peer);
}

isc_sockaddr_t
isc_nmhandle_localaddr(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));

	return (handle->local);
}

isc_nm_t *
isc_nmhandle_netmgr(isc_nmhandle_t *handle) {
	REQUIRE(VALID_NMHANDLE(handle));
	REQUIRE(VALID_NMSOCK(handle->sock));

	return (handle->sock->mgr);
}

isc__nm_uvreq_t *
isc__nm_uvreq_get(isc_nm_t *mgr, isc_nmsocket_t *sock) {
	isc__nm_uvreq_t *req = NULL;

	REQUIRE(VALID_NM(mgr));
	REQUIRE(VALID_NMSOCK(sock));

	if (sock != NULL && atomic_load(&sock->active)) {
		/* Try to reuse one */
		req = isc_astack_pop(sock->inactivereqs);
	}

	if (req == NULL) {
		req = isc_mempool_get(mgr->reqpool);
	}

	*req = (isc__nm_uvreq_t){ .magic = 0 };
	req->uv_req.req.data = req;
	isc__nmsocket_attach(sock, &req->sock);
	req->magic = UVREQ_MAGIC;

	return (req);
}

void
isc__nm_uvreq_put(isc__nm_uvreq_t **req0, isc_nmsocket_t *sock) {
	isc__nm_uvreq_t *req = NULL;
	isc_nmhandle_t *handle = NULL;

	REQUIRE(req0 != NULL);
	REQUIRE(VALID_UVREQ(*req0));

	req = *req0;
	*req0 = NULL;

	INSIST(sock == req->sock);

	req->magic = 0;

	/*
	 * We need to save this first to make sure that handle,
	 * sock, and the netmgr won't all disappear.
	 */
	handle = req->handle;
	req->handle = NULL;

	if (!atomic_load(&sock->active) ||
	    !isc_astack_trypush(sock->inactivereqs, req)) {
		isc_mempool_put(sock->mgr->reqpool, req);
	}

	if (handle != NULL) {
		isc_nmhandle_unref(handle);
	}

	isc__nmsocket_detach(&sock);
}

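/*
 * Request allocation is two-tier: a lock-free per-socket stack
 * (inactivereqs) is tried first, with the locked shared mempool
 * (mgr->reqpool) as fallback; isc__nm_uvreq_put() reverses this,
 * returning to the stack only while the socket is still active.
 */
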
isc_result_t
isc_nm_send(isc_nmhandle_t *handle, isc_region_t *region, isc_nm_cb_t cb,
	    void *cbarg) {
	REQUIRE(VALID_NMHANDLE(handle));

	switch (handle->sock->type) {
	case isc_nm_udpsocket:
	case isc_nm_udplistener:
		return (isc__nm_udp_send(handle, region, cb, cbarg));
	case isc_nm_tcpsocket:
		return (isc__nm_tcp_send(handle, region, cb, cbarg));
	case isc_nm_tcpdnssocket:
		return (isc__nm_tcpdns_send(handle, region, cb, cbarg));
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}

isc_result_t
isc_nm_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) {
	REQUIRE(VALID_NMHANDLE(handle));

	switch (handle->sock->type) {
	case isc_nm_tcpsocket:
		return (isc__nm_tcp_read(handle, cb, cbarg));
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}

isc_result_t
isc_nm_pauseread(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));
	switch (sock->type) {
	case isc_nm_tcpsocket:
		return (isc__nm_tcp_pauseread(sock));
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}

isc_result_t
isc_nm_resumeread(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));
	switch (sock->type) {
	case isc_nm_tcpsocket:
		return (isc__nm_tcp_resumeread(sock));
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}

void
isc_nm_stoplistening(isc_nmsocket_t *sock) {
	REQUIRE(VALID_NMSOCK(sock));
	switch (sock->type) {
	case isc_nm_udplistener:
		isc__nm_udp_stoplistening(sock);
		break;
	case isc_nm_tcpdnslistener:
		isc__nm_tcpdns_stoplistening(sock);
		break;
	case isc_nm_tcplistener:
		isc__nm_tcp_stoplistening(sock);
		break;
	default:
		INSIST(0);
		ISC_UNREACHABLE();
	}
}

void
isc__nm_async_closecb(isc__networker_t *worker, isc__netievent_t *ev0) {
	isc__netievent_closecb_t *ievent = (isc__netievent_closecb_t *)ev0;

	REQUIRE(VALID_NMSOCK(ievent->sock));
	REQUIRE(ievent->sock->tid == isc_nm_tid());
	REQUIRE(ievent->sock->closehandle_cb != NULL);

	UNUSED(worker);

	ievent->sock->closehandle_cb(ievent->sock);
	isc__nmsocket_detach(&ievent->sock);
}

static void
shutdown_walk_cb(uv_handle_t *handle, void *arg) {
	UNUSED(arg);

	switch (handle->type) {
	case UV_TCP:
		isc__nm_tcp_shutdown(uv_handle_get_data(handle));
		break;
	default:
		break;
	}
}

void
isc__nm_async_shutdown(isc__networker_t *worker, isc__netievent_t *ev0) {
	UNUSED(ev0);
	uv_walk(&worker->loop, shutdown_walk_cb, NULL);
}

bool
isc__nm_acquire_interlocked(isc_nm_t *mgr) {
	LOCK(&mgr->lock);
	bool success = atomic_compare_exchange_strong(&mgr->interlocked,
						      &(bool){ false }, true);
	UNLOCK(&mgr->lock);
	return (success);
}

void
isc__nm_drop_interlocked(isc_nm_t *mgr) {
	LOCK(&mgr->lock);
	bool success = atomic_compare_exchange_strong(&mgr->interlocked,
						      &(bool){ true }, false);
	INSIST(success);
	BROADCAST(&mgr->wkstatecond);
	UNLOCK(&mgr->lock);
}

void
isc__nm_acquire_interlocked_force(isc_nm_t *mgr) {
	LOCK(&mgr->lock);
	while (!atomic_compare_exchange_strong(&mgr->interlocked,
					       &(bool){ false }, true))
	{
		WAIT(&mgr->wkstatecond, &mgr->lock);
	}
	UNLOCK(&mgr->lock);
}

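/*
 * The "interlock" is a single manager-wide flag serializing
 * whole-manager operations such as isc_nm_pause(): the plain acquire
 * fails fast if another caller holds it, while the _force variant
 * blocks on wkstatecond until the holder drops it.
 */
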
void
isc_nm_setstats(isc_nm_t *mgr, isc_stats_t *stats) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(mgr->stats == NULL);
	REQUIRE(isc_stats_ncounters(stats) == isc_sockstatscounter_max);

	isc_stats_attach(stats, &mgr->stats);
}

void
isc__nm_incstats(isc_nm_t *mgr, isc_statscounter_t counterid) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(counterid != -1);

	if (mgr->stats != NULL) {
		isc_stats_increment(mgr->stats, counterid);
	}
}

void
isc__nm_decstats(isc_nm_t *mgr, isc_statscounter_t counterid) {
	REQUIRE(VALID_NM(mgr));
	REQUIRE(counterid != -1);

	if (mgr->stats != NULL) {
		isc_stats_decrement(mgr->stats, counterid);
	}
}