From b69e783164cd50e3306364668558e460617ee8fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= Date: Tue, 26 Jul 2022 13:03:45 +0200 Subject: [PATCH] Update netmgr, tasks, and applications to use isc_loopmgr Previously: * applications were using isc_app as the base unit for running the application and signal handling. * networking was handled in the netmgr layer, which would start a number of threads, each with a uv_loop event loop. * task/event handling was done in the isc_task unit, which used netmgr event loops to run the isc_event calls. In this refactoring: * the network manager now uses isc_loop instead of maintaining its own worker threads and event loops. * the taskmgr that manages isc_task instances now also uses isc_loopmgr, and every isc_task runs on a specific isc_loop bound to the specific thread. * applications have been updated as necessary to use the new API. * new ISC_LOOP_TEST macros have been added to enable unit tests to run isc_loop event loops. unit tests have been updated to use this where needed. 
--- bin/delv/delv.c | 189 ++-- bin/dig/dighost.c | 21 +- bin/dig/dighost.h | 1 + bin/dnssec/dnssec-signzone.c | 153 ++- bin/dnssec/dnssec-verify.c | 4 - bin/named/control.c | 6 +- bin/named/controlconf.c | 9 +- bin/named/fuzz.c | 14 +- bin/named/include/named/server.h | 8 +- bin/named/log.c | 5 +- bin/named/main.c | 66 +- bin/named/server.c | 756 ++++++++------ bin/nsupdate/nsupdate.c | 74 +- bin/rndc/rndc.c | 41 +- bin/tests/system/dyndb/driver/Makefile.am | 2 - bin/tests/system/dyndb/driver/lock.c | 81 -- bin/tests/system/dyndb/driver/lock.h | 39 - bin/tests/system/dyndb/driver/zone.c | 7 +- bin/tests/system/pipelined/pipequeries.c | 33 +- bin/tests/system/resolve.c | 134 ++- bin/tests/system/statistics/tests.sh | 2 +- bin/tests/system/tkey/keycreate.c | 32 +- bin/tests/system/tkey/keydelete.c | 31 +- bin/tests/test_client.c | 33 +- bin/tests/test_server.c | 32 +- bin/tools/mdig.c | 52 +- doc/design/zone | 7 - lib/bind9/include/bind9/getaddresses.h | 5 +- lib/dns/adb.c | 2 +- lib/dns/cache.c | 49 +- lib/dns/catz.c | 2 +- lib/dns/client.c | 323 ++---- lib/dns/dnstap.c | 3 +- lib/dns/include/dns/client.h | 10 +- lib/dns/masterdump.c | 15 +- lib/dns/nta.c | 2 +- lib/dns/resolver.c | 2 +- lib/dns/rpz.c | 2 +- lib/dns/view.c | 2 +- lib/dns/zone.c | 6 +- lib/isc/httpd.c | 6 +- lib/isc/include/isc/managers.h | 9 +- lib/isc/include/isc/netmgr.h | 52 +- lib/isc/include/isc/task.h | 194 ++-- lib/isc/include/isc/types.h | 10 +- lib/isc/include/isc/util.h | 5 + lib/isc/log.c | 5 - lib/isc/managers.c | 94 +- lib/isc/netmgr/http.c | 209 ++-- lib/isc/netmgr/netmgr-int.h | 412 +++----- lib/isc/netmgr/netmgr.c | 1132 +++++---------------- lib/isc/netmgr/tcp.c | 451 ++++---- lib/isc/netmgr/tcpdns.c | 406 ++++---- lib/isc/netmgr/timer.c | 11 +- lib/isc/netmgr/tlsdns.c | 462 ++++----- lib/isc/netmgr/tlsstream.c | 123 +-- lib/isc/netmgr/udp.c | 897 ++++++---------- lib/isc/netmgr_p.h | 38 - lib/isc/task.c | 785 +++++++------- lib/isc/task_p.h | 106 -- lib/isc/timer.c | 2 +- 
lib/ns/client.c | 58 +- lib/ns/include/ns/client.h | 2 +- lib/ns/include/ns/interfacemgr.h | 4 +- lib/ns/interfacemgr.c | 20 +- tests/dns/.gitignore | 1 + tests/dns/dispatch_test.c | 490 ++++----- tests/dns/keytable_test.c | 84 +- tests/dns/resolver_test.c | 54 +- tests/dns/tsig_test.c | 8 + tests/dns/zonemgr_test.c | 84 +- tests/dns/zt_test.c | 288 ++---- tests/include/tests/dns.h | 1 + tests/include/tests/isc.h | 104 +- tests/include/tests/ns.h | 14 +- tests/isc/Makefile.am | 13 - tests/isc/doh_test.c | 959 +++++------------ tests/isc/lex_test.c | 1 - tests/isc/task_test.c | 452 +++----- tests/isccfg/duration_test.c | 1 - tests/isccfg/parser_test.c | 1 - tests/libtest/dns.c | 4 +- tests/libtest/isc.c | 83 +- tests/libtest/ns.c | 97 +- tests/ns/Makefile.am | 8 + tests/ns/listenlist_test.c | 22 +- tests/ns/netmgr_wrap.c | 90 ++ tests/ns/notify_test.c | 22 +- tests/ns/plugin_test.c | 7 +- tests/ns/query_test.c | 62 +- 90 files changed, 4056 insertions(+), 6642 deletions(-) delete mode 100644 bin/tests/system/dyndb/driver/lock.c delete mode 100644 bin/tests/system/dyndb/driver/lock.h delete mode 100644 lib/isc/netmgr_p.h delete mode 100644 lib/isc/task_p.h create mode 100644 tests/ns/netmgr_wrap.c diff --git a/bin/delv/delv.c b/bin/delv/delv.c index 5c3240c574..eb0ad59407 100644 --- a/bin/delv/delv.c +++ b/bin/delv/delv.c @@ -25,7 +25,6 @@ #include #include -#include #include #include #include @@ -81,6 +80,11 @@ char *progname; static isc_mem_t *mctx = NULL; static isc_log_t *lctx = NULL; +/* Managers */ +static isc_nm_t *netmgr = NULL; +static isc_loopmgr_t *loopmgr = NULL; +static isc_taskmgr_t *taskmgr = NULL; + /* Configurables */ static char *server = NULL; static const char *port = "53"; @@ -114,6 +118,9 @@ static int num_keys = 0; static dns_fixedname_t afn; static dns_name_t *anchor_name = NULL; +static dns_master_style_t *style = NULL; +static dns_fixedname_t qfn; + /* Default bind.keys contents */ static char anchortext[] = TRUST_ANCHORS; @@ -417,8 +424,7 
@@ print_status(dns_rdataset_t *rdataset) { } static isc_result_t -printdata(dns_rdataset_t *rdataset, dns_name_t *owner, - dns_master_style_t *style) { +printdata(dns_rdataset_t *rdataset, dns_name_t *owner) { isc_result_t result = ISC_R_SUCCESS; static dns_trust_t trust; static bool first = true; @@ -512,11 +518,8 @@ cleanup: } static isc_result_t -setup_style(dns_master_style_t **stylep) { +setup_style(void) { isc_result_t result; - dns_master_style_t *style = NULL; - - REQUIRE(stylep != NULL && *stylep == NULL); styleflags |= DNS_STYLEFLAG_REL_OWNER; if (yaml) { @@ -557,9 +560,6 @@ setup_style(dns_master_style_t **stylep) { 48, 80, 8, splitwidth, mctx); } - if (result == ISC_R_SUCCESS) { - *stylep = style; - } return (result); } @@ -1709,20 +1709,101 @@ get_reverse(char *reverse, size_t len, char *value, bool strict) { } } +static void +resolve_cb(dns_client_t *client, const dns_name_t *query_name, + dns_namelist_t *namelist, isc_result_t result) { + char namestr[DNS_NAME_FORMATSIZE]; + dns_rdataset_t *rdataset; + + if (result != ISC_R_SUCCESS && !yaml) { + delv_log(ISC_LOG_ERROR, "resolution failed: %s", + isc_result_totext(result)); + } + + if (yaml) { + printf("type: DELV_RESULT\n"); + dns_name_format(query_name, namestr, sizeof(namestr)); + printf("query_name: %s\n", namestr); + printf("status: %s\n", isc_result_totext(result)); + printf("records:\n"); + } + + for (dns_name_t *response_name = ISC_LIST_HEAD(*namelist); + response_name != NULL; + response_name = ISC_LIST_NEXT(response_name, link)) + { + for (rdataset = ISC_LIST_HEAD(response_name->list); + rdataset != NULL; rdataset = ISC_LIST_NEXT(rdataset, link)) + { + result = printdata(rdataset, response_name); + if (result != ISC_R_SUCCESS) { + delv_log(ISC_LOG_ERROR, "print data failed"); + } + } + } + + dns_client_freeresanswer(client, namelist); + isc_mem_put(mctx, namelist, sizeof(*namelist)); + + dns_client_detach(&client); + + isc_loopmgr_shutdown(loopmgr); +} + +static void +resolve(void *arg) { + 
dns_client_t *client = arg; + dns_namelist_t *namelist; + unsigned int resopt; + isc_result_t result; + dns_name_t *query_name; + + namelist = isc_mem_get(mctx, sizeof(*namelist)); + ISC_LIST_INIT(*namelist); + + /* Construct QNAME */ + CHECK(convert_name(&qfn, &query_name, qname)); + + /* Set up resolution options */ + resopt = DNS_CLIENTRESOPT_NOCDFLAG; + if (no_sigs) { + resopt |= DNS_CLIENTRESOPT_NODNSSEC; + } + if (!root_validation) { + resopt |= DNS_CLIENTRESOPT_NOVALIDATE; + } + if (cdflag) { + resopt &= ~DNS_CLIENTRESOPT_NOCDFLAG; + } + if (use_tcp) { + resopt |= DNS_CLIENTRESOPT_TCP; + } + + /* Perform resolution */ + result = dns_client_resolve(client, query_name, dns_rdataclass_in, + qtype, resopt, namelist, resolve_cb); + + if (result != ISC_R_SUCCESS) { + goto cleanup; + } + + return; +cleanup: + if (!yaml) { + delv_log(ISC_LOG_ERROR, "resolution failed: %s", + isc_result_totext(result)); + } + + isc_mem_put(mctx, namelist, sizeof(*namelist)); + isc_loopmgr_shutdown(loopmgr); + + dns_client_detach(&client); +} + int main(int argc, char *argv[]) { dns_client_t *client = NULL; isc_result_t result; - dns_fixedname_t qfn; - dns_name_t *query_name, *response_name; - char namestr[DNS_NAME_FORMATSIZE]; - dns_rdataset_t *rdataset; - dns_namelist_t namelist; - unsigned int resopt; - isc_loopmgr_t *loopmgr = NULL; - isc_nm_t *netmgr = NULL; - isc_taskmgr_t *taskmgr = NULL; - dns_master_style_t *style = NULL; progname = argv[0]; preparse_args(argc, argv); @@ -1730,18 +1811,16 @@ main(int argc, char *argv[]) { argc--; argv++; - isc_mem_create(&mctx); + isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr); result = dst_lib_init(mctx, NULL); if (result != ISC_R_SUCCESS) { fatal("dst_lib_init failed: %d", result); } - isc_managers_create(mctx, 1, 0, &loopmgr, &netmgr, &taskmgr); - parse_args(argc, argv); - CHECK(setup_style(&style)); + CHECK(setup_style()); setup_logging(stderr); @@ -1763,55 +1842,9 @@ main(int argc, char *argv[]) { 
CHECK(setup_dnsseckeys(client)); - /* Construct QNAME */ - CHECK(convert_name(&qfn, &query_name, qname)); + isc_loop_setup(isc_loop_main(loopmgr), resolve, client); - /* Set up resolution options */ - resopt = DNS_CLIENTRESOPT_NOCDFLAG; - if (no_sigs) { - resopt |= DNS_CLIENTRESOPT_NODNSSEC; - } - if (!root_validation) { - resopt |= DNS_CLIENTRESOPT_NOVALIDATE; - } - if (cdflag) { - resopt &= ~DNS_CLIENTRESOPT_NOCDFLAG; - } - if (use_tcp) { - resopt |= DNS_CLIENTRESOPT_TCP; - } - - /* Perform resolution */ - ISC_LIST_INIT(namelist); - result = dns_client_resolve(client, query_name, dns_rdataclass_in, - qtype, resopt, &namelist); - if (result != ISC_R_SUCCESS && !yaml) { - delv_log(ISC_LOG_ERROR, "resolution failed: %s", - isc_result_totext(result)); - } - - if (yaml) { - printf("type: DELV_RESULT\n"); - dns_name_format(query_name, namestr, sizeof(namestr)); - printf("query_name: %s\n", namestr); - printf("status: %s\n", isc_result_totext(result)); - printf("records:\n"); - } - - for (response_name = ISC_LIST_HEAD(namelist); response_name != NULL; - response_name = ISC_LIST_NEXT(response_name, link)) - { - for (rdataset = ISC_LIST_HEAD(response_name->list); - rdataset != NULL; rdataset = ISC_LIST_NEXT(rdataset, link)) - { - result = printdata(rdataset, response_name, style); - if (result != ISC_R_SUCCESS) { - delv_log(ISC_LOG_ERROR, "print data failed"); - } - } - } - - dns_client_freeresanswer(client, &namelist); + isc_loopmgr_run(loopmgr); cleanup: if (trust_anchor != NULL) { @@ -1826,18 +1859,12 @@ cleanup: if (style != NULL) { dns_master_styledestroy(&style, mctx); } - if (client != NULL) { - dns_client_detach(&client); - } - isc_managers_destroy(&loopmgr, &netmgr, &taskmgr); - - if (lctx != NULL) { - isc_log_destroy(&lctx); - } - isc_mem_detach(&mctx); + isc_log_destroy(&lctx); dst_lib_destroy(); + isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr); + return (0); } diff --git a/bin/dig/dighost.c b/bin/dig/dighost.c index f0771b2d8b..d1e8e5784d 100644 --- 
a/bin/dig/dighost.c +++ b/bin/dig/dighost.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -1359,10 +1360,7 @@ setup_libs(void) { fatal("can't find either v4 or v6 networking"); } - isc_mem_create(&mctx); - isc_mem_setname(mctx, "dig"); - - isc_managers_create(mctx, 1, 0, &loopmgr, &netmgr, &taskmgr); + isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr); isc_log_create(mctx, &lctx, &logconfig); isc_log_setcontext(lctx); @@ -1374,6 +1372,7 @@ setup_libs(void) { isc_log_setdebuglevel(lctx, 0); + isc_mem_setname(mctx, "dig"); mainloop = isc_loop_main(loopmgr); result = dst_lib_init(mctx, NULL); @@ -3136,7 +3135,6 @@ udp_ready(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) { debug("udp_ready(%p, %s, %p)", handle, isc_result_totext(eresult), query); - LOCK_LOOKUP; lookup_attach(query->lookup, &l); if (eresult == ISC_R_CANCELED || query->canceled) { @@ -3147,7 +3145,6 @@ udp_ready(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) { query_detach(&query); lookup_detach(&l); clear_current_lookup(); - UNLOCK_LOOKUP; return; } @@ -3168,7 +3165,6 @@ udp_ready(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) { nssearch_next(l, query); check_if_done(); - UNLOCK_LOOKUP; return; } @@ -3203,7 +3199,6 @@ udp_ready(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) { } check_if_done(); - UNLOCK_LOOKUP; return; } @@ -3231,12 +3226,12 @@ udp_ready(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) { query_detach(&query); lookup_detach(&l); - UNLOCK_LOOKUP; } /*% * Send a UDP packet to the remote nameserver, possible starting the - * recv action as well. + * recv action as well. Also make sure that the timer is running and + * is properly reset. 
*/ static void start_udp(dig_query_t *query) { @@ -3563,9 +3558,7 @@ tcp_connected(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) { */ if (l->ns_search_only && !l->trace_root) { nssearch_next(l, query); - check_if_done(); - UNLOCK_LOOKUP; return; } @@ -4347,7 +4340,7 @@ recv_done(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region, * the timeout to much longer, so brief network * outages won't cause the XFR to abort */ - if (timeout != INT_MAX) { + if (timeout != INT_MAX && query->timer != NULL) { unsigned int local_timeout; if (timeout == 0) { @@ -4683,7 +4676,7 @@ destroy_libs(void) { isc_mem_stats(mctx, stderr); } - isc_managers_destroy(&loopmgr, &netmgr, &taskmgr); + isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr); } #ifdef HAVE_LIBIDN2 diff --git a/bin/dig/dighost.h b/bin/dig/dighost.h index f2ba9239e4..ecc20832a0 100644 --- a/bin/dig/dighost.h +++ b/bin/dig/dighost.h @@ -219,6 +219,7 @@ struct dig_query { isc_time_t time_sent; isc_time_t time_recv; uint64_t byte_count; + isc_timer_t *timer; }; struct dig_server { diff --git a/bin/dnssec/dnssec-signzone.c b/bin/dnssec/dnssec-signzone.c index b63a2b9632..7bf82fdcd9 100644 --- a/bin/dnssec/dnssec-signzone.c +++ b/bin/dnssec/dnssec-signzone.c @@ -33,7 +33,6 @@ #include #include -#include #include #include #include @@ -43,6 +42,8 @@ #include #include #include +#include +#include #include #include #include @@ -57,6 +58,7 @@ #include #include #include +#include #include #include @@ -144,8 +146,8 @@ static unsigned int nsigned = 0, nretained = 0, ndropped = 0; static unsigned int nverified = 0, nverifyfailed = 0; static const char *directory = NULL, *dsdir = NULL; static isc_mutex_t namelock, statslock; -static isc_loopmgr_t *loopmgr = NULL; static isc_nm_t *netmgr = NULL; +static isc_loopmgr_t *loopmgr = NULL; static isc_taskmgr_t *taskmgr = NULL; static dns_db_t *gdb; /* The database */ static dns_dbversion_t *gversion; /* The database version */ @@ -157,7 +159,7 @@ static 
dns_iterations_t nsec3iter = 0U; static unsigned char saltbuf[255]; static unsigned char *gsalt = saltbuf; static size_t salt_length = 0; -static isc_task_t *main_task = NULL; +static isc_task_t *write_task = NULL; static unsigned int ntasks = 0; static atomic_bool shuttingdown; static atomic_bool finished; @@ -1568,7 +1570,9 @@ signapex(void) { result = dns_dbiterator_current(gdbiter, &node, name); check_dns_dbiterator_current(result); signname(node, name); + LOCK(&namelock); dumpnode(name, node); + UNLOCK(&namelock); cleannode(gdb, gversion, node); dns_db_detachnode(gdb, &node); result = dns_dbiterator_first(gdbiter); @@ -1585,11 +1589,11 @@ signapex(void) { * lock. */ static void -assignwork(isc_task_t *task, isc_task_t *worker) { - dns_fixedname_t *fname; - dns_name_t *name; - dns_dbnode_t *node; - sevent_t *sevent; +assignwork(isc_task_t *task) { + dns_fixedname_t *fname = NULL; + dns_name_t *name = NULL; + dns_dbnode_t *node = NULL; + sevent_t *sevent = NULL; dns_rdataset_t nsec; bool found; isc_result_t result; @@ -1605,8 +1609,8 @@ assignwork(isc_task_t *task, isc_task_t *worker) { if (atomic_load(&finished)) { ended++; if (ended == ntasks) { - isc_task_detach(&task); - isc_app_shutdown(); + isc_task_detach(&write_task); + isc_loopmgr_shutdown(loopmgr); } goto unlock; } @@ -1680,8 +1684,8 @@ assignwork(isc_task_t *task, isc_task_t *worker) { if (!found) { ended++; if (ended == ntasks) { - isc_task_detach(&task); - isc_app_shutdown(); + isc_task_detach(&write_task); + isc_loopmgr_shutdown(loopmgr); } isc_mem_put(mctx, fname, sizeof(dns_fixedname_t)); goto unlock; @@ -1691,7 +1695,7 @@ assignwork(isc_task_t *task, isc_task_t *worker) { sevent->node = node; sevent->fname = fname; - isc_task_send(worker, ISC_EVENT_PTR(&sevent)); + isc_task_send(task, ISC_EVENT_PTR(&sevent)); unlock: UNLOCK(&namelock); } @@ -1700,12 +1704,30 @@ unlock: * Start a worker task */ static void -startworker(isc_task_t *task, isc_event_t *event) { - isc_task_t *worker; 
+startworker(void *arg) { + isc_task_t **tasks = (isc_task_t **)arg; + isc_result_t result; + int tid; - worker = (isc_task_t *)event->ev_arg; - assignwork(task, worker); - isc_event_free(&event); + REQUIRE(tasks != NULL); + + tid = isc_tid(); + result = isc_task_create(taskmgr, &tasks[tid], tid); + if (result != ISC_R_SUCCESS) { + fatal("failed to create task: %s", isc_result_totext(result)); + } + + assignwork(tasks[tid]); +} + +/*% + * Finish a worker task + */ +static void +workerdone(void *arg) { + isc_task_t **tasks = (isc_task_t **)arg; + + isc_task_detach(&tasks[isc_tid()]); } /*% @@ -1713,15 +1735,15 @@ startworker(isc_task_t *task, isc_event_t *event) { */ static void writenode(isc_task_t *task, isc_event_t *event) { - isc_task_t *worker; sevent_t *sevent = (sevent_t *)event; - worker = (isc_task_t *)event->ev_sender; + LOCK(&namelock); dumpnode(dns_fixedname_name(sevent->fname), sevent->node); + UNLOCK(&namelock); cleannode(gdb, gversion, sevent->node); dns_db_detachnode(gdb, &sevent->node); isc_mem_put(mctx, sevent->fname, sizeof(dns_fixedname_t)); - assignwork(task, worker); + assignwork(task); isc_event_free(&event); } @@ -1734,18 +1756,20 @@ sign(isc_task_t *task, isc_event_t *event) { dns_dbnode_t *node; sevent_t *sevent, *wevent; + UNUSED(task); + sevent = (sevent_t *)event; node = sevent->node; fname = sevent->fname; isc_event_free(&event); signname(node, dns_fixedname_name(fname)); - wevent = (sevent_t *)isc_event_allocate(mctx, task, SIGNER_EVENT_WRITE, - writenode, NULL, - sizeof(sevent_t)); + wevent = (sevent_t *)isc_event_allocate(mctx, write_task, + SIGNER_EVENT_WRITE, writenode, + NULL, sizeof(sevent_t)); wevent->node = node; wevent->fname = fname; - isc_task_send(main_task, ISC_EVENT_PTR(&wevent)); + isc_task_send(write_task, ISC_EVENT_PTR(&wevent)); } /*% @@ -3325,7 +3349,7 @@ print_stats(isc_time_t *timer_start, isc_time_t *timer_finish, int main(int argc, char *argv[]) { - int i, ch; + int ch; char *startstr = NULL, *endstr = NULL, 
*classname = NULL; char *dnskey_endstr = NULL; char *origin = NULL, *file = NULL, *output = NULL; @@ -3386,12 +3410,7 @@ main(int argc, char *argv[]) { masterstyle = &dns_master_style_explicitttl; - check_result(isc_app_start(), "isc_app_start"); - - isc_mem_create(&mctx); - isc_commandline_errprint = false; - while ((ch = isc_commandline_parse(argc, argv, CMDLINE_FLAGS)) != -1) { switch (ch) { case '3': @@ -3669,12 +3688,6 @@ main(int argc, char *argv[]) { } } - result = dst_lib_init(mctx, engine); - if (result != ISC_R_SUCCESS) { - fatal("could not initialize dst: %s", - isc_result_totext(result)); - } - isc_stdtime_get(&now); if (startstr != NULL) { @@ -3704,7 +3717,7 @@ main(int argc, char *argv[]) { } if (ntasks == 0) { - ntasks = isc_os_ncpus() * 2; + ntasks = isc_os_ncpus(); } vbprintf(4, "using %d cpus\n", ntasks); @@ -3714,6 +3727,16 @@ main(int argc, char *argv[]) { directory = "."; } + isc_managers_create(&mctx, ntasks, &loopmgr, &netmgr, &taskmgr); + + isc_task_create(taskmgr, &write_task, 0); + + result = dst_lib_init(mctx, engine); + if (result != ISC_R_SUCCESS) { + fatal("could not initialize dst: %s", + isc_result_totext(result)); + } + setup_logging(mctx, &log); argc -= isc_commandline_index; @@ -3996,24 +4019,6 @@ main(int argc, char *argv[]) { print_time(outfp); print_version(outfp); - isc_managers_create(mctx, ntasks, 0, &loopmgr, &netmgr, &taskmgr); - - main_task = NULL; - result = isc_task_create(taskmgr, 0, &main_task, 0); - if (result != ISC_R_SUCCESS) { - fatal("failed to create task: %s", isc_result_totext(result)); - } - - tasks = isc_mem_get(mctx, ntasks * sizeof(isc_task_t *)); - for (i = 0; i < (int)ntasks; i++) { - tasks[i] = NULL; - result = isc_task_create(taskmgr, 0, &tasks[i], i); - if (result != ISC_R_SUCCESS) { - fatal("failed to create task: %s", - isc_result_totext(result)); - } - } - isc_mutex_init(&namelock); if (printstats) { @@ -4028,27 +4033,21 @@ main(int argc, char *argv[]) { * There is more work to do. 
Spread it out over multiple * processors if possible. */ - for (i = 0; i < (int)ntasks; i++) { - result = isc_app_onrun(mctx, main_task, startworker, - tasks[i]); - if (result != ISC_R_SUCCESS) { - fatal("failed to start task: %s", - isc_result_totext(result)); - } - } - (void)isc_app_run(); + tasks = isc_mem_get(mctx, ntasks * sizeof(isc_task_t *)); + memset(tasks, 0, ntasks * sizeof(isc_task_t *)); + + isc_loopmgr_setup(loopmgr, startworker, tasks); + isc_loopmgr_teardown(loopmgr, workerdone, tasks); + + isc_loopmgr_run(loopmgr); + if (!atomic_load(&finished)) { fatal("process aborted by user"); } - } else { - isc_task_detach(&main_task); + + isc_mem_put(mctx, tasks, ntasks * sizeof(isc_task_t *)); } atomic_store(&shuttingdown, true); - for (i = 0; i < (int)ntasks; i++) { - isc_task_detach(&tasks[i]); - } - isc_managers_destroy(&loopmgr, &netmgr, &taskmgr); - isc_mem_put(mctx, tasks, ntasks * sizeof(isc_task_t *)); postsign(); TIME_NOW(&sign_finish); @@ -4080,11 +4079,6 @@ main(int argc, char *argv[]) { check_result(result, "dns_master_dumptostream3"); } - isc_mutex_destroy(&namelock); - if (printstats) { - isc_mutex_destroy(&statslock); - } - if (!output_stdout) { result = isc_stdio_close(outfp); check_result(result, "isc_stdio_close"); @@ -4128,15 +4122,16 @@ main(int argc, char *argv[]) { if (verbose > 10) { isc_mem_stats(mctx, stdout); } - isc_mem_destroy(&mctx); - (void)isc_app_finish(); + isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr); if (printstats) { TIME_NOW(&timer_finish); print_stats(&timer_start, &timer_finish, &sign_start, &sign_finish); + isc_mutex_destroy(&statslock); } + isc_mutex_destroy(&namelock); return (vresult == ISC_R_SUCCESS ? 
0 : 1); } diff --git a/bin/dnssec/dnssec-verify.c b/bin/dnssec/dnssec-verify.c index 870c24c208..db228b2e2b 100644 --- a/bin/dnssec/dnssec-verify.c +++ b/bin/dnssec/dnssec-verify.c @@ -17,7 +17,6 @@ #include #include -#include #include #include #include @@ -206,7 +205,6 @@ main(int argc, char *argv[]) { } } isc_commandline_reset = true; - check_result(isc_app_start(), "isc_app_start"); isc_mem_create(&mctx); @@ -346,7 +344,5 @@ main(int argc, char *argv[]) { } isc_mem_destroy(&mctx); - (void)isc_app_finish(); - return (result == ISC_R_SUCCESS ? 0 : 1); } diff --git a/bin/named/control.c b/bin/named/control.c index 0b742407fe..8f2e1f0678 100644 --- a/bin/named/control.c +++ b/bin/named/control.c @@ -15,7 +15,6 @@ #include -#include #include #include #include @@ -175,9 +174,8 @@ named_control_docommand(isccc_sexpr_t *message, bool readonly, named_smf_want_disable = 1; } /* - * If named_smf_got_instance = 0, named_smf_chroot - * is not relevant and we fall through to - * isc_app_shutdown below. + * If named_smf_got_instance = 0, named_smf_chroot is + * not relevant and we fall through to shutdown below. 
*/ #endif /* ifdef HAVE_LIBSCF */ /* Do not flush master files */ diff --git a/bin/named/controlconf.c b/bin/named/controlconf.c index 7adae569e1..2319a91e68 100644 --- a/bin/named/controlconf.c +++ b/bin/named/controlconf.c @@ -16,7 +16,6 @@ #include #include -#include #include #include #include @@ -49,6 +48,7 @@ #include #include #include +#include #include typedef struct controlkey controlkey_t; @@ -149,9 +149,7 @@ free_listener(controllistener_t *listener) { isc_refcount_destroy(&listener->refs); - if (listener->sock != NULL) { - isc_nmsocket_close(&listener->sock); - } + REQUIRE(listener->sock == NULL); free_controlkeylist(&listener->keys, listener->mctx); @@ -192,6 +190,7 @@ shutdown_listener(controllistener_t *listener) { } isc_nm_stoplistening(listener->sock); + isc_nmsocket_close(&listener->sock); maybe_free_listener(listener); } @@ -226,7 +225,7 @@ control_senddone(isc_nmhandle_t *handle, isc_result_t result, void *arg) { conn->sending = false; if (conn->result == ISC_R_SHUTTINGDOWN) { - isc_app_shutdown(); + isc_loopmgr_shutdown(named_g_loopmgr); goto cleanup_sendhandle; } diff --git a/bin/named/fuzz.c b/bin/named/fuzz.c index fb0d56fc73..b011aae095 100644 --- a/bin/named/fuzz.c +++ b/bin/named/fuzz.c @@ -25,8 +25,8 @@ #include #include -#include #include +#include #include #include #include @@ -126,7 +126,7 @@ fuzz_thread_client(void *arg) { close(sockfd); named_server_flushonshutdown(named_g_server, false); - isc_app_shutdown(); + isc_loopmgr_shutdown(named_g_loopmgr); return (NULL); } raise(SIGSTOP); @@ -159,7 +159,7 @@ fuzz_thread_client(void *arg) { close(sockfd); named_server_flushonshutdown(named_g_server, false); - isc_app_shutdown(); + isc_loopmgr_shutdown(named_g_loopmgr); return (NULL); } @@ -374,7 +374,7 @@ fuzz_thread_resolver(void *arg) { close(listenfd); named_server_flushonshutdown(named_g_server, false); - isc_app_shutdown(); + isc_loopmgr_shutdown(named_g_loopmgr); return (NULL); } raise(SIGSTOP); @@ -574,7 +574,7 @@ 
fuzz_thread_resolver(void *arg) { close(sockfd); close(listenfd); named_server_flushonshutdown(named_g_server, false); - isc_app_shutdown(); + isc_loopmgr_shutdown(named_g_loopmgr); #ifdef __AFL_LOOP /* @@ -716,7 +716,7 @@ fuzz_thread_tcp(void *arg) { free(buf); close(sockfd); named_server_flushonshutdown(named_g_server, false); - isc_app_shutdown(); + isc_loopmgr_shutdown(named_g_loopmgr); return (NULL); } @@ -733,7 +733,7 @@ named_fuzz_notify(void) { #ifdef ENABLE_AFL if (getenv("AFL_CMIN")) { named_server_flushonshutdown(named_g_server, false); - isc_app_shutdown(); + isc_loopmgr_shutdown(named_g_loopmgr); return; } diff --git a/bin/named/include/named/server.h b/bin/named/include/named/server.h index ce45b440c1..62e9c3c95b 100644 --- a/bin/named/include/named/server.h +++ b/bin/named/include/named/server.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -42,7 +43,8 @@ #define NAMED_EVENT_DELZONE (NAMED_EVENTCLASS + 1) #define NAMED_EVENT_COMMAND (NAMED_EVENTCLASS + 2) #define NAMED_EVENT_TATSEND (NAMED_EVENTCLASS + 3) -#define NAMED_EVENT_SHUTDOWN (NAMED_EVENTCLASS + 4) +#define NAMED_EVENT_RUN (NAMED_EVENTCLASS + 4) +#define NAMED_EVENT_SHUTDOWN (NAMED_EVENTCLASS + 5) /*% * Name server state. Better here than in lots of separate global variables. @@ -116,6 +118,8 @@ struct named_server { isc_tlsctx_cache_t *tlsctx_server_cache; isc_tlsctx_cache_t *tlsctx_client_cache; + + isc_signal_t *sighup; }; #define NAMED_SERVER_MAGIC ISC_MAGIC('S', 'V', 'E', 'R') @@ -142,7 +146,7 @@ named_server_shutdown(named_server_t *server); */ void -named_server_reloadwanted(named_server_t *server); +named_server_reloadwanted(void *arg, int signum); /*%< * Inform a server that a reload is wanted. This function * may be called asynchronously, from outside the server's task. 
diff --git a/bin/named/log.c b/bin/named/log.c index ff76c36a7a..4270549b6b 100644 --- a/bin/named/log.c +++ b/bin/named/log.c @@ -51,6 +51,7 @@ isc_result_t named_log_init(bool safe) { isc_result_t result; isc_logconfig_t *lcfg = NULL; + isc_mem_t *log_mctx = NULL; named_g_categories = categories; named_g_modules = modules; @@ -58,7 +59,9 @@ named_log_init(bool safe) { /* * Setup a logging context. */ - isc_log_create(named_g_mctx, &named_g_lctx, &lcfg); + isc_mem_create(&log_mctx); + isc_log_create(log_mctx, &named_g_lctx, &lcfg); + isc_mem_detach(&log_mctx); /* * named-checktool.c:setup_logging() needs to be kept in sync. diff --git a/bin/named/main.c b/bin/named/main.c index b57b608211..6cf8ad821e 100644 --- a/bin/named/main.c +++ b/bin/named/main.c @@ -24,7 +24,6 @@ #include #endif -#include #include #include #include @@ -38,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -899,13 +899,14 @@ parse_command_line(int argc, char *argv[]) { static isc_result_t create_managers(void) { - isc_result_t result; - + /* + * Set the default named_g_cpus if it was not set from the command line + */ INSIST(named_g_cpus_detected > 0); - if (named_g_cpus == 0) { named_g_cpus = named_g_cpus_detected; } + isc_log_write( named_g_lctx, NAMED_LOGCATEGORY_GENERAL, NAMED_LOGMODULE_SERVER, ISC_LOG_INFO, "found %u CPU%s, using %u worker thread%s", @@ -922,24 +923,14 @@ create_managers(void) { "using %u UDP listener%s per interface", named_g_udpdisp, named_g_udpdisp == 1 ? 
"" : "s"); - result = isc_managers_create(named_g_mctx, named_g_cpus, - 0 /* quantum */, &named_g_loopmgr, - &named_g_netmgr, &named_g_taskmgr); - if (result != ISC_R_SUCCESS) { - return (result); - } + isc_managers_create(&named_g_mctx, named_g_cpus, &named_g_loopmgr, + &named_g_netmgr, &named_g_taskmgr); isc_nm_maxudp(named_g_netmgr, maxudp); return (ISC_R_SUCCESS); } -static void -destroy_managers(void) { - isc_managers_destroy(&named_g_loopmgr, &named_g_netmgr, - &named_g_taskmgr); -} - static void setup(void) { isc_result_t result; @@ -1011,16 +1002,6 @@ setup(void) { named_os_daemonize(); } - /* - * We call isc_app_start() here as some versions of FreeBSD's fork() - * destroys all the signal handling it sets up. - */ - result = isc_app_start(); - if (result != ISC_R_SUCCESS) { - named_main_earlyfatal("isc_app_start() failed: %s", - isc_result_totext(result)); - } - isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL, NAMED_LOGMODULE_MAIN, ISC_LOG_NOTICE, "starting %s%s ", PACKAGE_STRING, @@ -1258,10 +1239,6 @@ setup(void) { static void cleanup(void) { - named_server_shutdown(named_g_server); - - destroy_managers(); - if (named_g_mapped != NULL) { dns_acl_detach(&named_g_mapped); } @@ -1453,30 +1430,15 @@ main(int argc, char *argv[]) { } } - isc_mem_create(&named_g_mctx); + setup(); isc_mem_setname(named_g_mctx, "main"); - setup(); - /* - * Start things running and then wait for a shutdown request - * or reload. + * Start things running */ - do { - result = isc_app_run(); + isc_signal_start(named_g_server->sighup); - if (result == ISC_R_RELOAD) { - named_server_reloadwanted(named_g_server); - } else if (result != ISC_R_SUCCESS) { - UNEXPECTED_ERROR(__FILE__, __LINE__, - "isc_app_run(): %s", - isc_result_totext(result)); - /* - * Force exit. 
- */ - result = ISC_R_SUCCESS; - } - } while (result != ISC_R_SUCCESS); + isc_loopmgr_run(named_g_loopmgr); #ifdef HAVE_LIBSCF if (named_smf_want_disable == 1) { @@ -1510,13 +1472,13 @@ main(int argc, char *argv[]) { (void)isc_stdio_close(fp); } } - isc_mem_destroy(&named_g_mctx); + + isc_managers_destroy(&named_g_mctx, &named_g_loopmgr, &named_g_netmgr, + &named_g_taskmgr); isc_mem_checkdestroyed(stderr); named_main_setmemstats(NULL); - isc_app_finish(); - named_os_closedevnull(); named_os_shutdown(); diff --git a/bin/named/server.c b/bin/named/server.c index 73f85bf535..899b0e027a 100644 --- a/bin/named/server.c +++ b/bin/named/server.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -27,7 +28,6 @@ #endif #include -#include #include #include #include @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -48,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -2803,8 +2805,7 @@ catz_addmodzone_taskaction(isc_task_t *task, isc_event_t *event0) { /* Mark view unfrozen so that zone can be added */ - result = isc_task_beginexclusive(task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(task); dns_view_thaw(ev->view); result = configure_zone(cfg->config, zoneobj, cfg->vconfig, ev->view, &ev->cbd->server->viewlist, @@ -2874,8 +2875,7 @@ catz_delzone_taskaction(isc_task_t *task, isc_event_t *event0) { char cname[DNS_NAME_FORMATSIZE]; const char *file; - result = isc_task_beginexclusive(task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(task); dns_name_format(dns_catz_entry_getname(ev->entry), cname, DNS_NAME_FORMATSIZE); @@ -7381,7 +7381,7 @@ pps_timer_tick(void *arg) { * string or NULL, with a dynamically allocated copy of the * null-terminated string pointed to by 'value', or NULL. 
*/ -static isc_result_t +static void setstring(named_server_t *server, char **field, const char *value) { char *copy; @@ -7396,7 +7396,6 @@ setstring(named_server_t *server, char **field, const char *value) { } *field = copy; - return (ISC_R_SUCCESS); } /* @@ -7404,12 +7403,12 @@ setstring(named_server_t *server, char **field, const char *value) { * string or NULL, with another dynamically allocated string * or NULL if whether 'obj' is a string or void value, respectively. */ -static isc_result_t +static void setoptstring(named_server_t *server, char **field, const cfg_obj_t *obj) { if (cfg_obj_isvoid(obj)) { - return (setstring(server, field, NULL)); + setstring(server, field, NULL); } else { - return (setstring(server, field, cfg_obj_asstring(obj))); + setstring(server, field, cfg_obj_asstring(obj)); } } @@ -8352,13 +8351,12 @@ load_configuration(const char *filename, named_server_t *server, dns_kasp_t *default_kasp = NULL; dns_kasplist_t tmpkasplist, kasplist; const cfg_obj_t *views; - dns_view_t *view = NULL; + dns_view_t *view_next = NULL; dns_viewlist_t tmpviewlist; dns_viewlist_t viewlist, builtin_viewlist; in_port_t listen_port, udpport_low, udpport_high; int i, backlog; - bool exclusive = false; isc_interval_t interval; isc_logconfig_t *logc = NULL; isc_portset_t *v4portset = NULL; @@ -8380,6 +8378,7 @@ load_configuration(const char *filename, named_server_t *server, uint32_t max; uint64_t initial, idle, keepalive, advertised; bool loadbalancesockets; + bool exclusive = true; dns_aclenv_t *env = ns_interfacemgr_getaclenv(named_g_server->interfacemgr); @@ -8389,11 +8388,17 @@ load_configuration(const char *filename, named_server_t *server, ISC_LIST_INIT(cachelist); ISC_LIST_INIT(altsecrets); + /* Ensure exclusive access to configuration data. 
*/ + isc_task_beginexclusive(server->task); + /* Create the ACL configuration context */ if (named_g_aclconfctx != NULL) { cfg_aclconfctx_detach(&named_g_aclconfctx); } - CHECK(cfg_aclconfctx_create(named_g_mctx, &named_g_aclconfctx)); + result = cfg_aclconfctx_create(named_g_mctx, &named_g_aclconfctx); + if (result != ISC_R_SUCCESS) { + goto cleanup_exclusive; + } /* * Shut down all dyndb instances. @@ -8422,12 +8427,17 @@ load_configuration(const char *filename, named_server_t *server, isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL, NAMED_LOGMODULE_SERVER, ISC_LOG_INFO, "loading configuration from '%s'", filename); - CHECK(cfg_parser_create(named_g_mctx, named_g_lctx, &conf_parser)); + result = cfg_parser_create(named_g_mctx, named_g_lctx, &conf_parser); + if (result != ISC_R_SUCCESS) { + goto cleanup_exclusive; + } + cfg_parser_setcallback(conf_parser, directory_callback, NULL); result = cfg_parse_file(conf_parser, filename, &cfg_type_namedconf, &config); - - CHECK(result); + if (result != ISC_R_SUCCESS) { + goto cleanup_conf_parser; + } /* * Check the validity of the configuration. @@ -8436,7 +8446,11 @@ load_configuration(const char *filename, named_server_t *server, * checked later when the modules are actually loaded and * registered.) 
*/ - CHECK(bind9_check_namedconf(config, false, named_g_lctx, named_g_mctx)); + result = bind9_check_namedconf(config, false, named_g_lctx, + named_g_mctx); + if (result != ISC_R_SUCCESS) { + goto cleanup_config; + } /* Let's recreate the TLS context cache */ if (server->tlsctx_server_cache != NULL) { @@ -8496,8 +8510,7 @@ load_configuration(const char *filename, named_server_t *server, obj = NULL; result = named_config_get(maps, "bindkeys-file", &obj); INSIST(result == ISC_R_SUCCESS); - CHECKM(setstring(server, &server->bindkeysfile, cfg_obj_asstring(obj)), - "strdup"); + setstring(server, &server->bindkeysfile, cfg_obj_asstring(obj)); INSIST(server->bindkeysfile != NULL); if (access(server->bindkeysfile, R_OK) == 0) { @@ -8507,8 +8520,11 @@ load_configuration(const char *filename, named_server_t *server, "from file '%s'", server->bindkeysfile); - CHECK(cfg_parser_create(named_g_mctx, named_g_lctx, - &bindkeys_parser)); + result = cfg_parser_create(named_g_mctx, named_g_lctx, + &bindkeys_parser); + if (result != ISC_R_SUCCESS) { + goto cleanup_config; + } result = cfg_parse_file(bindkeys_parser, server->bindkeysfile, &cfg_type_bindkeys, &bindkeys); @@ -8528,13 +8544,6 @@ load_configuration(const char *filename, named_server_t *server, server->bindkeysfile); } - /* Ensure exclusive access to configuration data. */ - if (!exclusive) { - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); - exclusive = true; - } - /* * Set process limits, which (usually) needs to be done as root. */ @@ -8543,7 +8552,10 @@ load_configuration(const char *filename, named_server_t *server, /* * Check the process lockfile. 
*/ - CHECK(check_lockfile(server, config, first_time)); + result = check_lockfile(server, config, first_time); + if (result != ISC_R_SUCCESS) { + goto cleanup_bindkeys_parser; + } #if defined(HAVE_GEOIP2) /* @@ -8586,7 +8598,9 @@ load_configuration(const char *filename, named_server_t *server, "'recursive-clients %d' too low when " "running with %d worker threads", max, named_g_cpus); - CHECK(ISC_R_RANGE); + result = ISC_R_RANGE; + + goto cleanup_bindkeys_parser; } softquota = max - margin; } else { @@ -8599,9 +8613,13 @@ load_configuration(const char *filename, named_server_t *server, * Set "blackhole". Only legal at options level; there is * no default. */ - CHECK(configure_view_acl(NULL, config, NULL, "blackhole", NULL, - named_g_aclconfctx, named_g_mctx, - &server->sctx->blackholeacl)); + result = configure_view_acl(NULL, config, NULL, "blackhole", NULL, + named_g_aclconfctx, named_g_mctx, + &server->sctx->blackholeacl); + if (result != ISC_R_SUCCESS) { + goto cleanup_bindkeys_parser; + } + if (server->sctx->blackholeacl != NULL) { dns_dispatchmgr_setblackhole(named_g_dispatchmgr, server->sctx->blackholeacl); @@ -8612,10 +8630,6 @@ load_configuration(const char *filename, named_server_t *server, INSIST(result == ISC_R_SUCCESS); env->match_mapped = cfg_obj_asboolean(obj); - CHECKM(named_statschannels_configure(named_g_server, config, - named_g_aclconfctx), - "configuring statistics server(s)"); - /* * Configure the network manager */ @@ -8729,10 +8743,22 @@ load_configuration(const char *filename, named_server_t *server, /* * Configure sets of UDP query source ports. 
 */
-	CHECKM(isc_portset_create(named_g_mctx, &v4portset), "creating UDP "
-							     "port set");
-	CHECKM(isc_portset_create(named_g_mctx, &v6portset), "creating UDP "
-							     "port set");
+	result = isc_portset_create(named_g_mctx, &v4portset);
+	if (result != ISC_R_SUCCESS) {
+		isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+			      NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+			      "creating UDP/IPv4 port set: %s",
+			      isc_result_totext(result));
+		goto cleanup_bindkeys_parser;
+	}
+	result = isc_portset_create(named_g_mctx, &v6portset);
+	if (result != ISC_R_SUCCESS) {
+		isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+			      NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+			      "creating UDP/IPv6 port set: %s",
+			      isc_result_totext(result));
+		goto cleanup_v4portset;
+	}
 
 	usev4ports = NULL;
 	usev6ports = NULL;
@@ -8743,9 +8769,16 @@
 	if (usev4ports != NULL) {
 		portset_fromconf(v4portset, usev4ports, true);
 	} else {
-		CHECKM(isc_net_getudpportrange(AF_INET, &udpport_low,
-					       &udpport_high),
-		       "get the default UDP/IPv4 port range");
+		result = isc_net_getudpportrange(AF_INET, &udpport_low,
+						 &udpport_high);
+		if (result != ISC_R_SUCCESS) {
+			isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+				      NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+				      "get the default UDP/IPv4 port range: %s",
+				      isc_result_totext(result));
+			goto cleanup_v6portset;
+		}
+
 		if (udpport_low == udpport_high) {
 			isc_portset_add(v4portset, udpport_low);
 		} else {
@@ -8769,9 +8802,15 @@
 	if (usev6ports != NULL) {
 		portset_fromconf(v6portset, usev6ports, true);
 	} else {
-		CHECKM(isc_net_getudpportrange(AF_INET6, &udpport_low,
-					       &udpport_high),
-		       "get the default UDP/IPv6 port range");
+		result = isc_net_getudpportrange(AF_INET6, &udpport_low,
+						 &udpport_high);
+		if (result != ISC_R_SUCCESS) {
+			isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+				      NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+				      "get the default UDP/IPv6 port range: 
%s", + isc_result_totext(result)); + goto cleanup_v6portset; + } if (udpport_low == udpport_high) { isc_portset_add(v6portset, udpport_low); } else { @@ -8857,14 +8896,22 @@ load_configuration(const char *filename, named_server_t *server, if (named_g_port != 0) { listen_port = named_g_port; } else { - CHECKM(named_config_getport(config, "port", &listen_port), - "port"); + result = named_config_getport(config, "port", &listen_port); + if (result != ISC_R_SUCCESS) { + goto cleanup_v6portset; + } } /* * Determining the default DSCP code point. */ - CHECKM(named_config_getdscp(config, &named_g_dscp), "dscp"); + result = named_config_getdscp(config, &named_g_dscp); + if (result != ISC_R_SUCCESS) { + isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL, + NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR, "dscp: %s", + isc_result_totext(result)); + goto cleanup_v6portset; + } /* * Find the listen queue depth. @@ -8914,18 +8961,22 @@ load_configuration(const char *filename, named_server_t *server, (void)cfg_map_get(options, "listen-on", &clistenon); } if (clistenon != NULL) { - CHECK(listenlist_fromconfig( + result = listenlist_fromconfig( clistenon, config, named_g_aclconfctx, named_g_mctx, AF_INET, - server->tlsctx_server_cache, &listenon)); + server->tlsctx_server_cache, &listenon); } else { /* * Not specified, use default. 
*/ - CHECK(ns_listenlist_default(named_g_mctx, listen_port, - -1, true, AF_INET, - &listenon)); + result = ns_listenlist_default(named_g_mctx, + listen_port, -1, true, + AF_INET, &listenon); } + if (result != ISC_R_SUCCESS) { + goto cleanup_v6portset; + } + if (listenon != NULL) { ns_interfacemgr_setlistenon4(server->interfacemgr, listenon); @@ -8944,17 +8995,20 @@ load_configuration(const char *filename, named_server_t *server, (void)cfg_map_get(options, "listen-on-v6", &clistenon); } if (clistenon != NULL) { - CHECK(listenlist_fromconfig( + result = listenlist_fromconfig( clistenon, config, named_g_aclconfctx, named_g_mctx, AF_INET6, - server->tlsctx_server_cache, &listenon)); + server->tlsctx_server_cache, &listenon); } else { /* * Not specified, use default. */ - CHECK(ns_listenlist_default(named_g_mctx, listen_port, - -1, true, AF_INET6, - &listenon)); + result = ns_listenlist_default(named_g_mctx, + listen_port, -1, true, + AF_INET6, &listenon); + } + if (result != ISC_R_SUCCESS) { + goto cleanup_v6portset; } if (listenon != NULL) { ns_interfacemgr_setlistenon6(server->interfacemgr, @@ -8963,27 +9017,6 @@ load_configuration(const char *filename, named_server_t *server, } } - /* - * Rescan the interface list to pick up changes in the - * listen-on option. It's important that we do this before we try - * to configure the query source, since the dispatcher we use might - * be shared with an interface. - */ - result = ns_interfacemgr_scan(server->interfacemgr, true, true); - - /* - * Check that named is able to TCP listen on at least one - * interface. Otherwise, another named process could be running - * and we should fail. 
- */ - if (first_time && (result == ISC_R_ADDRINUSE)) { - isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL, - NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR, - "unable to listen on any configured interfaces"); - result = ISC_R_FAILURE; - goto cleanup; - } - /* * Arrange for further interface scanning to occur periodically * as specified by the "interface-interval" option. @@ -9070,8 +9103,12 @@ load_configuration(const char *filename, named_server_t *server, cfg_obj_t *kconfig = cfg_listelt_value(element); kasp = NULL; - CHECK(cfg_kasp_fromconfig(kconfig, default_kasp, named_g_mctx, - named_g_lctx, &kasplist, &kasp)); + result = cfg_kasp_fromconfig(kconfig, default_kasp, + named_g_mctx, named_g_lctx, + &kasplist, &kasp); + if (result != ISC_R_SUCCESS) { + goto cleanup_kasplist; + } INSIST(kasp != NULL); dns_kasp_freeze(kasp); @@ -9095,8 +9132,12 @@ load_configuration(const char *filename, named_server_t *server, { cfg_obj_t *kconfig = cfg_listelt_value(element); kasp = NULL; - CHECK(cfg_kasp_fromconfig(kconfig, default_kasp, named_g_mctx, - named_g_lctx, &kasplist, &kasp)); + result = cfg_kasp_fromconfig(kconfig, default_kasp, + named_g_mctx, named_g_lctx, + &kasplist, &kasp); + if (result != ISC_R_SUCCESS) { + goto cleanup_kasplist; + } INSIST(kasp != NULL); dns_kasp_freeze(kasp); dns_kasp_detach(&kasp); @@ -9129,16 +9170,21 @@ load_configuration(const char *filename, named_server_t *server, element = cfg_list_next(element)) { cfg_obj_t *vconfig = cfg_listelt_value(element); + dns_view_t *view = NULL; - view = NULL; - - CHECK(create_view(vconfig, &viewlist, &view)); + result = create_view(vconfig, &viewlist, &view); + if (result != ISC_R_SUCCESS) { + goto cleanup_viewlist; + } INSIST(view != NULL); - CHECK(setup_newzones(view, config, vconfig, conf_parser, - named_g_aclconfctx)); - + result = setup_newzones(view, config, vconfig, conf_parser, + named_g_aclconfctx); dns_view_detach(&view); + + if (result != ISC_R_SUCCESS) { + goto cleanup_viewlist; + } } /* @@ -9146,13 
+9192,21 @@ load_configuration(const char *filename, named_server_t *server, * view here. */ if (views == NULL) { - CHECK(create_view(NULL, &viewlist, &view)); + dns_view_t *view = NULL; + + result = create_view(NULL, &viewlist, &view); + if (result != ISC_R_SUCCESS) { + goto cleanup_viewlist; + } INSIST(view != NULL); - CHECK(setup_newzones(view, config, NULL, conf_parser, - named_g_aclconfctx)); + result = setup_newzones(view, config, NULL, conf_parser, + named_g_aclconfctx); dns_view_detach(&view); + if (result != ISC_R_SUCCESS) { + goto cleanup_viewlist; + } } /* @@ -9164,12 +9218,21 @@ load_configuration(const char *filename, named_server_t *server, element = cfg_list_next(element)) { cfg_obj_t *vconfig = cfg_listelt_value(element); + dns_view_t *view = NULL; view = NULL; - CHECK(find_view(vconfig, &viewlist, &view)); - CHECK(configure_view(view, &viewlist, config, vconfig, - &cachelist, &server->kasplist, bindkeys, - named_g_mctx, named_g_aclconfctx, true)); + result = find_view(vconfig, &viewlist, &view); + if (result != ISC_R_SUCCESS) { + goto cleanup_cachelist; + } + + result = configure_view(view, &viewlist, config, vconfig, + &cachelist, &server->kasplist, bindkeys, + named_g_mctx, named_g_aclconfctx, true); + if (result != ISC_R_SUCCESS) { + dns_view_detach(&view); + goto cleanup_cachelist; + } dns_view_freeze(view); dns_view_detach(&view); } @@ -9179,11 +9242,18 @@ load_configuration(const char *filename, named_server_t *server, * were no explicit views. 
*/ if (views == NULL) { - view = NULL; - CHECK(find_view(NULL, &viewlist, &view)); - CHECK(configure_view(view, &viewlist, config, NULL, &cachelist, - &server->kasplist, bindkeys, named_g_mctx, - named_g_aclconfctx, true)); + dns_view_t *view = NULL; + result = find_view(NULL, &viewlist, &view); + if (result != ISC_R_SUCCESS) { + goto cleanup_cachelist; + } + result = configure_view(view, &viewlist, config, NULL, + &cachelist, &server->kasplist, bindkeys, + named_g_mctx, named_g_aclconfctx, true); + if (result != ISC_R_SUCCESS) { + dns_view_detach(&view); + goto cleanup_cachelist; + } dns_view_freeze(view); dns_view_detach(&view); } @@ -9198,14 +9268,23 @@ load_configuration(const char *filename, named_server_t *server, element = cfg_list_next(element)) { cfg_obj_t *vconfig = cfg_listelt_value(element); + dns_view_t *view = NULL; - CHECK(create_view(vconfig, &builtin_viewlist, &view)); - CHECK(configure_view(view, &viewlist, config, vconfig, - &cachelist, &server->kasplist, bindkeys, - named_g_mctx, named_g_aclconfctx, false)); + result = create_view(vconfig, &builtin_viewlist, &view); + if (result != ISC_R_SUCCESS) { + goto cleanup_cachelist; + } + + result = configure_view(view, &viewlist, config, vconfig, + &cachelist, &server->kasplist, bindkeys, + named_g_mctx, named_g_aclconfctx, + false); + if (result != ISC_R_SUCCESS) { + dns_view_detach(&view); + goto cleanup_cachelist; + } dns_view_freeze(view); dns_view_detach(&view); - view = NULL; } /* Now combine the two viewlists into one */ @@ -9215,7 +9294,7 @@ load_configuration(const char *filename, named_server_t *server, * Commit any dns_zone_setview() calls on all zones in the new * view. 
*/ - for (view = ISC_LIST_HEAD(viewlist); view != NULL; + for (dns_view_t *view = ISC_LIST_HEAD(viewlist); view != NULL; view = ISC_LIST_NEXT(view, link)) { dns_view_setviewcommit(view); @@ -9227,10 +9306,10 @@ load_configuration(const char *filename, named_server_t *server, viewlist = tmpviewlist; /* Make the view list available to each of the views */ - view = ISC_LIST_HEAD(server->viewlist); - while (view != NULL) { + for (dns_view_t *view = ISC_LIST_HEAD(server->viewlist); view != NULL; + view = ISC_LIST_NEXT(view, link)) + { view->viewlist = &server->viewlist; - view = ISC_LIST_NEXT(view, link); } /* Swap our new cache list with the production one. */ @@ -9240,22 +9319,23 @@ load_configuration(const char *filename, named_server_t *server, /* Load the TKEY information from the configuration. */ if (options != NULL) { - dns_tkeyctx_t *t = NULL; - CHECKM(named_tkeyctx_fromconfig(options, named_g_mctx, &t), - "configuring TKEY"); + dns_tkeyctx_t *tkeyctx = NULL; + + result = named_tkeyctx_fromconfig(options, named_g_mctx, + &tkeyctx); + if (result != ISC_R_SUCCESS) { + isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL, + NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR, + "configuring TKEY: %s", + isc_result_totext(result)); + goto cleanup_cachelist; + } if (server->sctx->tkeyctx != NULL) { dns_tkeyctx_destroy(&server->sctx->tkeyctx); } - server->sctx->tkeyctx = t; + server->sctx->tkeyctx = tkeyctx; } - /* - * Bind the control port(s). - */ - CHECKM(named_controls_configure(named_g_server->controls, config, - named_g_aclconfctx), - "binding control channel(s)"); - #ifdef HAVE_LMDB /* * If we're using LMDB, we may have created newzones databases @@ -9264,8 +9344,8 @@ load_configuration(const char *filename, named_server_t *server, * after relinquishing privileges them. 
*/ if (first_time) { - for (view = ISC_LIST_HEAD(server->viewlist); view != NULL; - view = ISC_LIST_NEXT(view, link)) + for (dns_view_t *view = ISC_LIST_HEAD(server->viewlist); + view != NULL; view = ISC_LIST_NEXT(view, link)) { nzd_env_close(view); } @@ -9287,7 +9367,7 @@ load_configuration(const char *filename, named_server_t *server, NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR, "the working directory is not writable"); result = ISC_R_NOPERM; - goto cleanup; + goto cleanup_cachelist; } #ifdef HAVE_LMDB @@ -9295,8 +9375,8 @@ load_configuration(const char *filename, named_server_t *server, * Reopen NZD databases. */ if (first_time) { - for (view = ISC_LIST_HEAD(server->viewlist); view != NULL; - view = ISC_LIST_NEXT(view, link)) + for (dns_view_t *view = ISC_LIST_HEAD(server->viewlist); + view != NULL; view = ISC_LIST_NEXT(view, link)) { nzd_env_reopen(view); } @@ -9329,7 +9409,7 @@ load_configuration(const char *filename, named_server_t *server, "checking logging configuration " "failed: %s", isc_result_totext(result)); - goto cleanup; + goto cleanup_cachelist; } } } else { @@ -9340,15 +9420,38 @@ load_configuration(const char *filename, named_server_t *server, logobj = NULL; (void)cfg_map_get(config, "logging", &logobj); if (logobj != NULL) { - CHECKM(named_logconfig(logc, logobj), - "configuring logging"); + result = named_logconfig(logc, logobj); + if (result != ISC_R_SUCCESS) { + isc_log_write( + named_g_lctx, NAMED_LOGCATEGORY_GENERAL, + NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR, + "configuring logging: %s", + isc_result_totext(result)); + goto cleanup_logc; + } } else { named_log_setdefaultchannels(logc); named_log_setdefaultsslkeylogfile(logc); - CHECKM(named_log_setunmatchedcategory(logc), - "setting up default 'category unmatched'"); - CHECKM(named_log_setdefaultcategory(logc), - "setting up default 'category default'"); + result = named_log_setunmatchedcategory(logc); + if (result != ISC_R_SUCCESS) { + isc_log_write( + named_g_lctx, NAMED_LOGCATEGORY_GENERAL, + 
NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR, + "setting up default 'category " + "unmatched': %s", + isc_result_totext(result)); + goto cleanup_logc; + } + result = named_log_setdefaultcategory(logc); + if (result != ISC_R_SUCCESS) { + isc_log_write( + named_g_lctx, NAMED_LOGCATEGORY_GENERAL, + NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR, + "setting up default 'category " + "default': %s", + isc_result_totext(result)); + goto cleanup_logc; + } } isc_logconfig_use(named_g_lctx, logc); @@ -9425,31 +9528,27 @@ load_configuration(const char *filename, named_server_t *server, obj = NULL; result = named_config_get(maps, "statistics-file", &obj); INSIST(result == ISC_R_SUCCESS); - CHECKM(setstring(server, &server->statsfile, cfg_obj_asstring(obj)), - "strdup"); + setstring(server, &server->statsfile, cfg_obj_asstring(obj)); obj = NULL; result = named_config_get(maps, "dump-file", &obj); INSIST(result == ISC_R_SUCCESS); - CHECKM(setstring(server, &server->dumpfile, cfg_obj_asstring(obj)), - "strdup"); + setstring(server, &server->dumpfile, cfg_obj_asstring(obj)); obj = NULL; result = named_config_get(maps, "secroots-file", &obj); INSIST(result == ISC_R_SUCCESS); - CHECKM(setstring(server, &server->secrootsfile, cfg_obj_asstring(obj)), - "strdup"); + setstring(server, &server->secrootsfile, cfg_obj_asstring(obj)); obj = NULL; result = named_config_get(maps, "recursing-file", &obj); INSIST(result == ISC_R_SUCCESS); - CHECKM(setstring(server, &server->recfile, cfg_obj_asstring(obj)), - "strdup"); + setstring(server, &server->recfile, cfg_obj_asstring(obj)); obj = NULL; result = named_config_get(maps, "version", &obj); if (result == ISC_R_SUCCESS) { - CHECKM(setoptstring(server, &server->version, obj), "strdup"); + setoptstring(server, &server->version, obj); server->version_set = true; } else { server->version_set = false; @@ -9458,7 +9557,7 @@ load_configuration(const char *filename, named_server_t *server, obj = NULL; result = named_config_get(maps, "hostname", &obj); if (result == 
ISC_R_SUCCESS) { - CHECKM(setoptstring(server, &server->hostname, obj), "strdup"); + setoptstring(server, &server->hostname, obj); server->hostname_set = true; } else { server->hostname_set = false; @@ -9527,7 +9626,7 @@ load_configuration(const char *filename, named_server_t *server, result = isc_hex_decodestring(str, &b); if (result != ISC_R_SUCCESS && result != ISC_R_NOSPACE) { - goto cleanup; + goto cleanup_altsecrets; } first = false; } else { @@ -9541,7 +9640,7 @@ load_configuration(const char *filename, named_server_t *server, isc_mem_put(server->sctx->mctx, altsecret, sizeof(*altsecret)); - goto cleanup; + goto cleanup_altsecrets; } ISC_LIST_INITANDAPPEND(altsecrets, altsecret, link); @@ -9552,16 +9651,31 @@ load_configuration(const char *filename, named_server_t *server, case ns_cookiealg_siphash24: expectedlength = ISC_SIPHASH24_KEY_LENGTH; if (usedlength != expectedlength) { - CHECKM(ISC_R_RANGE, "SipHash-2-4 " - "cookie-secret " - "must be 128 bits"); + result = ISC_R_RANGE; + isc_log_write( + named_g_lctx, + NAMED_LOGCATEGORY_GENERAL, + NAMED_LOGMODULE_SERVER, + ISC_LOG_ERROR, + "SipHash-2-4 cookie-secret " + "must be 128 bits: %s", + isc_result_totext(result)); + goto cleanup_altsecrets; } break; case ns_cookiealg_aes: expectedlength = ISC_AES128_KEYLENGTH; if (usedlength != expectedlength) { - CHECKM(ISC_R_RANGE, "AES cookie-secret " - "must be 128 bits"); + result = ISC_R_RANGE; + isc_log_write( + named_g_lctx, + NAMED_LOGCATEGORY_GENERAL, + NAMED_LOGMODULE_SERVER, + ISC_LOG_ERROR, + "AES cookie-secret must be 128 " + "bits: %s", + isc_result_totext(result)); + goto cleanup_altsecrets; } break; } @@ -9591,56 +9705,96 @@ load_configuration(const char *filename, named_server_t *server, result = dns_dnsrps_connect(view->rpzs); if (result != ISC_R_SUCCESS) { view = NULL; - goto cleanup; + goto cleanup_altsecrets; } } #endif /* ifdef USE_DNSRPS */ - result = ISC_R_SUCCESS; + /* + * Record the time of most recent configuration + */ + tresult = 
isc_time_now(&named_g_configtime);
+	if (tresult != ISC_R_SUCCESS) {
+		named_main_earlyfatal("isc_time_now() failed: %s",
+				      isc_result_totext(tresult));
+	}
 
-cleanup:
+	isc_task_endexclusive(server->task);
+	exclusive = false;
+
+	/* Configure the statistics channel(s) */
+	result = named_statschannels_configure(named_g_server, config,
+					       named_g_aclconfctx);
+	if (result != ISC_R_SUCCESS) {
+		isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+			      NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+			      "configuring statistics server(s): %s",
+			      isc_result_totext(result));
+		goto cleanup_altsecrets;
+	}
+
+	/*
+	 * Bind the control port(s).
+	 */
+	result = named_controls_configure(named_g_server->controls, config,
+					  named_g_aclconfctx);
+	if (result != ISC_R_SUCCESS) {
+		isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+			      NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+			      "binding control channel(s): %s",
+			      isc_result_totext(result));
+		goto cleanup_altsecrets;
+	}
+
+	/*
+	 * Rescan the interface list to pick up changes in the
+	 * listen-on option. It's important that we do this before we try
+	 * to configure the query source, since the dispatcher we use might
+	 * be shared with an interface.
+	 */
+	result = ns_interfacemgr_scan(server->interfacemgr, true, true);
+
+	/*
+	 * Check that named is able to TCP listen on at least one
+	 * interface. Otherwise, another named process could be running
+	 * and we should fail.
+	 */
+	if (first_time && (result == ISC_R_ADDRINUSE)) {
+		isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
+			      NAMED_LOGMODULE_SERVER, ISC_LOG_ERROR,
+			      "unable to listen on any configured interfaces");
+		result = ISC_R_FAILURE;
+		goto cleanup_altsecrets;
+	}
+
+	/*
+	 * This cleans up either the old production view list
+	 * or our temporary list depending on whether they
+	 * were swapped above or not.
+ */ +cleanup_altsecrets: + while ((altsecret = ISC_LIST_HEAD(altsecrets)) != NULL) { + ISC_LIST_UNLINK(altsecrets, altsecret, link); + isc_mem_put(server->sctx->mctx, altsecret, sizeof(*altsecret)); + } + +cleanup_logc: if (logc != NULL) { isc_logconfig_destroy(&logc); } - if (v4portset != NULL) { - isc_portset_destroy(named_g_mctx, &v4portset); - } - - if (v6portset != NULL) { - isc_portset_destroy(named_g_mctx, &v6portset); - } - - if (conf_parser != NULL) { - if (config != NULL) { - cfg_obj_destroy(conf_parser, &config); - } - cfg_parser_destroy(&conf_parser); - } - - if (bindkeys_parser != NULL) { - if (bindkeys != NULL) { - cfg_obj_destroy(bindkeys_parser, &bindkeys); - } - cfg_parser_destroy(&bindkeys_parser); - } - - if (view != NULL) { - dns_view_detach(&view); - } - - if (kasp != NULL) { - dns_kasp_detach(&kasp); +cleanup_cachelist: + while ((nsc = ISC_LIST_HEAD(cachelist)) != NULL) { + ISC_LIST_UNLINK(cachelist, nsc, link); + dns_cache_detach(&nsc->cache); + isc_mem_put(server->mctx, nsc, sizeof(*nsc)); } ISC_LIST_APPENDLIST(viewlist, builtin_viewlist, link); - /* - * This cleans up either the old production view list - * or our temporary list depending on whether they - * were swapped above or not. - */ - for (view = ISC_LIST_HEAD(viewlist); view != NULL; view = view_next) { +cleanup_viewlist: + for (dns_view_t *view = ISC_LIST_HEAD(viewlist); view != NULL; + view = view_next) { view_next = ISC_LIST_NEXT(view, link); ISC_LIST_UNLINK(viewlist, view, link); if (result == ISC_R_SUCCESS && strcmp(view->name, "_bind") != 0) @@ -9652,38 +9806,35 @@ cleanup: dns_view_detach(&view); } - /* - * Same cleanup for kasp list. - */ +cleanup_kasplist: for (kasp = ISC_LIST_HEAD(kasplist); kasp != NULL; kasp = kasp_next) { kasp_next = ISC_LIST_NEXT(kasp, link); ISC_LIST_UNLINK(kasplist, kasp, link); dns_kasp_detach(&kasp); } - /* Same cleanup for cache list. 
*/ - while ((nsc = ISC_LIST_HEAD(cachelist)) != NULL) { - ISC_LIST_UNLINK(cachelist, nsc, link); - dns_cache_detach(&nsc->cache); - isc_mem_put(server->mctx, nsc, sizeof(*nsc)); +cleanup_v6portset: + isc_portset_destroy(named_g_mctx, &v6portset); + +cleanup_v4portset: + isc_portset_destroy(named_g_mctx, &v4portset); + +cleanup_bindkeys_parser: + + if (bindkeys_parser != NULL) { + if (bindkeys != NULL) { + cfg_obj_destroy(bindkeys_parser, &bindkeys); + } + cfg_parser_destroy(&bindkeys_parser); } - /* Cleanup for altsecrets list. */ - while ((altsecret = ISC_LIST_HEAD(altsecrets)) != NULL) { - ISC_LIST_UNLINK(altsecrets, altsecret, link); - isc_mem_put(server->sctx->mctx, altsecret, sizeof(*altsecret)); - } +cleanup_config: + cfg_obj_destroy(conf_parser, &config); - /* - * Record the time of most recent configuration - */ - tresult = isc_time_now(&named_g_configtime); - if (tresult != ISC_R_SUCCESS) { - named_main_earlyfatal("isc_time_now() failed: %s", - isc_result_totext(result)); - } +cleanup_conf_parser: + cfg_parser_destroy(&conf_parser); - /* Relinquish exclusive access to configuration data. 
*/
+cleanup_exclusive:
 	if (exclusive) {
 		isc_task_endexclusive(server->task);
 	}
@@ -9754,7 +9905,7 @@ view_loaded(void *arg) {
 
 static isc_result_t
 load_zones(named_server_t *server, bool reconfig) {
-	isc_result_t result;
+	isc_result_t result = ISC_R_SUCCESS;
 	ns_zoneload_t *zl = NULL;
 	dns_view_t *view = NULL;
 
@@ -9762,8 +9913,7 @@ load_zones(named_server_t *server, bool reconfig) {
 	zl->server = server;
 	zl->reconfig = reconfig;
 
-	result = isc_task_beginexclusive(server->task);
-	RUNTIME_CHECK(result == ISC_R_SUCCESS);
+	isc_task_beginexclusive(server->task);
 
 	isc_refcount_init(&zl->refs, 1);
 
@@ -9819,7 +9969,9 @@ static void
 run_server(isc_task_t *task, isc_event_t *event) {
 	isc_result_t result;
 	named_server_t *server = (named_server_t *)event->ev_arg;
-	dns_geoip_databases_t *geoip;
+	dns_geoip_databases_t *geoip = NULL;
+
+	/* NOTE(review): removed leftover debug fprintf of __func__ */
 
 	INSIST(task == server->task);
 
@@ -9838,8 +9990,8 @@ run_server(isc_task_t *task, isc_event_t *event) {
 #endif /* if defined(HAVE_GEOIP2) */
 
 	CHECKFATAL(ns_interfacemgr_create(
-			   named_g_mctx, server->sctx, named_g_taskmgr,
-			   named_g_loopmgr, named_g_netmgr, named_g_dispatchmgr,
+			   named_g_mctx, server->sctx, named_g_loopmgr,
+			   named_g_taskmgr, named_g_netmgr, named_g_dispatchmgr,
 			   server->task, geoip, true, &server->interfacemgr),
 		   "creating interface manager");
 
@@ -9886,6 +10038,15 @@ run_server(isc_task_t *task, isc_event_t *event) {
 #endif /* ifdef ENABLE_AFL */
 }
 
+static void
+launch_server(void *arg) {
+	named_server_t *server = (named_server_t *)arg;
+	isc_event_t *event = isc_event_allocate(named_g_mctx, server->task,
+						NAMED_EVENT_RUN, run_server,
+						server, sizeof(*event));
+	isc_task_send(server->task, &event);
+}
+
 void
 named_server_flushonshutdown(named_server_t *server, bool flush) {
 	REQUIRE(NAMED_SERVER_VALID(server));
@@ -9895,31 +10056,32 @@ named_server_flushonshutdown(named_server_t *server, bool flush) {
 
 static void
 shutdown_server(isc_task_t *task, isc_event_t *event) {
-	isc_result_t result;
 	
dns_view_t *view, *view_next = NULL; dns_kasp_t *kasp, *kasp_next = NULL; named_server_t *server = (named_server_t *)event->ev_arg; bool flush = server->flushonshutdown; named_cache_t *nsc; - UNUSED(task); INSIST(task == server->task); + isc_event_free(&event); + /* * We need to shutdown the interface before going * exclusive (which would pause the netmgr). */ ns_interfacemgr_shutdown(server->interfacemgr); - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + named_controls_shutdown(server->controls); + + named_statschannels_shutdown(server); + + isc_task_beginexclusive(server->task); isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL, NAMED_LOGMODULE_SERVER, ISC_LOG_INFO, "shutting down%s", flush ? ": flushing changes" : ""); - named_statschannels_shutdown(server); - named_controls_shutdown(server->controls); end_reserved_dispatches(server, true); cleanup_session_key(server, server->mctx); @@ -9985,8 +10147,23 @@ shutdown_server(isc_task_t *task, isc_event_t *event) { isc_task_endexclusive(server->task); isc_task_detach(&server->task); +} - isc_event_free(&event); +static void +close_server(void *arg) { + named_server_t *server = arg; + + /* + * Cleanup loopmgr resources directly, because shuttingdown the server + * happens on async task + */ + isc_signal_stop(server->sighup); + isc_signal_destroy(&server->sighup); + + isc_event_t *event = isc_event_allocate( + named_g_mctx, server->task, NAMED_EVENT_SHUTDOWN, + shutdown_server, server, sizeof(*event)); + isc_task_send(server->task, &event); } void @@ -10080,7 +10257,7 @@ named_server_create(isc_mem_t *mctx, named_server_t **serverp) { * startup and shutdown of the server, as well as all exclusive * tasks. 
*/ - CHECKFATAL(isc_task_create(named_g_taskmgr, 0, &server->task, 0), + CHECKFATAL(isc_task_create(named_g_taskmgr, &server->task, 0), "creating server task"); isc_task_setname(server->task, "server", server); isc_taskmgr_setexcltask(named_g_taskmgr, server->task); @@ -10105,9 +10282,12 @@ named_server_create(isc_mem_t *mctx, named_server_t **serverp) { named_g_mainloop = isc_loop_main(named_g_loopmgr); - CHECKFATAL( - isc_app_onrun(named_g_mctx, server->task, run_server, server), - "isc_app_onrun"); + isc_loop_setup(named_g_mainloop, launch_server, server); + isc_loop_teardown(named_g_mainloop, close_server, server); + + /* Add SIGHUP reload handler */ + server->sighup = isc_signal_new( + named_g_loopmgr, named_server_reloadwanted, server, SIGHUP); server->interface_timer = NULL; server->heartbeat_timer = NULL; @@ -10268,7 +10448,7 @@ fatal(named_server_t *server, const char *msg, isc_result_t result) { * function and any other OpenSSL calls from other tasks * by requesting exclusive access to the task manager. 
*/ - (void)isc_task_beginexclusive(server->task); + isc_task_beginexclusive(server->task); } isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL, NAMED_LOGMODULE_SERVER, ISC_LOG_CRITICAL, "%s: %s", msg, @@ -10411,7 +10591,6 @@ named_server_reload(isc_task_t *task, isc_event_t *event) { named_server_t *server = (named_server_t *)event->ev_sender; INSIST(task == server->task); - UNUSED(task); isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL, NAMED_LOGMODULE_SERVER, ISC_LOG_INFO, @@ -10422,7 +10601,11 @@ named_server_reload(isc_task_t *task, isc_event_t *event) { } void -named_server_reloadwanted(named_server_t *server) { +named_server_reloadwanted(void *arg, int signum) { + named_server_t *server = (named_server_t *)arg; + + REQUIRE(signum == SIGHUP); + isc_event_t *event = isc_event_allocate( named_g_mctx, server, NAMED_EVENT_RELOAD, named_server_reload, NULL, sizeof(isc_event_t)); @@ -11891,8 +12074,7 @@ named_server_validation(named_server_t *server, isc_lex_t *lex, /* Look for the view name. */ ptr = next_token(lex, text); - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(server->task); for (view = ISC_LIST_HEAD(server->viewlist); view != NULL; view = ISC_LIST_NEXT(view, link)) { @@ -11952,8 +12134,7 @@ named_server_flushcache(named_server_t *server, isc_lex_t *lex) { /* Look for the view name. */ ptr = next_token(lex, NULL); - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(server->task); flushed = true; found = false; @@ -12115,8 +12296,7 @@ named_server_flushnode(named_server_t *server, isc_lex_t *lex, bool tree) { /* Look for the view name. 
*/ viewname = next_token(lex, NULL); - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(server->task); flushed = true; found = false; for (view = ISC_LIST_HEAD(server->viewlist); view != NULL; @@ -12419,8 +12599,7 @@ named_server_tsigdelete(named_server_t *server, isc_lex_t *lex, viewname = next_token(lex, text); - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(server->task); for (view = ISC_LIST_HEAD(server->viewlist); view != NULL; view = ISC_LIST_NEXT(view, link)) { @@ -12683,8 +12862,7 @@ named_server_sync(named_server_t *server, isc_lex_t *lex, isc_buffer_t **text) { } if (zone == NULL) { - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(server->task); tresult = ISC_R_SUCCESS; for (view = ISC_LIST_HEAD(server->viewlist); view != NULL; view = ISC_LIST_NEXT(view, link)) @@ -12705,8 +12883,7 @@ named_server_sync(named_server_t *server, isc_lex_t *lex, isc_buffer_t **text) { return (tresult); } - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(server->task); result = synczone(zone, &cleanup); isc_task_endexclusive(server->task); @@ -12754,8 +12931,7 @@ named_server_freeze(named_server_t *server, bool freeze, isc_lex_t *lex, return (result); } if (mayberaw == NULL) { - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(server->task); tresult = ISC_R_SUCCESS; for (view = ISC_LIST_HEAD(server->viewlist); view != NULL; view = ISC_LIST_NEXT(view, link)) @@ -12791,8 +12967,7 @@ named_server_freeze(named_server_t *server, bool freeze, isc_lex_t *lex, return (DNS_R_NOTDYNAMIC); } - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(server->task); frozen = 
dns_zone_getupdatedisabled(mayberaw); if (freeze) { if (frozen) { @@ -13732,8 +13907,7 @@ do_addzone(named_server_t *server, ns_cfgctx_t *cfg, dns_view_t *view, goto cleanup; } - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(server->task); #ifndef HAVE_LMDB /* @@ -13887,7 +14061,6 @@ do_modzone(named_server_t *server, ns_cfgctx_t *cfg, dns_view_t *view, isc_result_t result, tresult; dns_zone_t *zone = NULL; bool added; - bool exclusive = false; #ifndef HAVE_LMDB FILE *fp = NULL; cfg_obj_t *z; @@ -13923,9 +14096,7 @@ do_modzone(named_server_t *server, ns_cfgctx_t *cfg, dns_view_t *view, } #endif /* ifndef HAVE_LMDB */ - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); - exclusive = true; + isc_task_beginexclusive(server->task); #ifndef HAVE_LMDB /* Make sure we can open the configuration save file */ @@ -13935,6 +14106,7 @@ do_modzone(named_server_t *server, ns_cfgctx_t *cfg, dns_view_t *view, TCHECK(putstr(text, view->new_zone_file)); TCHECK(putstr(text, "': ")); TCHECK(putstr(text, isc_result_totext(result))); + isc_task_endexclusive(server->task); goto cleanup; } (void)isc_stdio_close(fp); @@ -13949,6 +14121,7 @@ do_modzone(named_server_t *server, ns_cfgctx_t *cfg, dns_view_t *view, TCHECK(putstr(text, view->new_zone_db)); TCHECK(putstr(text, "'")); result = ISC_R_FAILURE; + isc_task_endexclusive(server->task); goto cleanup; } #endif /* HAVE_LMDB */ @@ -13960,7 +14133,6 @@ do_modzone(named_server_t *server, ns_cfgctx_t *cfg, dns_view_t *view, true, false, true); dns_view_freeze(view); - exclusive = false; isc_task_endexclusive(server->task); if (result != ISC_R_SUCCESS) { @@ -14081,9 +14253,6 @@ do_modzone(named_server_t *server, ns_cfgctx_t *cfg, dns_view_t *view, } cleanup: - if (exclusive) { - isc_task_endexclusive(server->task); - } #ifndef HAVE_LMDB if (fp != NULL) { @@ -15596,7 +15765,7 @@ named_server_nta(named_server_t *server, isc_lex_t *lex, 
bool readonly, const dns_name_t *ntaname; dns_name_t *fname; dns_ttl_t ntattl; - bool ttlset = false, excl = false, viewfound = false; + bool ttlset = false, viewfound = false; dns_rdataclass_t rdclass = dns_rdataclass_in; bool first = true; @@ -15740,9 +15909,7 @@ named_server_nta(named_server_t *server, isc_lex_t *lex, bool readonly, isc_stdtime_get(&now); - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); - excl = true; + isc_task_beginexclusive(server->task); for (view = ISC_LIST_HEAD(server->viewlist); view != NULL; view = ISC_LIST_NEXT(view, link)) { @@ -15812,7 +15979,7 @@ named_server_nta(named_server_t *server, isc_lex_t *lex, bool readonly, } else if (result == ISC_R_NOTFOUND) { wasremoved = false; } else { - goto cleanup; + goto cleanup_exclusive; } if (!first) { @@ -15848,20 +16015,21 @@ named_server_nta(named_server_t *server, isc_lex_t *lex, bool readonly, if (!viewfound) { msg = "No such view"; - CHECK(ISC_R_NOTFOUND); + result = ISC_R_NOTFOUND; + } else { + (void)putnull(text); } - (void)putnull(text); +cleanup_exclusive: + isc_task_endexclusive(server->task); cleanup: + if (msg != NULL) { (void)putstr(text, msg); (void)putnull(text); } - if (excl) { - isc_task_endexclusive(server->task); - } if (ntatable != NULL) { dns_ntatable_detach(&ntatable); } @@ -15931,7 +16099,6 @@ static isc_result_t mkey_destroy(named_server_t *server, dns_view_t *view, isc_buffer_t **text) { isc_result_t result; char msg[DNS_NAME_FORMATSIZE + 500] = ""; - bool exclusive = false; const char *file = NULL; dns_db_t *dbp = NULL; dns_zone_t *mkzone = NULL; @@ -15945,9 +16112,7 @@ mkey_destroy(named_server_t *server, dns_view_t *view, isc_buffer_t **text) { view->name); CHECK(putstr(text, msg)); - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); - exclusive = true; + isc_task_beginexclusive(server->task); /* Remove and clean up managed keys zone from view */ mkzone = view->managed_keys; @@ 
-15992,9 +16157,7 @@ mkey_destroy(named_server_t *server, dns_view_t *view, isc_buffer_t **text) { result = ISC_R_SUCCESS; cleanup: - if (exclusive) { - isc_task_endexclusive(server->task); - } + isc_task_endexclusive(server->task); return (result); } @@ -16390,8 +16553,7 @@ named_server_tcptimeouts(isc_lex_t *lex, isc_buffer_t **text) { CHECK(ISC_R_RANGE); } - result = isc_task_beginexclusive(named_g_server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(named_g_server->task); isc_nm_settimeouts(named_g_netmgr, initial, idle, keepalive, advertised); @@ -16429,7 +16591,6 @@ named_server_servestale(named_server_t *server, isc_lex_t *lex, dns_stale_answer_t staleanswersok = dns_stale_answer_conf; bool wantstatus = false; isc_result_t result = ISC_R_SUCCESS; - bool exclusive = false; REQUIRE(text != NULL); @@ -16487,9 +16648,7 @@ named_server_servestale(named_server_t *server, isc_lex_t *lex, } } - result = isc_task_beginexclusive(server->task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); - exclusive = true; + isc_task_beginexclusive(server->task); for (view = ISC_LIST_HEAD(server->viewlist); view != NULL; view = ISC_LIST_NEXT(view, link)) @@ -16525,41 +16684,46 @@ named_server_servestale(named_server_t *server, isc_lex_t *lex, switch (view->staleanswersok) { case dns_stale_answer_yes: if (stale_ttl > 0) { - CHECK(putstr(text, "stale cache enabled; stale " + CHECK(putstr(text, "stale cache " + "enabled; stale " "answers enabled")); } else { - CHECK(putstr(text, - "stale cache disabled; stale " - "answers unavailable")); + CHECK(putstr(text, "stale cache disabled; " + "stale " + "answers unavailable")); } break; case dns_stale_answer_no: if (stale_ttl > 0) { - CHECK(putstr(text, "stale cache enabled; stale " + CHECK(putstr(text, "stale cache " + "enabled; stale " "answers disabled")); } else { - CHECK(putstr(text, - "stale cache disabled; stale " - "answers unavailable")); + CHECK(putstr(text, "stale cache disabled; " + "stale " + "answers 
unavailable")); } break; case dns_stale_answer_conf: if (view->staleanswersenable && stale_ttl > 0) { - CHECK(putstr(text, "stale cache enabled; stale " + CHECK(putstr(text, "stale cache " + "enabled; stale " "answers enabled")); } else if (stale_ttl > 0) { - CHECK(putstr(text, "stale cache enabled; stale " + CHECK(putstr(text, "stale cache " + "enabled; stale " "answers disabled")); } else { - CHECK(putstr(text, - "stale cache disabled; stale " - "answers unavailable")); + CHECK(putstr(text, "stale cache disabled; " + "stale " + "answers unavailable")); } break; } if (stale_ttl > 0) { snprintf(msg, sizeof(msg), - " (stale-answer-ttl=%u max-stale-ttl=%u " + " (stale-answer-ttl=%u " + "max-stale-ttl=%u " "stale-refresh-time=%u)", view->staleanswerttl, stale_ttl, stale_refresh); @@ -16573,9 +16737,7 @@ named_server_servestale(named_server_t *server, isc_lex_t *lex, } cleanup: - if (exclusive) { - isc_task_endexclusive(named_g_server->task); - } + isc_task_endexclusive(named_g_server->task); if (isc_buffer_usedlength(*text) > 0) { (void)putnull(text); diff --git a/bin/nsupdate/nsupdate.c b/bin/nsupdate/nsupdate.c index fe3a7d1ce8..625fa0925c 100644 --- a/bin/nsupdate/nsupdate.c +++ b/bin/nsupdate/nsupdate.c @@ -21,7 +21,6 @@ #include #include -#include #include #include #include @@ -29,8 +28,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -125,10 +126,9 @@ static bool use_win2k_gsstsig = false; static bool tried_other_gsstsig = false; static bool local_only = false; static isc_nm_t *netmgr = NULL; -static isc_loopmgr_t *loopmgr = NULL; static isc_taskmgr_t *taskmgr = NULL; +static isc_loopmgr_t *loopmgr = NULL; static isc_task_t *global_task = NULL; -static isc_event_t *global_event = NULL; static isc_log_t *glctx = NULL; static isc_mem_t *gmctx = NULL; static dns_dispatchmgr_t *dispatchmgr = NULL; @@ -177,6 +177,8 @@ static bool default_ttl_set = false; static bool checknames = true; static const char *resolvconf = 
RESOLV_CONF; +bool done = false; + typedef struct nsu_requestinfo { dns_message_t *msg; isc_sockaddr_t *addr; @@ -188,6 +190,9 @@ sendrequest(isc_sockaddr_t *destaddr, dns_message_t *msg, static void send_update(dns_name_t *zonename, isc_sockaddr_t *primary); +static void +getinput(void *arg); + noreturn static void fatal(const char *format, ...) ISC_FORMAT_PRINTF(1, 2); @@ -682,8 +687,6 @@ setup_keyfile(isc_mem_t *mctx, isc_log_t *lctx) { static void doshutdown(void) { - isc_task_detach(&global_task); - /* * The isc_mem_put of primary_servers must be before the * isc_mem_put of servers as it sets the servers pointer @@ -753,7 +756,9 @@ maybeshutdown(void) { } static void -shutdown_program(void) { +shutdown_program(void *arg) { + UNUSED(arg); + ddebug("shutdown_program()"); shuttingdown = true; @@ -906,13 +911,10 @@ setup_system(void) { irs_resconf_destroy(&resconf); - result = isc_managers_create(gmctx, 1, 0, &loopmgr, &netmgr, &taskmgr); - check_result(result, "isc_managers_create"); - result = dns_dispatchmgr_create(gmctx, netmgr, &dispatchmgr); check_result(result, "dns_dispatchmgr_create"); - result = isc_task_create(taskmgr, 0, &global_task, 0); + result = isc_task_create(taskmgr, &global_task, 0); check_result(result, "isc_task_create"); result = dst_lib_init(gmctx, NULL); @@ -960,9 +962,9 @@ get_addresses(char *host, in_port_t port, isc_sockaddr_t *sockaddr, int count = 0; isc_result_t result; - isc_app_block(); + isc_loopmgr_blocking(loopmgr); result = bind9_getaddresses(host, port, sockaddr, naddrs, &count); - isc_app_unblock(); + isc_loopmgr_nonblocking(loopmgr); if (result != ISC_R_SUCCESS) { error("couldn't get address for '%s': %s", host, isc_result_totext(result)); @@ -2239,7 +2241,6 @@ get_next_command(void) { char cmdlinebuf[MAXCMD]; char *cmdline = NULL, *ptr = NULL; - isc_app_block(); if (interactive) { cmdline = ptr = readline("> "); if (ptr != NULL && *ptr != 0) { @@ -2248,7 +2249,6 @@ get_next_command(void) { } else { cmdline = 
fgets(cmdlinebuf, MAXCMD, input); } - isc_app_unblock(); if (cmdline != NULL) { char *tmp = cmdline; @@ -2286,9 +2286,9 @@ user_interaction(void) { static void done_update(void) { - isc_event_t *event = global_event; ddebug("done_update()"); - isc_task_send(global_task, &event); + + isc_job_run(loopmgr, getinput, NULL); } static void @@ -3277,9 +3277,6 @@ cleanup(void) { } UNLOCK(&answer_lock); - ddebug("Shutting down managers"); - isc_managers_destroy(&loopmgr, &netmgr, &taskmgr); - #if HAVE_GSSAPI if (tsigkey != NULL) { ddebug("detach tsigkey x%p", tsigkey); @@ -3295,9 +3292,6 @@ cleanup(void) { dst_key_free(&sig0key); } - ddebug("Destroying event"); - isc_event_free(&global_event); - #ifdef HAVE_GSSAPI /* * Cleanup GSSAPI resources after taskmgr has been destroyed. @@ -3325,7 +3319,6 @@ cleanup(void) { if (memdebugging) { isc_mem_stats(gmctx, stderr); } - isc_mem_destroy(&gmctx); isc_mutex_destroy(&answer_lock); @@ -3334,44 +3327,44 @@ cleanup(void) { dst_lib_destroy(); is_dst_up = false; } + + ddebug("Shutting down managers"); + isc_managers_destroy(&gmctx, &loopmgr, &netmgr, &taskmgr); } static void -getinput(isc_task_t *task, isc_event_t *event) { +getinput(void *arg) { bool more; - UNUSED(task); + UNUSED(arg); if (shuttingdown) { maybeshutdown(); return; } - if (global_event == NULL) { - global_event = event; - } - reset_system(); + isc_loopmgr_blocking(loopmgr); more = user_interaction(); + isc_loopmgr_nonblocking(loopmgr); if (!more) { - isc_app_shutdown(); + isc_task_detach(&global_task); + isc_loopmgr_shutdown(loopmgr); return; } + + done = false; start_update(); - return; } int main(int argc, char **argv) { - isc_result_t result; style = &dns_master_style_debug; input = stdin; interactive = isatty(0); - isc_app_start(); - if (isc_net_probeipv4() == ISC_R_SUCCESS) { have_ipv4 = true; } @@ -3384,23 +3377,18 @@ main(int argc, char **argv) { pre_parse_args(argc, argv); - isc_mem_create(&gmctx); + isc_managers_create(&gmctx, 1, &loopmgr, &netmgr, &taskmgr); 
parse_args(argc, argv); setup_system(); - result = isc_app_onrun(gmctx, global_task, getinput, NULL); - check_result(result, "isc_app_onrun"); - - (void)isc_app_run(); - - shutdown_program(); + isc_loopmgr_setup(loopmgr, getinput, NULL); + isc_loopmgr_teardown(loopmgr, shutdown_program, NULL); + isc_loopmgr_run(loopmgr); cleanup(); - isc_app_finish(); - if (seenerror) { return (2); } else { diff --git a/bin/rndc/rndc.c b/bin/rndc/rndc.c index 50de03175b..cabeb91e05 100644 --- a/bin/rndc/rndc.c +++ b/bin/rndc/rndc.c @@ -17,14 +17,13 @@ #include #include -#include #include #include #include #include -#include #include #include +#include #include #include #include @@ -61,8 +60,8 @@ const char *progname = NULL; bool verbose; static isc_nm_t *netmgr = NULL; -static isc_loopmgr_t *loopmgr = NULL; static isc_taskmgr_t *taskmgr = NULL; +static isc_loopmgr_t *loopmgr = NULL; static isc_task_t *rndc_task = NULL; static const char *admin_conffile = NULL; @@ -309,7 +308,8 @@ rndc_senddone(isc_nmhandle_t *handle, isc_result_t result, void *arg) { atomic_load_acquire(&recvs) == 0) { shuttingdown = true; - isc_app_shutdown(); + isc_task_detach(&rndc_task); + isc_loopmgr_shutdown(loopmgr); } } @@ -394,7 +394,8 @@ rndc_recvdone(isc_nmhandle_t *handle, isc_result_t result, void *arg) { atomic_fetch_sub_release(&recvs, 1) == 1) { shuttingdown = true; - isc_app_shutdown(); + isc_task_detach(&rndc_task); + isc_loopmgr_shutdown(loopmgr); } } @@ -603,10 +604,8 @@ rndc_startconnect(isc_sockaddr_t *addr) { } static void -rndc_start(isc_task_t *task, isc_event_t *event) { - isc_event_free(&event); - - UNUSED(task); +rndc_start(void *arg) { + UNUSED(arg); currentaddr = 0; rndc_startconnect(&serveraddrs[currentaddr]); @@ -916,11 +915,6 @@ main(int argc, char **argv) { isc_sockaddr_any(&local4); isc_sockaddr_any6(&local6); - result = isc_app_start(); - if (result != ISC_R_SUCCESS) { - fatal("isc_app_start() failed: %s", isc_result_totext(result)); - } - isc_commandline_errprint = false; 
preparse_args(argc, argv); @@ -1028,9 +1022,10 @@ main(int argc, char **argv) { serial = isc_random32(); - isc_mem_create(&rndc_mctx); - isc_managers_create(rndc_mctx, 1, 0, &loopmgr, &netmgr, &taskmgr); - DO("create task", isc_task_create(taskmgr, 0, &rndc_task, 0)); + isc_managers_create(&rndc_mctx, 1, &loopmgr, &netmgr, &taskmgr); + isc_loopmgr_setup(loopmgr, rndc_start, rndc_task); + + DO("create task", isc_task_create(taskmgr, &rndc_task, 0)); isc_log_create(rndc_mctx, &log, &logconfig); isc_log_setcontext(log); isc_log_settag(logconfig, progname); @@ -1076,15 +1071,7 @@ main(int argc, char **argv) { get_addresses(servername, (in_port_t)remoteport); } - DO("post event", isc_app_onrun(rndc_mctx, rndc_task, rndc_start, NULL)); - - result = isc_app_run(); - if (result != ISC_R_SUCCESS) { - fatal("isc_app_run() failed: %s", isc_result_totext(result)); - } - - isc_task_detach(&rndc_task); - isc_managers_destroy(&loopmgr, &netmgr, &taskmgr); + isc_loopmgr_run(loopmgr); /* * Note: when TCP connections are shut down, there will be a final @@ -1108,7 +1095,7 @@ main(int argc, char **argv) { isc_mem_stats(rndc_mctx, stderr); } - isc_mem_destroy(&rndc_mctx); + isc_managers_destroy(&rndc_mctx, &loopmgr, &netmgr, &taskmgr); if (failed) { return (1); diff --git a/bin/tests/system/dyndb/driver/Makefile.am b/bin/tests/system/dyndb/driver/Makefile.am index f8ceded031..e41a30a860 100644 --- a/bin/tests/system/dyndb/driver/Makefile.am +++ b/bin/tests/system/dyndb/driver/Makefile.am @@ -10,13 +10,11 @@ sample_la_SOURCES = \ db.c \ driver.c \ instance.c \ - lock.c \ log.c \ syncptr.c \ zone.c \ db.h \ instance.h \ - lock.h \ log.h \ syncptr.h \ util.h \ diff --git a/bin/tests/system/dyndb/driver/lock.c b/bin/tests/system/dyndb/driver/lock.c deleted file mode 100644 index 5d73871f2c..0000000000 --- a/bin/tests/system/dyndb/driver/lock.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (C) Internet Systems Consortium, Inc. 
("ISC") - * - * SPDX-License-Identifier: MPL-2.0 AND ISC - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at https://mozilla.org/MPL/2.0/. - * - * See the COPYRIGHT file distributed with this work for additional - * information regarding copyright ownership. - */ - -/* - * Copyright (C) Red Hat - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND AUTHORS DISCLAIMS ALL WARRANTIES WITH - * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, - * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE - * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR - * PERFORMANCE OF THIS SOFTWARE. - */ - -#include "lock.h" - -#include -#include - -/* - * Lock BIND dispatcher and allow only single task to run. - * - * @warning - * All calls to isc_task_beginexclusive() have to operate on the same task - * otherwise it would not be possible to distinguish recursive locking - * from real conflict on the dispatcher lock. - * For this reason this wrapper function always works with inst->task. - * As a result, this function have to be be called only from inst->task. - * - * Recursive locking is allowed. Auxiliary variable pointed to by "statep" - * stores information if last run_exclusive_enter() operation really locked - * something or if the lock was called recursively and was no-op. - * - * The pair (inst, state) used for run_exclusive_enter() has to be - * used for run_exclusive_exit(). 
- * - * @param[in] inst The instance with the only task which is allowed to - * run. - * @param[in,out] statep Lock state: ISC_R_SUCCESS or ISC_R_LOCKBUSY - */ -void -run_exclusive_enter(sample_instance_t *inst, isc_result_t *statep) { - REQUIRE(statep != NULL); - REQUIRE(*statep == ISC_R_IGNORE); - - *statep = isc_task_beginexclusive(inst->task); - RUNTIME_CHECK(*statep == ISC_R_SUCCESS || *statep == ISC_R_LOCKBUSY); -} - -/* - * Exit task-exclusive mode. - * - * @param[in] inst The instance used for previous run_exclusive_enter() call. - * @param[in] state Lock state as returned by run_exclusive_enter(). - */ -void -run_exclusive_exit(sample_instance_t *inst, isc_result_t state) { - if (state == ISC_R_SUCCESS) { - isc_task_endexclusive(inst->task); - } else { - /* Unlocking recursive lock or the lock was never locked. */ - INSIST(state == ISC_R_LOCKBUSY || state == ISC_R_IGNORE); - } - - return; -} diff --git a/bin/tests/system/dyndb/driver/lock.h b/bin/tests/system/dyndb/driver/lock.h deleted file mode 100644 index 9f2ed9ed7e..0000000000 --- a/bin/tests/system/dyndb/driver/lock.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (C) Internet Systems Consortium, Inc. ("ISC") - * - * SPDX-License-Identifier: MPL-2.0 AND ISC - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at https://mozilla.org/MPL/2.0/. - * - * See the COPYRIGHT file distributed with this work for additional - * information regarding copyright ownership. - */ - -/* - * Copyright (C) Red Hat - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND AUTHORS DISCLAIMS ALL WARRANTIES WITH - * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY - * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, - * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE - * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR - * PERFORMANCE OF THIS SOFTWARE. - */ - -#pragma once - -#include "instance.h" -#include "util.h" - -void -run_exclusive_enter(sample_instance_t *inst, isc_result_t *statep); - -void -run_exclusive_exit(sample_instance_t *inst, isc_result_t state); diff --git a/bin/tests/system/dyndb/driver/zone.c b/bin/tests/system/dyndb/driver/zone.c index bae2d60ef7..b4a881c86f 100644 --- a/bin/tests/system/dyndb/driver/zone.c +++ b/bin/tests/system/dyndb/driver/zone.c @@ -35,6 +35,7 @@ #include #include +#include #include #include @@ -42,7 +43,6 @@ #include #include "instance.h" -#include "lock.h" #include "log.h" #include "util.h" @@ -134,7 +134,6 @@ publish_zone(sample_instance_t *inst, dns_zone_t *zone) { bool freeze = false; dns_zone_t *zone_in_view = NULL; dns_view_t *view_in_zone = NULL; - isc_result_t lock_state = ISC_R_IGNORE; REQUIRE(inst != NULL); REQUIRE(zone != NULL); @@ -172,7 +171,7 @@ publish_zone(sample_instance_t *inst, dns_zone_t *zone) { CLEANUP_WITH(ISC_R_UNEXPECTED); } - run_exclusive_enter(inst, &lock_state); + isc_task_beginexclusive(inst->task); if (inst->view->frozen) { freeze = true; dns_view_thaw(inst->view); @@ -194,7 +193,7 @@ cleanup: if (freeze) { dns_view_freeze(inst->view); } - run_exclusive_exit(inst, lock_state); + isc_task_endexclusive(inst->task); return (result); } diff --git a/bin/tests/system/pipelined/pipequeries.c b/bin/tests/system/pipelined/pipequeries.c index 5693846387..e2f31df368 100644 --- a/bin/tests/system/pipelined/pipequeries.c +++ 
b/bin/tests/system/pipelined/pipequeries.c @@ -17,11 +17,11 @@ #include #include -#include #include #include #include #include +#include #include #include #include @@ -61,6 +61,7 @@ static isc_mem_t *mctx = NULL; static dns_requestmgr_t *requestmgr = NULL; +static isc_loopmgr_t *loopmgr = NULL; static bool have_src = false; static isc_sockaddr_t srcaddr; static isc_sockaddr_t dstaddr; @@ -119,7 +120,8 @@ recvresponse(isc_task_t *task, isc_event_t *event) { isc_event_free(&event); if (--onfly == 0) { - isc_app_shutdown(); + isc_task_detach(&task); + isc_loopmgr_shutdown(loopmgr); } return; } @@ -177,17 +179,17 @@ sendquery(isc_task_t *task) { } static void -sendqueries(isc_task_t *task, isc_event_t *event) { +sendqueries(void *arg) { + isc_task_t *task = (isc_task_t *)arg; isc_result_t result; - isc_event_free(&event); - do { result = sendquery(task); } while (result == ISC_R_SUCCESS); if (onfly == 0) { - isc_app_shutdown(); + isc_task_detach(&task); + isc_loopmgr_shutdown(loopmgr); } return; } @@ -208,8 +210,6 @@ main(int argc, char *argv[]) { uint16_t port = PORT; int c; - RUNCHECK(isc_app_start()); - isc_commandline_errprint = false; while ((c = isc_commandline_parse(argc, argv, "p:r:")) != -1) { switch (c) { @@ -256,14 +256,13 @@ main(int argc, char *argv[]) { } isc_sockaddr_fromin(&dstaddr, &inaddr, port); - isc_mem_create(&mctx); + isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr); isc_log_create(mctx, &lctx, &lcfg); RUNCHECK(dst_lib_init(mctx, NULL)); - isc_managers_create(mctx, 1, 0, &netmgr, &taskmgr, NULL); - RUNCHECK(isc_task_create(taskmgr, 0, &task, 0)); + RUNCHECK(isc_task_create(taskmgr, &task, 0)); RUNCHECK(dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr)); RUNCHECK(dns_dispatch_createudp( @@ -272,9 +271,9 @@ main(int argc, char *argv[]) { NULL, &requestmgr)); RUNCHECK(dns_view_create(mctx, 0, "_test", &view)); - RUNCHECK(isc_app_onrun(mctx, task, sendqueries, NULL)); - (void)isc_app_run(); + isc_loopmgr_setup(loopmgr, sendqueries, task); 
+ isc_loopmgr_run(loopmgr); dns_view_detach(&view); @@ -284,17 +283,11 @@ main(int argc, char *argv[]) { dns_dispatch_detach(&dispatchv4); dns_dispatchmgr_detach(&dispatchmgr); - isc_task_detach(&task); - - isc_managers_destroy(&netmgr, &taskmgr, NULL); - dst_lib_destroy(); isc_log_destroy(&lctx); - isc_mem_destroy(&mctx); - - isc_app_finish(); + isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr); return (0); } diff --git a/bin/tests/system/resolve.c b/bin/tests/system/resolve.c index 7d489df7ca..be4eecf53b 100644 --- a/bin/tests/system/resolve.c +++ b/bin/tests/system/resolve.c @@ -22,11 +22,11 @@ #include #include -#include #include #include #include #include +#include #include #include #include @@ -56,24 +56,9 @@ */ isc_mem_t *ctxs_mctx = NULL; +isc_loopmgr_t *ctxs_loopmgr = NULL; isc_nm_t *ctxs_netmgr = NULL; isc_taskmgr_t *ctxs_taskmgr = NULL; -isc_timermgr_t *ctxs_timermgr = NULL; - -static void -ctxs_destroy(void) { - isc_managers_destroy(&ctxs_netmgr, &ctxs_taskmgr, &ctxs_timermgr); - - isc_mem_destroy(&ctxs_mctx); -} - -static void -ctxs_init(void) { - isc_mem_create(&ctxs_mctx); - - isc_managers_create(ctxs_mctx, 1, 0, &ctxs_netmgr, &ctxs_taskmgr, - &ctxs_timermgr); -} static char *algname = NULL; @@ -226,7 +211,7 @@ addserver(dns_client_t *client, const char *addrstr, const char *port, result = dns_name_fromtext(name, &b, dns_rootname, 0, NULL); if (result != ISC_R_SUCCESS) { fprintf(stderr, "failed to convert qname: %u\n", - result); + (unsigned int)result); exit(1); } } @@ -234,11 +219,67 @@ addserver(dns_client_t *client, const char *addrstr, const char *port, result = dns_client_setservers(client, dns_rdataclass_in, name, &servers); if (result != ISC_R_SUCCESS) { - fprintf(stderr, "set server failed: %u\n", result); + fprintf(stderr, "set server failed: %u\n", + (unsigned int)result); exit(1); } } +static dns_name_t *qname = NULL; +static unsigned int resopt = 0; +static dns_rdatatype_t type = dns_rdatatype_a; + +static void 
+resolve_cb(dns_client_t *client, const dns_name_t *query_name, + dns_namelist_t *namelist, isc_result_t result) { + UNUSED(query_name); + + if (result != ISC_R_SUCCESS) { + fprintf(stderr, "resolution failed: %s\n", + isc_result_totext(result)); + goto cleanup; + } + + for (dns_name_t *name = ISC_LIST_HEAD(*namelist); name != NULL; + name = ISC_LIST_NEXT(name, link)) + { + for (dns_rdataset_t *rdataset = ISC_LIST_HEAD(name->list); + rdataset != NULL; rdataset = ISC_LIST_NEXT(rdataset, link)) + { + if (printdata(rdataset, name) != ISC_R_SUCCESS) { + fprintf(stderr, "print data failed\n"); + } + } + } + +cleanup: + dns_client_freeresanswer(client, namelist); + + dns_client_detach(&client); + + isc_mem_put(ctxs_mctx, namelist, sizeof(*namelist)); + + isc_loopmgr_shutdown(ctxs_loopmgr); +} + +static void +resolve(void *arg) { + dns_client_t *client = (void *)arg; + dns_namelist_t *namelist = isc_mem_get(ctxs_mctx, sizeof(*namelist)); + isc_result_t result; + + ISC_LIST_INIT(*namelist); + result = dns_client_resolve(client, qname, dns_rdataclass_in, type, + resopt, namelist, resolve_cb); + + if (result != ISC_R_SUCCESS) { + fprintf(stderr, "resolution failed: %s\n", + isc_result_totext(result)); + isc_mem_put(ctxs_mctx, namelist, sizeof(*namelist)); + isc_loopmgr_shutdown(ctxs_loopmgr); + } +} + int main(int argc, char *argv[]) { int ch; @@ -254,11 +295,7 @@ main(int argc, char *argv[]) { isc_buffer_t b; dns_fixedname_t qname0; unsigned int namelen; - dns_name_t *qname = NULL, *name = NULL; - dns_rdatatype_t type = dns_rdatatype_a; - dns_rdataset_t *rdataset = NULL; - dns_namelist_t namelist; - unsigned int clientopt, resopt = 0; + unsigned int clientopt; bool is_sep = false; const char *port = "53"; struct in_addr in4; @@ -366,21 +403,23 @@ main(int argc, char *argv[]) { altserveraddr = cp + 1; } - ctxs_init(); + isc_managers_create(&ctxs_mctx, 1, &ctxs_loopmgr, &ctxs_netmgr, + &ctxs_taskmgr); result = dst_lib_init(ctxs_mctx, NULL); if (result != ISC_R_SUCCESS) { - 
fprintf(stderr, "dst_lib_init failed: %u\n", result); + fprintf(stderr, "dst_lib_init failed: %u\n", + (unsigned int)result); exit(1); } clientopt = 0; - result = dns_client_create(ctxs_mctx, ctxs_taskmgr, ctxs_netmgr, - ctxs_timermgr, clientopt, &client, addr4, + result = dns_client_create(ctxs_mctx, ctxs_loopmgr, ctxs_taskmgr, + ctxs_netmgr, clientopt, &client, addr4, addr6); if (result != ISC_R_SUCCESS) { - fprintf(stderr, "dns_client_create failed: %u, %s\n", result, - isc_result_totext(result)); + fprintf(stderr, "dns_client_create failed: %u, %s\n", + (unsigned int)result, isc_result_totext(result)); exit(1); } @@ -393,7 +432,7 @@ main(int argc, char *argv[]) { &resconf); if (result != ISC_R_SUCCESS && result != ISC_R_FILENOTFOUND) { fprintf(stderr, "irs_resconf_load failed: %u\n", - result); + (unsigned int)result); exit(1); } nameservers = irs_resconf_getnameservers(resconf); @@ -402,7 +441,7 @@ main(int argc, char *argv[]) { if (result != ISC_R_SUCCESS) { irs_resconf_destroy(&resconf); fprintf(stderr, "dns_client_setservers failed: %u\n", - result); + (unsigned int)result); exit(1); } irs_resconf_destroy(&resconf); @@ -432,39 +471,24 @@ main(int argc, char *argv[]) { qname = dns_fixedname_initname(&qname0); result = dns_name_fromtext(qname, &b, dns_rootname, 0, NULL); if (result != ISC_R_SUCCESS) { - fprintf(stderr, "failed to convert qname: %u\n", result); + fprintf(stderr, "failed to convert qname: %u\n", + (unsigned int)result); + exit(1); } /* Perform resolution */ if (keynamestr == NULL) { resopt |= DNS_CLIENTRESOPT_NODNSSEC; } - ISC_LIST_INIT(namelist); - result = dns_client_resolve(client, qname, dns_rdataclass_in, type, - resopt, &namelist); - if (result != ISC_R_SUCCESS) { - fprintf(stderr, "resolution failed: %s\n", - isc_result_totext(result)); - } - for (name = ISC_LIST_HEAD(namelist); name != NULL; - name = ISC_LIST_NEXT(name, link)) - { - for (rdataset = ISC_LIST_HEAD(name->list); rdataset != NULL; - rdataset = ISC_LIST_NEXT(rdataset, link)) 
- { - if (printdata(rdataset, name) != ISC_R_SUCCESS) { - fprintf(stderr, "print data failed\n"); - } - } - } - dns_client_freeresanswer(client, &namelist); + isc_loopmgr_setup(ctxs_loopmgr, resolve, client); - /* Cleanup */ - dns_client_detach(&client); + isc_loopmgr_run(ctxs_loopmgr); - ctxs_destroy(); dst_lib_destroy(); + isc_managers_destroy(&ctxs_mctx, &ctxs_loopmgr, &ctxs_netmgr, + &ctxs_taskmgr); + return (0); } diff --git a/bin/tests/system/statistics/tests.sh b/bin/tests/system/statistics/tests.sh index d21b4622b7..ce95ceccbe 100644 --- a/bin/tests/system/statistics/tests.sh +++ b/bin/tests/system/statistics/tests.sh @@ -217,7 +217,7 @@ if $FEATURETEST --have-libxml2 && "${CURL}" --http1.1 http://10.53.0.3:${EXTRAPO # grep "

Glue cache statistics

" xsltproc.out.${n} >/dev/null || ret=1 grep "

View _default" xsltproc.out.${n} >/dev/null || ret=1 grep "

Zone example" xsltproc.out.${n} >/dev/null || ret=1 - grep "

Task Manager Configuration

" xsltproc.out.${n} >/dev/null || ret=1 + # grep "

Task Manager Configuration

" xsltproc.out.${n} >/dev/null || ret=1 grep "

Tasks

" xsltproc.out.${n} >/dev/null || ret=1 grep "

Memory Usage Summary

" xsltproc.out.${n} >/dev/null || ret=1 grep "

Memory Contexts

" xsltproc.out.${n} >/dev/null || ret=1 diff --git a/bin/tests/system/tkey/keycreate.c b/bin/tests/system/tkey/keycreate.c index 83e0a38563..7725704333 100644 --- a/bin/tests/system/tkey/keycreate.c +++ b/bin/tests/system/tkey/keycreate.c @@ -14,10 +14,10 @@ #include #include -#include #include #include #include +#include #include #include #include @@ -58,6 +58,7 @@ static int port = 0; static dst_key_t *ourkey = NULL; static isc_mem_t *mctx = NULL; +static isc_loopmgr_t *loopmgr = NULL; static dns_tsigkey_t *tsigkey = NULL, *initialkey = NULL; static dns_tsig_keyring_t *ring = NULL; static unsigned char noncedata[16]; @@ -119,12 +120,13 @@ recvquery(isc_task_t *task, isc_event_t *event) { dns_message_detach(&response); dns_request_destroy(&reqev->request); isc_event_free(&event); - isc_app_shutdown(); - return; + isc_task_detach(&task); + isc_loopmgr_shutdown(loopmgr); } static void -sendquery(isc_task_t *task, isc_event_t *event) { +sendquery(void *arg) { + isc_task_t *task = (isc_task_t *)arg; struct in_addr inaddr; isc_sockaddr_t address; isc_region_t r; @@ -137,8 +139,6 @@ sendquery(isc_task_t *task, isc_event_t *event) { dns_request_t *request = NULL; static char keystr[] = "0123456789ab"; - isc_event_free(&event); - result = ISC_R_FAILURE; if (inet_pton(AF_INET, ip_address, &inaddr) != 1) { CHECK("inet_pton", result); @@ -200,8 +200,6 @@ main(int argc, char *argv[]) { isc_result_t result; int type; - RUNCHECK(isc_app_start()); - if (argc < 4) { fprintf(stderr, "I:no DH key provided\n"); exit(-1); @@ -215,15 +213,14 @@ main(int argc, char *argv[]) { } isc_mem_debugging = ISC_MEM_DEBUGRECORD; - isc_mem_create(&mctx); + + isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr); isc_log_create(mctx, &log, &logconfig); RUNCHECK(dst_lib_init(mctx, NULL)); - isc_managers_create(mctx, 1, 0, &netmgr, &taskmgr, NULL); - - RUNCHECK(isc_task_create(taskmgr, 0, &task, 0)); + RUNCHECK(isc_task_create(taskmgr, &task, 0)); RUNCHECK(dns_dispatchmgr_create(mctx, netmgr, 
&dispatchmgr)); isc_sockaddr_any(&bind_any); @@ -238,8 +235,6 @@ main(int argc, char *argv[]) { dns_view_setkeyring(view, ring); dns_tsigkeyring_detach(&ring); - RUNCHECK(isc_app_onrun(mctx, task, sendquery, NULL)); - type = DST_TYPE_PUBLIC | DST_TYPE_PRIVATE | DST_TYPE_KEY; result = dst_key_fromnamedfile(ourkeyname, NULL, type, mctx, &ourkey); CHECK("dst_key_fromnamedfile", result); @@ -248,14 +243,13 @@ main(int argc, char *argv[]) { isc_nonce_buf(noncedata, sizeof(noncedata)); isc_buffer_add(&nonce, sizeof(noncedata)); - (void)isc_app_run(); + isc_loopmgr_setup(loopmgr, sendquery, task); + isc_loopmgr_run(loopmgr); dns_requestmgr_shutdown(requestmgr); dns_requestmgr_detach(&requestmgr); dns_dispatch_detach(&dispatchv4); dns_dispatchmgr_detach(&dispatchmgr); - isc_task_detach(&task); - isc_managers_destroy(&netmgr, &taskmgr, NULL); dst_key_free(&ourkey); dns_tsigkey_detach(&initialkey); @@ -269,9 +263,7 @@ main(int argc, char *argv[]) { dst_lib_destroy(); - isc_mem_destroy(&mctx); - - isc_app_finish(); + isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr); return (0); } diff --git a/bin/tests/system/tkey/keydelete.c b/bin/tests/system/tkey/keydelete.c index bc9e18a7b5..fc863bb906 100644 --- a/bin/tests/system/tkey/keydelete.c +++ b/bin/tests/system/tkey/keydelete.c @@ -14,10 +14,10 @@ #include #include -#include #include #include #include +#include #include #include #include @@ -55,6 +55,7 @@ static char *ip_address = NULL; static int port; static isc_mem_t *mctx = NULL; +static isc_loopmgr_t *loopmgr = NULL; static dns_tsigkey_t *tsigkey = NULL; static dns_tsig_keyring_t *ring = NULL; static dns_requestmgr_t *requestmgr = NULL; @@ -97,20 +98,19 @@ recvquery(isc_task_t *task, isc_event_t *event) { dns_message_detach(&response); dns_request_destroy(&reqev->request); isc_event_free(&event); - isc_app_shutdown(); - return; + isc_task_detach(&task); + isc_loopmgr_shutdown(loopmgr); } static void -sendquery(isc_task_t *task, isc_event_t *event) { +sendquery(void 
*arg) { + isc_task_t *task = (isc_task_t *)arg; struct in_addr inaddr; isc_sockaddr_t address; isc_result_t result; dns_message_t *query = NULL; dns_request_t *request = NULL; - isc_event_free(&event); - result = ISC_R_FAILURE; if (inet_pton(AF_INET, ip_address, &inaddr) != 1) { CHECK("inet_pton", result); @@ -145,8 +145,6 @@ main(int argc, char **argv) { isc_result_t result; int type; - RUNCHECK(isc_app_start()); - if (argc < 4) { fprintf(stderr, "I:no key to delete\n"); exit(-1); @@ -159,15 +157,13 @@ main(int argc, char **argv) { port = atoi(argv[2]); keyname = argv[3]; - isc_mem_create(&mctx); + isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr); isc_log_create(mctx, &log, &logconfig); RUNCHECK(dst_lib_init(mctx, NULL)); - isc_managers_create(mctx, 1, 0, &netmgr, &taskmgr, NULL); - - RUNCHECK(isc_task_create(taskmgr, 0, &task, 0)); + RUNCHECK(isc_task_create(taskmgr, &task, 0)); RUNCHECK(dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr)); isc_sockaddr_any(&bind_any); RUNCHECK(dns_dispatch_createudp(dispatchmgr, &bind_any, &dispatchv4)); @@ -180,8 +176,6 @@ main(int argc, char **argv) { RUNCHECK(dns_view_create(mctx, 0, "_test", &view)); dns_view_setkeyring(view, ring); - RUNCHECK(isc_app_onrun(mctx, task, sendquery, NULL)); - type = DST_TYPE_PUBLIC | DST_TYPE_PRIVATE | DST_TYPE_KEY; result = dst_key_fromnamedfile(keyname, NULL, type, mctx, &dstkey); CHECK("dst_key_fromnamedfile", result); @@ -191,14 +185,13 @@ main(int argc, char **argv) { dst_key_free(&dstkey); CHECK("dns_tsigkey_createfromkey", result); - (void)isc_app_run(); + isc_loopmgr_setup(loopmgr, sendquery, task); + isc_loopmgr_run(loopmgr); dns_requestmgr_shutdown(requestmgr); dns_requestmgr_detach(&requestmgr); dns_dispatch_detach(&dispatchv4); dns_dispatchmgr_detach(&dispatchmgr); - isc_task_detach(&task); - isc_managers_destroy(&netmgr, &taskmgr, NULL); dns_tsigkeyring_detach(&ring); @@ -212,9 +205,7 @@ main(int argc, char **argv) { dst_lib_destroy(); - isc_mem_destroy(&mctx); - - 
isc_app_finish(); + isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr); return (0); } diff --git a/bin/tests/test_client.c b/bin/tests/test_client.c index 45afc4c371..15cf35125d 100644 --- a/bin/tests/test_client.c +++ b/bin/tests/test_client.c @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -51,8 +52,9 @@ static const char *protocols[] = { "udp", "tcp", "http-plain-get" }; static isc_mem_t *mctx = NULL; -static isc_nm_t *netmgr = NULL; static isc_loopmgr_t *loopmgr = NULL; +static isc_nm_t *netmgr = NULL; +static isc_taskmgr_t *taskmgr = NULL; static protocol_t protocol; static const char *address; @@ -284,32 +286,9 @@ parse_options(int argc, char **argv) { printf(" to %s, %d workers\n", buf, workers); } -static void -_signal(int sig, void (*handler)(int)) { - struct sigaction sa = { .sa_handler = handler }; - - RUNTIME_CHECK(sigfillset(&sa.sa_mask) == 0); - RUNTIME_CHECK(sigaction(sig, &sa, NULL) >= 0); -} - static void setup(void) { - sigset_t sset; - - _signal(SIGPIPE, SIG_IGN); - _signal(SIGHUP, SIG_DFL); - _signal(SIGTERM, SIG_DFL); - _signal(SIGINT, SIG_DFL); - - RUNTIME_CHECK(sigemptyset(&sset) == 0); - RUNTIME_CHECK(sigaddset(&sset, SIGHUP) == 0); - RUNTIME_CHECK(sigaddset(&sset, SIGINT) == 0); - RUNTIME_CHECK(sigaddset(&sset, SIGTERM) == 0); - RUNTIME_CHECK(pthread_sigmask(SIG_BLOCK, &sset, NULL) == 0); - - isc_mem_create(&mctx); - - isc_managers_create(mctx, workers, 0, &loopmgr, &netmgr, NULL); + isc_managers_create(&mctx, workers, &loopmgr, &netmgr, &taskmgr); } static void @@ -318,11 +297,11 @@ teardown(void) { close(out); } - isc_managers_destroy(&loopmgr, &netmgr, NULL); - isc_mem_destroy(&mctx); if (tls_ctx) { isc_tlsctx_free(&tls_ctx); } + + isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr); } static void diff --git a/bin/tests/test_server.c b/bin/tests/test_server.c index 3c31590244..05a3c0b13e 100644 --- a/bin/tests/test_server.c +++ b/bin/tests/test_server.c @@ -33,8 +33,9 @@ typedef enum { UDP, TCP, 
DOT, HTTPS, HTTP } protocol_t; static const char *protocols[] = { "udp", "tcp", "dot", "https", "http-plain" }; static isc_mem_t *mctx = NULL; -static isc_nm_t *netmgr = NULL; static isc_loopmgr_t *loopmgr = NULL; +static isc_nm_t *netmgr = NULL; +static isc_taskmgr_t *taskmgr = NULL; static protocol_t protocol; static in_port_t port; @@ -165,41 +166,18 @@ parse_options(int argc, char **argv) { workers); } -static void -_signal(int sig, void (*handler)(int)) { - struct sigaction sa = { .sa_handler = handler }; - - RUNTIME_CHECK(sigfillset(&sa.sa_mask) == 0); - RUNTIME_CHECK(sigaction(sig, &sa, NULL) >= 0); -} - static void setup(void) { - sigset_t sset; - - _signal(SIGPIPE, SIG_IGN); - _signal(SIGHUP, SIG_DFL); - _signal(SIGTERM, SIG_DFL); - _signal(SIGINT, SIG_DFL); - - RUNTIME_CHECK(sigemptyset(&sset) == 0); - RUNTIME_CHECK(sigaddset(&sset, SIGHUP) == 0); - RUNTIME_CHECK(sigaddset(&sset, SIGINT) == 0); - RUNTIME_CHECK(sigaddset(&sset, SIGTERM) == 0); - RUNTIME_CHECK(pthread_sigmask(SIG_BLOCK, &sset, NULL) == 0); - - isc_mem_create(&mctx); - - isc_managers_create(mctx, workers, 0, &loopmgr, &netmgr, NULL); + isc_managers_create(&mctx, workers, &loopmgr, &netmgr, &taskmgr); } static void teardown(void) { - isc_managers_destroy(&loopmgr, &netmgr, NULL); - isc_mem_destroy(&mctx); if (tls_ctx) { isc_tlsctx_free(&tls_ctx); } + + isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr); } static void diff --git a/bin/tools/mdig.c b/bin/tools/mdig.c index eb72c62f08..9e81459919 100644 --- a/bin/tools/mdig.c +++ b/bin/tools/mdig.c @@ -17,12 +17,12 @@ #include #include -#include #include #include #include #include #include +#include #include #include #include @@ -89,6 +89,8 @@ #define US_PER_MS 1000 /*%< Microseconds per millisecond. 
*/ static isc_mem_t *mctx = NULL; +static isc_task_t *global_task = NULL; +static isc_loopmgr_t *loopmgr = NULL; static dns_requestmgr_t *requestmgr = NULL; static const char *batchname = NULL; static FILE *batchfp = NULL; @@ -188,7 +190,6 @@ rcode_totext(dns_rcode_t rcode) { return (totext.deconsttext); } -/* receive response event handler */ static void recvresponse(isc_task_t *task, isc_event_t *event) { dns_requestevent_t *reqev = (dns_requestevent_t *)event; @@ -548,7 +549,8 @@ cleanup: isc_event_free(&event); if (--onfly == 0) { - isc_app_shutdown(); + isc_task_detach(&global_task); + isc_loopmgr_shutdown(loopmgr); } return; } @@ -579,7 +581,7 @@ compute_cookie(unsigned char *cookie, size_t len) { } static isc_result_t -sendquery(struct query *query, isc_task_t *task) { +sendquery(struct query *query) { dns_request_t *request = NULL; dns_message_t *message = NULL; dns_name_t *qname = NULL; @@ -587,7 +589,7 @@ sendquery(struct query *query, isc_task_t *task) { isc_result_t result; dns_fixedname_t queryname; isc_buffer_t buf; - unsigned int options; + unsigned int options = 0; onfly++; @@ -748,37 +750,35 @@ sendquery(struct query *query, isc_task_t *task) { add_opt(message, query->udpsize, query->edns, flags, opts, i); } - options = 0; if (tcp_mode) { options |= DNS_REQUESTOPT_TCP; } - request = NULL; + result = dns_request_createvia( requestmgr, message, have_src ? 
&srcaddr : NULL, &dstaddr, dscp, options, NULL, query->timeout, query->udptimeout, - query->udpretries, task, recvresponse, message, &request); + query->udpretries, global_task, recvresponse, message, + &request); CHECK("dns_request_createvia", result); return (ISC_R_SUCCESS); } static void -sendqueries(isc_task_t *task, isc_event_t *event) { - struct query *query = (struct query *)event->ev_arg; - - isc_event_free(&event); +sendqueries(void *arg) { + struct query *query = (struct query *)arg; while (query != NULL) { struct query *next = ISC_LIST_NEXT(query, link); - sendquery(query, task); + sendquery(query); query = next; } if (onfly == 0) { - isc_app_shutdown(); + isc_task_detach(&global_task); + isc_loopmgr_shutdown(loopmgr); } - return; } noreturn static void @@ -2103,18 +2103,14 @@ main(int argc, char *argv[]) { isc_sockaddr_t bind_any; isc_log_t *lctx = NULL; isc_logconfig_t *lcfg = NULL; - isc_loopmgr_t *loopmgr = NULL; isc_nm_t *netmgr = NULL; isc_taskmgr_t *taskmgr = NULL; - isc_task_t *task = NULL; dns_dispatchmgr_t *dispatchmgr = NULL; dns_dispatch_t *dispatchvx = NULL; dns_view_t *view = NULL; unsigned int i; int ns; - RUNCHECK(isc_app_start()); - if (isc_net_probeipv4() == ISC_R_SUCCESS) { have_ipv4 = true; } @@ -2127,7 +2123,7 @@ main(int argc, char *argv[]) { preparse_args(argc, argv); - isc_mem_create(&mctx); + isc_managers_create(&mctx, 1, &loopmgr, &netmgr, &taskmgr); isc_log_create(mctx, &lctx, &lcfg); RUNCHECK(dst_lib_init(mctx, NULL)); @@ -2157,8 +2153,7 @@ main(int argc, char *argv[]) { fatal("can't choose between IPv4 and IPv6"); } - isc_managers_create(mctx, 1, 0, &loopmgr, &netmgr, &taskmgr); - RUNCHECK(isc_task_create(taskmgr, 0, &task, 0)); + RUNCHECK(isc_task_create(taskmgr, &global_task, 0)); RUNCHECK(dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr)); set_source_ports(dispatchmgr); @@ -2178,7 +2173,7 @@ main(int argc, char *argv[]) { RUNCHECK(dns_view_create(mctx, 0, "_test", &view)); query = ISC_LIST_HEAD(queries); - 
RUNCHECK(isc_app_onrun(mctx, task, sendqueries, query)); + isc_loopmgr_setup(loopmgr, sendqueries, query); /* * Stall to the start of a new second. @@ -2206,7 +2201,7 @@ main(int argc, char *argv[]) { } while (1); } - (void)isc_app_run(); + isc_loopmgr_run(loopmgr); dns_view_detach(&view); @@ -2216,10 +2211,6 @@ main(int argc, char *argv[]) { dns_dispatch_detach(&dispatchvx); dns_dispatchmgr_detach(&dispatchmgr); - isc_task_detach(&task); - - isc_managers_destroy(&loopmgr, &netmgr, &taskmgr); - dst_lib_destroy(); isc_log_destroy(&lctx); @@ -2249,9 +2240,6 @@ main(int argc, char *argv[]) { isc_mem_free(mctx, default_query.ecs_addr); } - isc_mem_destroy(&mctx); - - isc_app_finish(); - + isc_managers_destroy(&mctx, &loopmgr, &netmgr, &taskmgr); return (0); } diff --git a/doc/design/zone b/doc/design/zone index 1990712919..86331ec656 100644 --- a/doc/design/zone +++ b/doc/design/zone @@ -186,13 +186,6 @@ Functions: Initiate transfer of the zone from the given server or the primary servers listed in the zone structure. - dns_zone_maintenance(dns_zone_t *zone); - - Perform any maintenance operations required on the zone - * initiate up to date checks - * expire zones - * initiate ixfr version expire consolidation - dns_zone_locateprimary(dns_zone_t *zone); Working from the root zone locate the primary server for the zone. diff --git a/lib/bind9/include/bind9/getaddresses.h b/lib/bind9/include/bind9/getaddresses.h index 2904257177..b9a431b581 100644 --- a/lib/bind9/include/bind9/getaddresses.h +++ b/lib/bind9/include/bind9/getaddresses.h @@ -31,8 +31,9 @@ bind9_getaddresses(const char *hostname, in_port_t port, isc_sockaddr_t *addrs, * seen, it is ignored. If more than 'addrsize' addresses are seen, the * first 'addrsize' are returned and the remainder silently truncated. * - * This routine may block. If called by a program using the isc_app - * framework, it should be surrounded by isc_app_block()/isc_app_unblock(). + * This routine may block. 
If called by a program using the isc_loopmgr + * framework, it should be surrounded by isc_loopmgr_blocking() and + * isc_loopmgr_nonblocking(). * * Requires: *\li 'hostname' is not NULL. diff --git a/lib/dns/adb.c b/lib/dns/adb.c index ef52a778d1..10125de32f 100644 --- a/lib/dns/adb.c +++ b/lib/dns/adb.c @@ -2135,7 +2135,7 @@ dns_adb_create(isc_mem_t *mem, dns_view_t *view, isc_taskmgr_t *taskmgr, /* * Allocate an internal task. */ - result = isc_task_create(adb->taskmgr, 0, &adb->task, 0); + result = isc_task_create(adb->taskmgr, &adb->task, 0); if (result != ISC_R_SUCCESS) { goto free_lock; } diff --git a/lib/dns/cache.c b/lib/dns/cache.c index 6c856d0a16..51b9cfecdf 100644 --- a/lib/dns/cache.c +++ b/lib/dns/cache.c @@ -107,6 +107,7 @@ struct cache_cleaner { dns_cache_t *cache; isc_task_t *task; + isc_event_t *shutdown_event; isc_event_t *resched_event; /*% Sent by cleaner task to * itself to reschedule */ isc_event_t *overmem_event; @@ -259,7 +260,7 @@ dns_cache_create(isc_mem_t *cmctx, isc_mem_t *hmctx, isc_taskmgr_t *taskmgr, } if (taskmgr != NULL) { dbtask = NULL; - result = isc_task_create(taskmgr, 1, &dbtask, 0); + result = isc_task_create(taskmgr, &dbtask, 0); if (result != ISC_R_SUCCESS) { goto cleanup_db; } @@ -337,6 +338,10 @@ cache_free(dns_cache_t *cache) { isc_event_free(&cache->cleaner.resched_event); } + if (cache->cleaner.shutdown_event != NULL) { + isc_event_free(&cache->cleaner.shutdown_event); + } + if (cache->cleaner.iterator != NULL) { dns_dbiterator_destroy(&cache->cleaner.iterator); } @@ -406,16 +411,9 @@ dns_cache_detach(dns_cache_t **cachep) { if (isc_refcount_decrement(&cache->references) == 1) { cache->cleaner.overmem = false; - /* - * If the cleaner task exists, let it free the cache. 
- */ if (isc_refcount_decrement(&cache->live_tasks) > 1) { - isc_event_t *event = isc_event_allocate( - cache->mctx, &cache->cleaner, - DNS_EVENT_CACHESHUTDOWN, - cleaner_shutdown_action, &cache->cleaner, - sizeof(*event)); - isc_task_send(cache->cleaner.task, &event); + isc_task_send(cache->cleaner.task, + &cache->cleaner.shutdown_event); } else { cache_free(cache); } @@ -460,27 +458,32 @@ cache_cleaner_init(dns_cache_t *cache, isc_taskmgr_t *taskmgr, cleaner->replaceiterator = false; cleaner->task = NULL; + cleaner->shutdown_event = NULL; cleaner->resched_event = NULL; cleaner->overmem_event = NULL; result = dns_db_createiterator(cleaner->cache->db, false, &cleaner->iterator); if (result != ISC_R_SUCCESS) { - goto cleanup; + goto cleanup_mutex; } if (taskmgr != NULL) { - result = isc_task_create(taskmgr, 1, &cleaner->task, 0); + result = isc_task_create(taskmgr, &cleaner->task, 0); if (result != ISC_R_SUCCESS) { UNEXPECTED_ERROR(__FILE__, __LINE__, "isc_task_create() failed: %s", isc_result_totext(result)); result = ISC_R_UNEXPECTED; - goto cleanup; + goto cleanup_iterator; } isc_refcount_increment(&cleaner->cache->live_tasks); isc_task_setname(cleaner->task, "cachecleaner", cleaner); + cleaner->shutdown_event = isc_event_allocate( + cache->mctx, cleaner, DNS_EVENT_CACHESHUTDOWN, + cleaner_shutdown_action, cleaner, sizeof(isc_event_t)); + cleaner->resched_event = isc_event_allocate( cache->mctx, cleaner, DNS_EVENT_CACHECLEAN, incremental_cleaning_action, cleaner, @@ -493,19 +496,9 @@ cache_cleaner_init(dns_cache_t *cache, isc_taskmgr_t *taskmgr, return (ISC_R_SUCCESS); -cleanup: - if (cleaner->overmem_event != NULL) { - isc_event_free(&cleaner->overmem_event); - } - if (cleaner->resched_event != NULL) { - isc_event_free(&cleaner->resched_event); - } - if (cleaner->task != NULL) { - isc_task_detach(&cleaner->task); - } - if (cleaner->iterator != NULL) { - dns_dbiterator_destroy(&cleaner->iterator); - } +cleanup_iterator: + 
dns_dbiterator_destroy(&cleaner->iterator); +cleanup_mutex: isc_mutex_destroy(&cleaner->lock); return (result); @@ -956,8 +949,8 @@ cleaner_shutdown_action(isc_task_t *task, isc_event_t *event) { isc_event_free(&event); } - /* Make sure we don't reschedule anymore. */ - (void)isc_task_purgeevent(task, cache->cleaner.resched_event); + /* FIXME: Make sure we don't reschedule anymore. */ + /* (void)isc_task_purgeevent(task, cache->cleaner.resched_event); */ isc_refcount_decrementz(&cache->live_tasks); diff --git a/lib/dns/catz.c b/lib/dns/catz.c index e53df3b7fa..e58e285166 100644 --- a/lib/dns/catz.c +++ b/lib/dns/catz.c @@ -732,7 +732,7 @@ dns_catz_new_zones(dns_catz_zones_t **catzsp, dns_catz_zonemodmethods_t *zmm, new_zones->loopmgr = loopmgr; new_zones->taskmgr = taskmgr; - result = isc_task_create(taskmgr, 0, &new_zones->updater, 0); + result = isc_task_create(taskmgr, &new_zones->updater, 0); if (result != ISC_R_SUCCESS) { goto cleanup_ht; } diff --git a/lib/dns/client.c b/lib/dns/client.c index af34beba05..88998ab4e1 100644 --- a/lib/dns/client.c +++ b/lib/dns/client.c @@ -14,11 +14,11 @@ #include #include -#include #include #include #include #include +#include #include #include #include @@ -75,11 +75,7 @@ struct dns_client { /* Unlocked */ unsigned int magic; unsigned int attributes; - isc_mutex_t lock; isc_mem_t *mctx; - bool readydone; - isc_mutex_t readylock; - isc_condition_t ready; isc_taskmgr_t *taskmgr; isc_task_t *task; isc_nm_t *nm; @@ -93,8 +89,7 @@ struct dns_client { isc_refcount_t references; - /* Locked */ - dns_viewlist_t viewlist; + dns_view_t *view; ISC_LIST(struct resctx) resctxs; }; @@ -105,16 +100,13 @@ struct dns_client { * Internal state for a single name resolution procedure */ typedef struct resctx { - /* Unlocked */ unsigned int magic; - isc_mutex_t lock; dns_client_t *client; bool want_dnssec; bool want_validation; bool want_cdflag; bool want_tcp; - /* Locked */ ISC_LINK(struct resctx) link; isc_task_t *task; dns_view_t *view; @@ 
-125,7 +117,6 @@ typedef struct resctx { dns_namelist_t namelist; isc_result_t result; dns_clientresevent_t *event; - bool canceled; dns_rdataset_t *rdataset; dns_rdataset_t *sigrdataset; } resctx_t; @@ -134,24 +125,20 @@ typedef struct resctx { * Argument of an internal event for synchronous name resolution. */ typedef struct resarg { - /* Unlocked */ - isc_appctx_t *actx; + isc_mem_t *mctx; dns_client_t *client; - isc_mutex_t lock; + const dns_name_t *name; - /* Locked */ isc_result_t result; isc_result_t vresult; dns_namelist_t *namelist; dns_clientrestrans_t *trans; - bool canceled; + dns_client_resolve_cb resolve_cb; } resarg_t; static void client_resfind(resctx_t *rctx, dns_fetchevent_t *event); static void -cancelresolve(dns_clientrestrans_t *trans); -static void destroyrestrans(dns_clientrestrans_t **transp); /* @@ -217,8 +204,8 @@ getudpdispatch(int family, dns_dispatchmgr_t *dispatchmgr, } static isc_result_t -createview(isc_mem_t *mctx, dns_rdataclass_t rdclass, isc_taskmgr_t *taskmgr, - isc_nm_t *nm, isc_loopmgr_t *loopmgr, dns_dispatchmgr_t *dispatchmgr, +createview(isc_mem_t *mctx, dns_rdataclass_t rdclass, isc_loopmgr_t *loopmgr, + isc_taskmgr_t *taskmgr, isc_nm_t *nm, dns_dispatchmgr_t *dispatchmgr, dns_dispatch_t *dispatchv4, dns_dispatch_t *dispatchv6, dns_view_t **viewp) { isc_result_t result; @@ -232,26 +219,27 @@ createview(isc_mem_t *mctx, dns_rdataclass_t rdclass, isc_taskmgr_t *taskmgr, /* Initialize view security roots */ result = dns_view_initsecroots(view, mctx); if (result != ISC_R_SUCCESS) { - dns_view_detach(&view); - return (result); + goto cleanup_view; } result = dns_view_createresolver(view, loopmgr, taskmgr, 1, nm, 0, dispatchmgr, dispatchv4, dispatchv6); if (result != ISC_R_SUCCESS) { - dns_view_detach(&view); - return (result); + goto cleanup_view; } result = dns_db_create(mctx, "rbt", dns_rootname, dns_dbtype_cache, rdclass, 0, NULL, &view->cachedb); if (result != ISC_R_SUCCESS) { - dns_view_detach(&view); - return (result); + 
goto cleanup_view; } *viewp = view; return (ISC_R_SUCCESS); + +cleanup_view: + dns_view_detach(&view); + return (result); } isc_result_t @@ -267,7 +255,6 @@ dns_client_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, REQUIRE(mctx != NULL); REQUIRE(taskmgr != NULL); - REQUIRE(loopmgr != NULL); REQUIRE(nm != NULL); REQUIRE(clientp != NULL && *clientp == NULL); @@ -280,12 +267,7 @@ dns_client_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, .nm = nm, }; - isc_mutex_init(&client->lock); - - isc_mutex_init(&client->readylock); - isc_condition_init(&client->ready); - - result = isc_task_create(client->taskmgr, 0, &client->task, 0); + result = isc_task_create(client->taskmgr, &client->task, 0); if (result != ISC_R_SUCCESS) { goto cleanup_lock; } @@ -327,14 +309,13 @@ dns_client_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, isc_refcount_init(&client->references, 1); /* Create the default view for class IN */ - result = createview(mctx, dns_rdataclass_in, taskmgr, nm, loopmgr, + result = createview(mctx, dns_rdataclass_in, loopmgr, taskmgr, nm, client->dispatchmgr, dispatchv4, dispatchv6, &view); if (result != ISC_R_SUCCESS) { goto cleanup_references; } - ISC_LIST_INIT(client->viewlist); - ISC_LIST_APPEND(client->viewlist, view, link); + client->view = view; dns_view_freeze(view); /* too early? 
*/ @@ -365,7 +346,6 @@ cleanup_dispatchmgr: cleanup_task: isc_task_detach(&client->task); cleanup_lock: - isc_mutex_destroy(&client->lock); isc_mem_put(mctx, client, sizeof(*client)); return (result); @@ -373,14 +353,9 @@ cleanup_lock: static void destroyclient(dns_client_t *client) { - dns_view_t *view = NULL; - isc_refcount_destroy(&client->references); - while ((view = ISC_LIST_HEAD(client->viewlist)) != NULL) { - ISC_LIST_UNLINK(client->viewlist, view, link); - dns_view_detach(&view); - } + dns_view_detach(&client->view); if (client->dispatchv4 != NULL) { dns_dispatch_detach(&client->dispatchv4); @@ -393,10 +368,6 @@ destroyclient(dns_client_t *client) { isc_task_detach(&client->task); - isc_condition_destroy(&client->ready); - isc_mutex_destroy(&client->readylock); - - isc_mutex_destroy(&client->lock); client->magic = 0; isc_mem_putanddetach(&client->mctx, client, sizeof(*client)); @@ -421,29 +392,18 @@ isc_result_t dns_client_setservers(dns_client_t *client, dns_rdataclass_t rdclass, const dns_name_t *name_space, isc_sockaddrlist_t *addrs) { isc_result_t result; - dns_view_t *view = NULL; REQUIRE(DNS_CLIENT_VALID(client)); REQUIRE(addrs != NULL); + REQUIRE(rdclass == dns_rdataclass_in); if (name_space == NULL) { name_space = dns_rootname; } - LOCK(&client->lock); - result = dns_viewlist_find(&client->viewlist, DNS_CLIENTVIEW_NAME, - rdclass, &view); - if (result != ISC_R_SUCCESS) { - UNLOCK(&client->lock); - return (result); - } - UNLOCK(&client->lock); - - result = dns_fwdtable_add(view->fwdtable, name_space, addrs, + result = dns_fwdtable_add(client->view->fwdtable, name_space, addrs, dns_fwdpolicy_only); - dns_view_detach(&view); - return (result); } @@ -451,26 +411,15 @@ isc_result_t dns_client_clearservers(dns_client_t *client, dns_rdataclass_t rdclass, const dns_name_t *name_space) { isc_result_t result; - dns_view_t *view = NULL; REQUIRE(DNS_CLIENT_VALID(client)); + REQUIRE(rdclass == dns_rdataclass_in); if (name_space == NULL) { name_space = 
dns_rootname; } - LOCK(&client->lock); - result = dns_viewlist_find(&client->viewlist, DNS_CLIENTVIEW_NAME, - rdclass, &view); - if (result != ISC_R_SUCCESS) { - UNLOCK(&client->lock); - return (result); - } - UNLOCK(&client->lock); - - result = dns_fwdtable_delete(view->fwdtable, name_space); - - dns_view_detach(&view); + result = dns_fwdtable_delete(client->view->fwdtable, name_space); return (result); } @@ -589,8 +538,6 @@ client_resfind(resctx_t *rctx, dns_fetchevent_t *event) { REQUIRE(RCTX_VALID(rctx)); - LOCK(&rctx->lock); - mctx = rctx->view->mctx; name = dns_fixedname_name(&rctx->name); @@ -604,7 +551,7 @@ client_resfind(resctx_t *rctx, dns_fetchevent_t *event) { rctx->restarts++; want_restart = false; - if (event == NULL && !rctx->canceled) { + if (event == NULL) { fname = dns_fixedname_initname(&foundname); INSIST(!dns_rdataset_isassociated(rctx->rdataset)); INSIST(rctx->sigrdataset == NULL || @@ -647,22 +594,15 @@ client_resfind(resctx_t *rctx, dns_fetchevent_t *event) { } /* - * If we've been canceled, forget about the result. + * Get some resource for copying the + * result. */ - if (rctx->canceled) { - result = ISC_R_CANCELED; - } else { - /* - * Otherwise, get some resource for copying the - * result. 
- */ - dns_name_t *aname = dns_fixedname_name(&rctx->name); + dns_name_t *aname = dns_fixedname_name(&rctx->name); - ansname = isc_mem_get(mctx, sizeof(*ansname)); - dns_name_init(ansname, NULL); + ansname = isc_mem_get(mctx, sizeof(*ansname)); + dns_name_init(ansname, NULL); - dns_name_dup(aname, mctx, ansname); - } + dns_name_dup(aname, mctx, ansname); switch (result) { case ISC_R_SUCCESS: @@ -927,8 +867,6 @@ client_resfind(resctx_t *rctx, dns_fetchevent_t *event) { rctx->event->ev_sender = rctx; isc_task_sendanddetach(&task, ISC_EVENT_PTR(&rctx->event)); } - - UNLOCK(&rctx->lock); } static void @@ -936,12 +874,10 @@ resolve_done(isc_task_t *task, isc_event_t *event) { resarg_t *resarg = event->ev_arg; dns_clientresevent_t *rev = (dns_clientresevent_t *)event; dns_name_t *name = NULL; - dns_client_t *client = resarg->client; + isc_result_t result; UNUSED(task); - LOCK(&resarg->lock); - resarg->result = rev->result; resarg->vresult = rev->vresult; while ((name = ISC_LIST_HEAD(rev->answerlist)) != NULL) { @@ -949,75 +885,12 @@ resolve_done(isc_task_t *task, isc_event_t *event) { ISC_LIST_APPEND(*resarg->namelist, name, link); } - destroyrestrans(&resarg->trans); isc_event_free(&event); - resarg->client = NULL; - if (!resarg->canceled) { - UNLOCK(&resarg->lock); + destroyrestrans(&resarg->trans); - /* - * Signal that the entire process is done. - */ - LOCK(&client->readylock); - client->readydone = true; - SIGNAL(&client->ready); - UNLOCK(&client->readylock); - } else { - /* - * We have already exited from the loop (due to some - * unexpected event). Just clean the arg up. 
- */ - UNLOCK(&resarg->lock); - isc_mutex_destroy(&resarg->lock); - isc_mem_put(client->mctx, resarg, sizeof(*resarg)); - } + result = resarg->result; - dns_client_detach(&client); -} - -isc_result_t -dns_client_resolve(dns_client_t *client, const dns_name_t *name, - dns_rdataclass_t rdclass, dns_rdatatype_t type, - unsigned int options, dns_namelist_t *namelist) { - isc_result_t result; - resarg_t *resarg = NULL; - - REQUIRE(DNS_CLIENT_VALID(client)); - REQUIRE(namelist != NULL && ISC_LIST_EMPTY(*namelist)); - - resarg = isc_mem_get(client->mctx, sizeof(*resarg)); - - *resarg = (resarg_t){ - .client = client, - .result = DNS_R_SERVFAIL, - .namelist = namelist, - }; - - isc_mutex_init(&resarg->lock); - - result = dns_client_startresolve(client, name, rdclass, type, options, - client->task, resolve_done, resarg, - &resarg->trans); - if (result != ISC_R_SUCCESS) { - isc_mutex_destroy(&resarg->lock); - isc_mem_put(client->mctx, resarg, sizeof(*resarg)); - return (result); - } - - /* - * Block until the entire process is completed. - */ - LOCK(&client->readylock); - if (!client->readydone) { - WAIT(&client->ready, &client->readylock); - } - UNLOCK(&client->readylock); - - LOCK(&resarg->lock); - if (result == ISC_R_SUCCESS) { - result = resarg->result; - } if (result != ISC_R_SUCCESS && resarg->vresult != ISC_R_SUCCESS) { /* * If this lookup failed due to some error in DNSSEC @@ -1026,22 +899,45 @@ dns_client_resolve(dns_client_t *client, const dns_name_t *name, */ result = resarg->vresult; } - if (resarg->trans != NULL) { - /* - * Unusual termination (perhaps due to signal). We need some - * tricky cleanup process. - */ - resarg->canceled = true; - cancelresolve(resarg->trans); - UNLOCK(&resarg->lock); + resarg->resolve_cb(resarg->client, resarg->name, resarg->namelist, + result); - /* resarg will be freed in the event handler. 
*/ - } else { - UNLOCK(&resarg->lock); + dns_client_detach(&resarg->client); - isc_mutex_destroy(&resarg->lock); + isc_mem_putanddetach(&resarg->mctx, resarg, sizeof(*resarg)); +} + +isc_result_t +dns_client_resolve(dns_client_t *client, const dns_name_t *name, + dns_rdataclass_t rdclass, dns_rdatatype_t type, + unsigned int options, dns_namelist_t *namelist, + dns_client_resolve_cb resolve_cb) { + isc_result_t result; + resarg_t *resarg = NULL; + + REQUIRE(DNS_CLIENT_VALID(client)); + REQUIRE(namelist != NULL && ISC_LIST_EMPTY(*namelist)); + REQUIRE(rdclass == dns_rdataclass_in); + + resarg = isc_mem_get(client->mctx, sizeof(*resarg)); + + *resarg = (resarg_t){ + .client = client, + .name = name, + .result = DNS_R_SERVFAIL, + .namelist = namelist, + .resolve_cb = resolve_cb, + }; + + isc_mem_attach(client->mctx, &resarg->mctx); + + result = dns_client_startresolve(client, name, rdclass, type, options, + client->task, resolve_done, resarg, + &resarg->trans); + if (result != ISC_R_SUCCESS) { isc_mem_put(client->mctx, resarg, sizeof(*resarg)); + return (result); } return (result); @@ -1053,7 +949,6 @@ dns_client_startresolve(dns_client_t *client, const dns_name_t *name, unsigned int options, isc_task_t *task, isc_taskaction_t action, void *arg, dns_clientrestrans_t **transp) { - dns_view_t *view = NULL; dns_clientresevent_t *event = NULL; resctx_t *rctx = NULL; isc_task_t *tclone = NULL; @@ -1064,14 +959,7 @@ dns_client_startresolve(dns_client_t *client, const dns_name_t *name, REQUIRE(DNS_CLIENT_VALID(client)); REQUIRE(transp != NULL && *transp == NULL); - - LOCK(&client->lock); - result = dns_viewlist_find(&client->viewlist, DNS_CLIENTVIEW_NAME, - rdclass, &view); - UNLOCK(&client->lock); - if (result != ISC_R_SUCCESS) { - return (result); - } + REQUIRE(rdclass == dns_rdataclass_in); mctx = client->mctx; rdataset = NULL; @@ -1093,7 +981,16 @@ dns_client_startresolve(dns_client_t *client, const dns_name_t *name, ISC_LIST_INIT(event->answerlist); rctx = 
isc_mem_get(mctx, sizeof(*rctx)); - isc_mutex_init(&rctx->lock); + *rctx = (resctx_t){ + .client = client, + .task = client->task, + .event = event, + .type = type, + .want_dnssec = want_dnssec, + .want_validation = want_validation, + .want_cdflag = want_cdflag, + .want_tcp = want_tcp, + }; result = getrdataset(mctx, &rdataset); if (result != ISC_R_SUCCESS) { @@ -1112,27 +1009,14 @@ dns_client_startresolve(dns_client_t *client, const dns_name_t *name, dns_fixedname_init(&rctx->name); dns_name_copy(name, dns_fixedname_name(&rctx->name)); - rctx->client = client; ISC_LINK_INIT(rctx, link); - rctx->canceled = false; - rctx->task = client->task; - rctx->type = type; - rctx->view = view; - rctx->restarts = 0; - rctx->fetch = NULL; - rctx->want_dnssec = want_dnssec; - rctx->want_validation = want_validation; - rctx->want_cdflag = want_cdflag; - rctx->want_tcp = want_tcp; + dns_view_attach(client->view, &rctx->view); ISC_LIST_INIT(rctx->namelist); - rctx->event = event; rctx->magic = RCTX_MAGIC; isc_refcount_increment(&client->references); - LOCK(&client->lock); ISC_LIST_APPEND(client->resctxs, rctx, link); - UNLOCK(&client->lock); *transp = (dns_clientrestrans_t *)rctx; client_resfind(rctx, NULL); @@ -1146,42 +1030,13 @@ cleanup: if (sigrdataset != NULL) { putrdataset(client->mctx, &sigrdataset); } - isc_mutex_destroy(&rctx->lock); isc_mem_put(mctx, rctx, sizeof(*rctx)); isc_event_free(ISC_EVENT_PTR(&event)); isc_task_detach(&tclone); - dns_view_detach(&view); return (result); } -/*%< - * Cancel an ongoing resolution procedure started via - * dns_client_startresolve(). - * - * If the resolution procedure has not completed, post its CLIENTRESDONE - * event with a result code of #ISC_R_CANCELED. 
- */ -static void -cancelresolve(dns_clientrestrans_t *trans) { - resctx_t *rctx = NULL; - - REQUIRE(trans != NULL); - rctx = (resctx_t *)trans; - REQUIRE(RCTX_VALID(rctx)); - - LOCK(&rctx->lock); - - if (!rctx->canceled) { - rctx->canceled = true; - if (rctx->fetch != NULL) { - dns_resolver_cancelfetch(rctx->fetch); - } - } - - UNLOCK(&rctx->lock); -} - void dns_client_freeresanswer(dns_client_t *client, dns_namelist_t *namelist) { dns_name_t *name; @@ -1233,19 +1088,12 @@ destroyrestrans(dns_clientrestrans_t **transp) { * Wait for the lock in client_resfind to be released before * destroying the lock. */ - LOCK(&rctx->lock); - UNLOCK(&rctx->lock); - - LOCK(&client->lock); INSIST(ISC_LINK_LINKED(rctx, link)); ISC_LIST_UNLINK(client->resctxs, rctx, link); - UNLOCK(&client->lock); - INSIST(ISC_LIST_EMPTY(rctx->namelist)); - isc_mutex_destroy(&rctx->lock); rctx->magic = 0; isc_mem_put(mctx, rctx, sizeof(*rctx)); @@ -1256,7 +1104,6 @@ dns_client_addtrustedkey(dns_client_t *client, dns_rdataclass_t rdclass, dns_rdatatype_t rdtype, const dns_name_t *keyname, isc_buffer_t *databuf) { isc_result_t result; - dns_view_t *view = NULL; dns_keytable_t *secroots = NULL; dns_name_t *name = NULL; char rdatabuf[DST_KEY_MAXSIZE]; @@ -1266,14 +1113,9 @@ dns_client_addtrustedkey(dns_client_t *client, dns_rdataclass_t rdclass, isc_buffer_t b; REQUIRE(DNS_CLIENT_VALID(client)); + REQUIRE(rdclass == dns_rdataclass_in); - LOCK(&client->lock); - result = dns_viewlist_find(&client->viewlist, DNS_CLIENTVIEW_NAME, - rdclass, &view); - UNLOCK(&client->lock); - CHECK(result); - - CHECK(dns_view_getsecroots(view, &secroots)); + CHECK(dns_view_getsecroots(client->view, &secroots)); DE_CONST(keyname, name); @@ -1298,9 +1140,6 @@ dns_client_addtrustedkey(dns_client_t *client, dns_rdataclass_t rdclass, CHECK(dns_keytable_add(secroots, false, false, name, &ds, NULL, NULL)); cleanup: - if (view != NULL) { - dns_view_detach(&view); - } if (secroots != NULL) { dns_keytable_detach(&secroots); } diff 
--git a/lib/dns/dnstap.c b/lib/dns/dnstap.c index e013ec27fa..e0b81911b4 100644 --- a/lib/dns/dnstap.c +++ b/lib/dns/dnstap.c @@ -287,8 +287,7 @@ dns_dt_reopen(dns_dtenv_t *env, int roll) { /* * Run in task-exclusive mode. */ - result = isc_task_beginexclusive(env->reopen_task); - RUNTIME_CHECK(result == ISC_R_SUCCESS); + isc_task_beginexclusive(env->reopen_task); /* * Check that we can create a new fw object. diff --git a/lib/dns/include/dns/client.h b/lib/dns/include/dns/client.h index 19ca2e0062..0caf154e61 100644 --- a/lib/dns/include/dns/client.h +++ b/lib/dns/include/dns/client.h @@ -113,7 +113,7 @@ dns_client_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, * *\li 'nm' is a valid network manager. * - *\li 'loopmgr' is a valid loop manager. + *\li 'loopmgr' is a valid loop manager. * *\li clientp != NULL && *clientp == NULL. * @@ -187,10 +187,16 @@ dns_client_clearservers(dns_client_t *client, dns_rdataclass_t rdclass, *\li Anything else Failure. */ +typedef void (*dns_client_resolve_cb)(dns_client_t *client, + const dns_name_t *name, + dns_namelist_t *namelist, + isc_result_t result); + isc_result_t dns_client_resolve(dns_client_t *client, const dns_name_t *name, dns_rdataclass_t rdclass, dns_rdatatype_t type, - unsigned int options, dns_namelist_t *namelist); + unsigned int options, dns_namelist_t *namelist, + dns_client_resolve_cb resolve_cb); isc_result_t dns_client_startresolve(dns_client_t *client, const dns_name_t *name, diff --git a/lib/dns/masterdump.c b/lib/dns/masterdump.c index e2743de806..3b419446ec 100644 --- a/lib/dns/masterdump.c +++ b/lib/dns/masterdump.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -1503,14 +1504,10 @@ master_dump_cb(void *data) { * This will run in a network/task manager thread when the dump is complete.
*/ static void -master_dump_done_cb(void *data, isc_result_t result) { +master_dump_done_cb(void *data) { dns_dumpctx_t *dctx = data; - if (result == ISC_R_SUCCESS && dctx->result != ISC_R_SUCCESS) { - result = dctx->result; - } - - (dctx->done)(dctx->done_arg, result); + (dctx->done)(dctx->done_arg, dctx->result); dns_dumpctx_detach(&dctx); } @@ -1520,16 +1517,16 @@ master_dump_done_cb(void *data, isc_result_t result) { static void setup_dump(isc_task_t *task, isc_event_t *event) { dns_dumpctx_t *dctx = NULL; + isc_loopmgr_t *loopmgr = isc_task_getloopmgr(task); + isc_loop_t *loop = isc_loop_current(loopmgr); - REQUIRE(isc_nm_tid() >= 0); REQUIRE(event != NULL); dctx = event->ev_arg; REQUIRE(DNS_DCTX_VALID(dctx)); - isc_nm_work_offload(isc_task_getnetmgr(task), master_dump_cb, - master_dump_done_cb, dctx); + isc_work_enqueue(loop, master_dump_cb, master_dump_done_cb, dctx); isc_event_free(&event); } diff --git a/lib/dns/nta.c b/lib/dns/nta.c index 5d53f316ac..040f97c0e9 100644 --- a/lib/dns/nta.c +++ b/lib/dns/nta.c @@ -112,7 +112,7 @@ dns_ntatable_create(dns_view_t *view, isc_taskmgr_t *taskmgr, ntatable = isc_mem_get(view->mctx, sizeof(*ntatable)); ntatable->task = NULL; - result = isc_task_create(taskmgr, 0, &ntatable->task, 0); + result = isc_task_create(taskmgr, &ntatable->task, 0); if (result != ISC_R_SUCCESS) { goto cleanup_ntatable; } diff --git a/lib/dns/resolver.c b/lib/dns/resolver.c index 7dfdda343e..e424835f0b 100644 --- a/lib/dns/resolver.c +++ b/lib/dns/resolver.c @@ -10235,7 +10235,7 @@ dns_resolver_create(dns_view_t *view, isc_loopmgr_t *loopmgr, * Since we have a pool of tasks we bind them to task * queues to spread the load evenly */ - result = isc_task_create(taskmgr, 0, &res->tasks[i], i); + result = isc_task_create(taskmgr, &res->tasks[i], i); if (result != ISC_R_SUCCESS) { goto cleanup_tasks; } diff --git a/lib/dns/rpz.c b/lib/dns/rpz.c index bfbb0795d6..4e78b57609 100644 --- a/lib/dns/rpz.c +++ b/lib/dns/rpz.c @@ -1478,7 +1478,7 @@ 
dns_rpz_new_zones(dns_rpz_zones_t **rpzsp, char *rps_cstr, size_t rps_cstr_size, goto cleanup_rbt; } - result = isc_task_create(taskmgr, 0, &rpzs->updater, 0); + result = isc_task_create(taskmgr, &rpzs->updater, 0); if (result != ISC_R_SUCCESS) { goto cleanup_task; } diff --git a/lib/dns/view.c b/lib/dns/view.c index 56aeb913cf..b7e3fe83b3 100644 --- a/lib/dns/view.c +++ b/lib/dns/view.c @@ -636,7 +636,7 @@ dns_view_createresolver(dns_view_t *view, isc_loopmgr_t *loopmgr, REQUIRE(!view->frozen); REQUIRE(view->resolver == NULL); - result = isc_task_create(taskmgr, 0, &view->task, 0); + result = isc_task_create(taskmgr, &view->task, 0); if (result != ISC_R_SUCCESS) { return (result); } diff --git a/lib/dns/zone.c b/lib/dns/zone.c index ab28bf693e..bcdd3ce720 100644 --- a/lib/dns/zone.c +++ b/lib/dns/zone.c @@ -18860,8 +18860,7 @@ dns_zonemgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, zmgr->mctx, zmgr->workers * sizeof(zmgr->zonetasks[0])); memset(zmgr->zonetasks, 0, zmgr->workers * sizeof(zmgr->zonetasks[0])); for (size_t i = 0; i < zmgr->workers; i++) { - result = isc_task_create(zmgr->taskmgr, 2, &zmgr->zonetasks[i], - i); + result = isc_task_create(zmgr->taskmgr, &zmgr->zonetasks[i], i); INSIST(result == ISC_R_SUCCESS); if (result != ISC_R_SUCCESS) { INSIST(result == ISC_R_SUCCESS); @@ -18874,8 +18873,7 @@ dns_zonemgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, zmgr->mctx, zmgr->workers * sizeof(zmgr->loadtasks[0])); memset(zmgr->loadtasks, 0, zmgr->workers * sizeof(zmgr->loadtasks[0])); for (size_t i = 0; i < zmgr->workers; i++) { - result = isc_task_create(zmgr->taskmgr, UINT_MAX, - &zmgr->loadtasks[i], i); + result = isc_task_create(zmgr->taskmgr, &zmgr->loadtasks[i], i); INSIST(result == ISC_R_SUCCESS); if (result != ISC_R_SUCCESS) { goto free_loadtasks; diff --git a/lib/isc/httpd.c b/lib/isc/httpd.c index b154b3aa6c..e4c7d71699 100644 --- a/lib/isc/httpd.c +++ b/lib/isc/httpd.c @@ -1055,8 +1055,8 @@ cleanup_readhandle: void 
isc_httpdmgr_shutdown(isc_httpdmgr_t **httpdmgrp) { - isc_httpdmgr_t *httpdmgr; - isc_httpd_t *httpd; + isc_httpdmgr_t *httpdmgr = NULL; + isc_httpd_t *httpd = NULL; REQUIRE(httpdmgrp != NULL); REQUIRE(VALID_HTTPDMGR(*httpdmgrp)); @@ -1076,6 +1076,8 @@ isc_httpdmgr_shutdown(isc_httpdmgr_t **httpdmgrp) { } UNLOCK(&httpdmgr->lock); + isc_nmsocket_close(&httpdmgr->sock); + httpdmgr_detach(&httpdmgr); } diff --git a/lib/isc/include/isc/managers.h b/lib/isc/include/isc/managers.h index 3a0f4b4ddd..3f2061d0c6 100644 --- a/lib/isc/include/isc/managers.h +++ b/lib/isc/include/isc/managers.h @@ -13,6 +13,7 @@ #pragma once +#include #include #include #include @@ -20,11 +21,11 @@ typedef struct isc_managers isc_managers_t; -isc_result_t -isc_managers_create(isc_mem_t *mctx, size_t workers, size_t quantum, +void +isc_managers_create(isc_mem_t **mctx, uint32_t workers, isc_loopmgr_t **loopmgrp, isc_nm_t **netmgrp, isc_taskmgr_t **taskmgrp); void -isc_managers_destroy(isc_loopmgr_t **loopmgr, isc_nm_t **netmgrp, - isc_taskmgr_t **taskmgrp); +isc_managers_destroy(isc_mem_t **mctx, isc_loopmgr_t **loopmgrp, + isc_nm_t **netmgrp, isc_taskmgr_t **taskmgrp); diff --git a/lib/isc/include/isc/netmgr.h b/lib/isc/include/isc/netmgr.h index 75e66f6713..b282bf6d4d 100644 --- a/lib/isc/include/isc/netmgr.h +++ b/lib/isc/include/isc/netmgr.h @@ -85,10 +85,16 @@ typedef void (*isc_nm_opaquecb_t)(void *arg); * callbacks. */ -typedef void (*isc_nm_workcb_t)(void *arg); -typedef void (*isc_nm_after_workcb_t)(void *arg, isc_result_t result); +void +isc_netmgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, isc_nm_t **netmgrp); /*%< - * Callback functions for libuv threadpool work (see uv_work_t) + * Creates a new network manager and starts it running when loopmgr is started. + */ + +void +isc_netmgr_destroy(isc_nm_t **netmgrp); +/*%< + * Similar to isc_nm_detach(), but requires all other references to be gone.
*/ void @@ -98,16 +104,8 @@ isc_nm_detach(isc_nm_t **mgr0); /*%< * Attach/detach a network manager. When all references have been * released, the network manager is shut down, freeing all resources. - * Destroy is working the same way as detach, but it actively waits - * for all other references to be gone. */ -#define ISC_NETMGR_TID_UNKNOWN -1 - -/* Return thread ID of current thread, or ISC_NETMGR_TID_UNKNOWN */ -int -isc_nm_tid(void); - void isc_nmsocket_close(isc_nmsocket_t **sockp); /*%< @@ -708,32 +706,6 @@ isc_nm_verify_tls_peer_result_string(const isc_nmhandle_t *handle); * \li 'handle' is a valid netmgr handle object. */ -void -isc_nm_task_enqueue(isc_nm_t *mgr, isc_task_t *task, int tid); -/*%< - * Enqueue the 'task' onto the netmgr ievents queue. - * - * Requires: - * \li 'mgr' is a valid netmgr object - * \li 'task' is a valid task - * \li 'tid' is either the preferred netmgr tid or -1, in which case - * tid will be picked randomly. The tid is capped (by modulo) to - * maximum number of 'workers' as specifed in isc_nm_start() - */ - -void -isc_nm_work_offload(isc_nm_t *mgr, isc_nm_workcb_t work_cb, - isc_nm_after_workcb_t after_work_cb, void *data); -/*%< - * Schedules a job to be handled by the libuv thread pool (see uv_work_t). - * The function specified in `work_cb` will be run by a thread in the - * thread pool; when complete, the `after_work_cb` function will run. - * - * Requires: - * \li 'mgr' is a valid netmgr object. - * \li We are currently running in a network manager thread. - */ - void isc__nm_force_tid(int tid); /*%< @@ -741,12 +713,6 @@ isc__nm_force_tid(int tid); * tests and should not be used in any production code. 
*/ -uint32_t -isc_nm_getnworkers(const isc_nm_t *); -/*%< - * Return the number of active workers - */ - void isc_nmhandle_setwritetimeout(isc_nmhandle_t *handle, uint64_t write_timeout); diff --git a/lib/isc/include/isc/task.h b/lib/isc/include/isc/task.h index d91ea1b604..09d20b5fd9 100644 --- a/lib/isc/include/isc/task.h +++ b/lib/isc/include/isc/task.h @@ -62,6 +62,7 @@ #include #include #include +#include #include #include #include @@ -89,26 +90,15 @@ ISC_LANG_BEGINDECLS *** Types ***/ -#define isc_task_create(m, q, t, i) \ - isc__task_create(m, q, t, i ISC__TASKFILELINE) +#define isc_task_create(manager, taskp, tid) \ + isc__task_create(manager, taskp, tid ISC__TASKFILELINE) isc_result_t -isc__task_create(isc_taskmgr_t *manager, unsigned int quantum, - isc_task_t **taskp, int tid ISC__TASKFLARG); +isc__task_create(isc_taskmgr_t *manager, isc_task_t **taskp, + int tid ISC__TASKFLARG); /*%< * Create a task, bound to a particular thread id. * - * Notes: - * - *\li If 'quantum' is non-zero, then only that many events can be dispatched - * before the task must yield to other tasks waiting to execute. If - * quantum is zero, then the default quantum of the task manager will - * be used. - * - *\li The 'quantum' option may be removed from isc_task_create() in the - * future. If this happens, isc_task_getquantum() and - * isc_task_setquantum() will be provided. - * * Requires: * *\li 'manager' is a valid task manager. @@ -126,66 +116,7 @@ isc__task_create(isc_taskmgr_t *manager, unsigned int quantum, *\li #ISC_R_SHUTTINGDOWN */ -void -isc_task_ready(isc_task_t *task); -/*%< - * Enqueue the task onto netmgr queue. - */ - -isc_result_t -isc_task_run(isc_task_t *task); -/*%< - * Run all the queued events for the 'task', returning - * when the queue is empty or the number of events executed - * exceeds the 'quantum' specified when the task was created. - * - * Requires: - * - *\li 'task' is a valid task. 
- * - * Returns: - * - *\li #ISC_R_SUCCESS - *\li #ISC_R_QUOTA - */ - -void -isc_task_attach(isc_task_t *source, isc_task_t **targetp); -/*%< - * Attach *targetp to source. - * - * Requires: - * - *\li 'source' is a valid task. - * - *\li 'targetp' points to a NULL isc_task_t *. - * - * Ensures: - * - *\li *targetp is attached to source. - */ - -void -isc_task_detach(isc_task_t **taskp); -/*%< - * Detach *taskp from its task. - * - * Requires: - * - *\li '*taskp' is a valid task. - * - * Ensures: - * - *\li *taskp is NULL. - * - *\li If '*taskp' is the last reference to the task, the task is idle (has - * an empty event queue), and has not been shutdown, the task will be - * shutdown. - * - *\li If '*taskp' is the last reference to the task and - * the task has been shutdown, - * all resources used by the task will be freed. - */ +ISC_REFCOUNT_DECL(isc_task); void isc_task_send(isc_task_t *task, isc_event_t **eventp); @@ -228,34 +159,6 @@ isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp); * all resources used by the task will be freed. */ -bool -isc_task_purgeevent(isc_task_t *task, isc_event_t *event); -/*%< - * Purge 'event' from a task's event queue. - * - * Notes: - * - *\li If 'event' is on the task's event queue, it will be purged. 'event' - * does not have to be on the task's event queue; in fact, it can even be - * an invalid pointer. Purging only occurs if the event is actually on the - * task's event queue. - * - * \li Purging never changes the state of the task. - * - * Requires: - * - *\li 'task' is a valid task. - * - * Ensures: - * - *\li 'event' is not in the event queue for 'task'. - * - * Returns: - * - *\li #true The event was purged. - *\li #false The event was not in the event queue. - */ - void isc_task_setname(isc_task_t *task, const char *name, void *tag); /*%< @@ -272,6 +175,9 @@ isc_task_setname(isc_task_t *task, const char *name, void *tag); *\li 'task' is a valid task. 
*/ +isc_loopmgr_t * +isc_task_getloopmgr(isc_task_t *task); + const char * isc_task_getname(isc_task_t *task); /*%< @@ -290,9 +196,6 @@ isc_task_getname(isc_task_t *task); * */ -isc_nm_t * -isc_task_getnetmgr(isc_task_t *task); - void * isc_task_gettag(isc_task_t *task); /*%< @@ -306,15 +209,6 @@ isc_task_gettag(isc_task_t *task); */ void -isc_task_setquantum(isc_task_t *task, unsigned int quantum); -/*%< - * Set future 'task' quantum to 'quantum'. The current 'task' quantum will be - * kept for the current isc_task_run() loop, and will be changed for the next - * run. Therefore, the function is save to use from the event callback as it - * will not affect the current event loop processing. - */ - -isc_result_t isc_task_beginexclusive(isc_task_t *task); /*%< * Request exclusive access for 'task', which must be the calling @@ -339,8 +233,8 @@ isc_task_beginexclusive(isc_task_t *task); void isc_task_endexclusive(isc_task_t *task); /*%< - * Relinquish the exclusive access obtained by isc_task_beginexclusive(), - * allowing other tasks to execute. + * Relinquish the exclusive access obtained by + *isc_task_beginexclusive(), allowing other tasks to execute. * * Requires: *\li 'task' is the calling task, and has obtained @@ -351,6 +245,72 @@ isc_task_endexclusive(isc_task_t *task); ***** Task Manager. *****/ +void +isc_taskmgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, + isc_taskmgr_t **managerp); +/*%< + * Create a new task manager. + * + * Notes: + * + *\li This is meant to be called from isc_managers_create(). + * + * Requires: + * + *\li 'mctx' is a valid memory context. + + *\li 'loopmgr' is a valid loop manager. + * + *\li managerp != NULL && *managerp == NULL + * + * Ensures: + * + *\li On success, '*managerp' will be attached to the newly created task + * manager. + * + * Returns: + * + *\li #ISC_R_SUCCESS + *\li #ISC_R_NOMEMORY + *\li #ISC_R_NOTHREADS No threads could be created. + *\li #ISC_R_UNEXPECTED An unexpected error occurred. 
+ *\li #ISC_R_SHUTTINGDOWN The non-threaded, shared, task + * manager shutting down. + */ + +void +isc_taskmgr_destroy(isc_taskmgr_t **managerp); +/*%< + * Destroy '*managerp'. + * + * Notes: + * + *\li Calling isc_taskmgr_destroy() will shut down all tasks managed by + * *managerp that haven't already been shutdown. The call will block + * until all tasks have entered the done state. + * + *\li isc_taskmgr_destroy() must not be called by a task event action, + * because it would block forever waiting for the event action to + * complete. An event action that wants to cause task manager shutdown + * should request some non-event action thread of execution to do the + * shutdown, e.g. by signaling a condition variable or using + * isc_loopmgr_shutdown(). + * + *\li The task manager is reference counted and will be destroyed when + * the last reference is detached. The only difference between this + * function and isc_taskmgr_detach() is that this one will assert if + * more than 1 reference is held. This function is only meant to be + * called from isc_managers_destroy(), by which time all other + * references should have been detached. If any are still being held, + * it's a programming error, and we want to crash. + * + * Requires: + * + *\li '*managerp' is a valid task manager. + * + *\li No other references to the task manager are being held.
+ */ + void isc_taskmgr_attach(isc_taskmgr_t *, isc_taskmgr_t **); void diff --git a/lib/isc/include/isc/types.h b/lib/isc/include/isc/types.h index fb951dc4c8..f8fd98af24 100644 --- a/lib/isc/include/isc/types.h +++ b/lib/isc/include/isc/types.h @@ -79,16 +79,14 @@ typedef struct isc_signal isc_signal_t; /*%< Signal handler */ typedef struct isc_sockaddr isc_sockaddr_t; /*%< Socket Address */ typedef ISC_LIST(isc_sockaddr_t) isc_sockaddrlist_t; /*%< Socket Address List * */ -typedef struct isc_stats isc_stats_t; /*%< Statistics */ -typedef int_fast64_t isc_statscounter_t; -typedef struct isc_symtab isc_symtab_t; /*%< Symbol Table */ -typedef struct isc_task isc_task_t; /*%< Task */ -typedef ISC_LIST(isc_task_t) isc_tasklist_t; /*%< Task List */ +typedef struct isc_stats isc_stats_t; /*%< Statistics */ +typedef int_fast64_t isc_statscounter_t; +typedef struct isc_symtab isc_symtab_t; /*%< Symbol Table */ +typedef struct isc_task isc_task_t; /*%< Task */ typedef struct isc_taskmgr isc_taskmgr_t; /*%< Task Manager */ typedef struct isc_textregion isc_textregion_t; /*%< Text Region */ typedef struct isc_time isc_time_t; /*%< Time */ typedef struct isc_timer isc_timer_t; /*%< Timer */ -typedef struct isc_timermgr isc_timermgr_t; /*%< Timer Manager */ typedef struct isc_work isc_work_t; /*%< Work offloaded to an * external thread */ diff --git a/lib/isc/include/isc/util.h b/lib/isc/include/isc/util.h index 0d49b9587c..243ded9759 100644 --- a/lib/isc/include/isc/util.h +++ b/lib/isc/include/isc/util.h @@ -193,6 +193,11 @@ isc_rwlock_unlock((lp), (t)); \ } +#define RDLOCK(lp) RWLOCK(lp, isc_rwlocktype_read) +#define RDUNLOCK(lp) RWUNLOCK(lp, isc_rwlocktype_read) +#define WRLOCK(lp) RWLOCK(lp, isc_rwlocktype_write) +#define WRUNLOCK(lp) RWUNLOCK(lp, isc_rwlocktype_write) + /* * List Macros. 
*/ diff --git a/lib/isc/log.c b/lib/isc/log.c index 95c3e6b05d..e02479d0b7 100644 --- a/lib/isc/log.c +++ b/lib/isc/log.c @@ -42,11 +42,6 @@ #define LCFG_MAGIC ISC_MAGIC('L', 'c', 'f', 'g') #define VALID_CONFIG(lcfg) ISC_MAGIC_VALID(lcfg, LCFG_MAGIC) -#define RDLOCK(lp) RWLOCK(lp, isc_rwlocktype_read); -#define WRLOCK(lp) RWLOCK(lp, isc_rwlocktype_write); -#define RDUNLOCK(lp) RWUNLOCK(lp, isc_rwlocktype_read); -#define WRUNLOCK(lp) RWUNLOCK(lp, isc_rwlocktype_write); - static thread_local bool forcelog = false; /* diff --git a/lib/isc/managers.c b/lib/isc/managers.c index b24115da22..2fa8177720 100644 --- a/lib/isc/managers.c +++ b/lib/isc/managers.c @@ -11,92 +11,44 @@ * information regarding copyright ownership. */ -#include #include #include -#include "netmgr_p.h" -#include "task_p.h" - -isc_result_t -isc_managers_create(isc_mem_t *mctx, size_t workers, size_t quantum, +void +isc_managers_create(isc_mem_t **mctxp, uint32_t workers, isc_loopmgr_t **loopmgrp, isc_nm_t **netmgrp, isc_taskmgr_t **taskmgrp) { - isc_result_t result; - isc_nm_t *netmgr = NULL; - isc_taskmgr_t *taskmgr = NULL; + REQUIRE(mctxp != NULL && *mctxp == NULL); + isc_mem_create(mctxp); + INSIST(*mctxp != NULL); + + REQUIRE(loopmgrp != NULL && *loopmgrp == NULL); + isc_loopmgr_create(*mctxp, workers, loopmgrp); + INSIST(*loopmgrp != NULL); REQUIRE(netmgrp != NULL && *netmgrp == NULL); - isc__netmgr_create(mctx, workers, &netmgr); - *netmgrp = netmgr; - INSIST(netmgr != NULL); + isc_netmgr_create(*mctxp, *loopmgrp, netmgrp); + INSIST(*netmgrp != NULL); - REQUIRE(taskmgrp == NULL || *taskmgrp == NULL); - if (taskmgrp != NULL) { - INSIST(netmgr != NULL); - result = isc__taskmgr_create(mctx, quantum, netmgr, &taskmgr); - if (result != ISC_R_SUCCESS) { - UNEXPECTED_ERROR(__FILE__, __LINE__, - "isc_taskmgr_create() failed: %s", - isc_result_totext(result)); - goto fail; - } - *taskmgrp = taskmgr; - } - - isc_loopmgr_create(mctx, workers, loopmgrp); - - return (ISC_R_SUCCESS); -fail: - 
isc_managers_destroy(loopmgrp, netmgrp, taskmgrp); - - return (result); + REQUIRE(taskmgrp != NULL && *taskmgrp == NULL); + isc_taskmgr_create(*mctxp, *loopmgrp, taskmgrp); + INSIST(*taskmgrp != NULL); } void -isc_managers_destroy(isc_loopmgr_t **loopmgrp, isc_nm_t **netmgrp, - isc_taskmgr_t **taskmgrp) { - /* - * If we have a taskmgr to clean up, then we must also have a netmgr. - */ - REQUIRE(taskmgrp == NULL || netmgrp != NULL); +isc_managers_destroy(isc_mem_t **mctxp, isc_loopmgr_t **loopmgrp, + isc_nm_t **netmgrp, isc_taskmgr_t **taskmgrp) { + REQUIRE(mctxp != NULL && *mctxp != NULL); + REQUIRE(loopmgrp != NULL && *loopmgrp != NULL); + REQUIRE(netmgrp != NULL && *netmgrp != NULL); + REQUIRE(taskmgrp != NULL && *taskmgrp != NULL); /* * The sequence of operations here is important: - * - * 1. Initiate shutdown of the taskmgr, sending shutdown events to - * all tasks that are not already shutting down. */ - if (taskmgrp != NULL) { - INSIST(*taskmgrp != NULL); - isc__taskmgr_shutdown(*taskmgrp); - } - - /* - * 2. Initiate shutdown of the network manager, freeing clients - * and other resources and preventing new connections, but do - * not stop processing of existing events. - */ - if (netmgrp != NULL) { - INSIST(*netmgrp != NULL); - isc__netmgr_shutdown(*netmgrp); - } - - /* - * 3. Finish destruction of the task manager when all tasks - * have completed. - */ - if (taskmgrp != NULL) { - isc__taskmgr_destroy(taskmgrp); - } - - /* - * 4. Finish destruction of the netmgr, and wait until all - * references have been released. 
- */ - if (netmgrp != NULL) { - isc__netmgr_destroy(netmgrp); - } + isc_taskmgr_destroy(taskmgrp); + isc_netmgr_destroy(netmgrp); isc_loopmgr_destroy(loopmgrp); + isc_mem_destroy(mctxp); } diff --git a/lib/isc/netmgr/http.c b/lib/isc/netmgr/http.c index 6d58906b89..2ad1cf3142 100644 --- a/lib/isc/netmgr/http.c +++ b/lib/isc/netmgr/http.c @@ -376,7 +376,7 @@ find_http_cstream(int32_t stream_id, isc_nm_http_session_t *session) { static isc_result_t new_http_cstream(isc_nmsocket_t *sock, http_cstream_t **streamp) { - isc_mem_t *mctx = sock->mgr->mctx; + isc_mem_t *mctx = sock->worker->mctx; const char *uri = NULL; bool post; http_cstream_t *stream = NULL; @@ -550,12 +550,14 @@ static int on_server_data_chunk_recv_callback(int32_t stream_id, const uint8_t *data, size_t len, isc_nm_http_session_t *session) { isc_nmsocket_h2_t *h2 = ISC_LIST_HEAD(session->sstreams); + isc_mem_t *mctx = h2->psock->worker->mctx; + while (h2 != NULL) { if (stream_id == h2->stream_id) { if (isc_buffer_base(&h2->rbuf) == NULL) { isc_buffer_init( &h2->rbuf, - isc_mem_allocate(session->mctx, + isc_mem_allocate(mctx, h2->content_length), MAX_DNS_MESSAGE_SIZE); } @@ -1019,7 +1021,7 @@ call_pending_callbacks(isc__nm_http_pending_callbacks_t pending_callbacks, while (cbreq != NULL) { isc__nm_uvreq_t *next = ISC_LIST_NEXT(cbreq, link); ISC_LIST_UNLINK(pending_callbacks, cbreq, link); - isc__nm_sendcb(cbreq->handle->sock, cbreq, result, false); + isc__nm_sendcb(cbreq->handle->sock, cbreq, result, true); cbreq = next; } } @@ -1087,12 +1089,14 @@ http_send_outgoing(isc_nm_http_session_t *session, isc_nmhandle_t *httphandle, return (false); } - /* We need to attach to the session->handle earlier because as an + /* + * We need to attach to the session->handle earlier because as an * indirect result of the nghttp2_session_mem_send() the session * might get closed and the handle detached. 
However, there is * still some outgoing data to handle and we need to call it * anyway if only to get the write callback passed here to get - * called properly. */ + * called properly. + */ isc_nmhandle_attach(session->handle, &transphandle); while (nghttp2_session_want_write(session->ngsession)) { @@ -1101,9 +1105,11 @@ http_send_outgoing(isc_nm_http_session_t *session, isc_nmhandle_t *httphandle, nghttp2_session_mem_send(session->ngsession, &data); const size_t new_total = total + pending; - /* Sometimes nghttp2_session_mem_send() does not return any + /* + * Sometimes nghttp2_session_mem_send() does not return any * data to send even though nghttp2_session_want_write() - * returns success. */ + * returns success. + */ if (pending == 0 || data == NULL) { break; } @@ -1126,28 +1132,33 @@ http_send_outgoing(isc_nm_http_session_t *session, isc_nmhandle_t *httphandle, isc_buffer_usedlength(session->pending_write_data); } - /* Here we are trying to flush the pending writes buffer earlier + /* + * Here we are trying to flush the pending writes buffer earlier * to avoid hitting unnecessary limitations on a TLS record size - * within some tools (e.g. flamethrower). */ + * within some tools (e.g. flamethrower). + */ if (max_total_write_size >= FLUSH_HTTP_WRITE_BUFFER_AFTER) { - /* Case 1: We have equal or more than - * FLUSH_HTTP_WRITE_BUFFER_AFTER bytes to send. Let's flush it. + /* + * Case 1: We have at least FLUSH_HTTP_WRITE_BUFFER_AFTER + * bytes to send. Let's flush it. */ total = max_total_write_size; } else if (session->sending > 0 && total > 0) { - /* Case 2: There is one or more write requests in flight and + /* + * Case 2: There is one or more write requests in flight and * we have some new data form nghttp2 to send. Let's put the * write callback (if any) into the pending write callbacks * list. 
Then let's return from the function: as soon as the * "in-flight" write callback get's called or we have reached * FLUSH_HTTP_WRITE_BUFFER_AFTER bytes in the write buffer, we - * will flush the buffer. */ + * will flush the buffer. + */ if (cb != NULL) { isc__nm_uvreq_t *newcb = NULL; INSIST(VALID_NMHANDLE(httphandle)); - newcb = isc__nm_uvreq_get(httphandle->sock->mgr, + newcb = isc__nm_uvreq_get(httphandle->sock->worker, httphandle->sock); newcb->cb.send = cb; newcb->cbarg = cbarg; @@ -1159,26 +1170,31 @@ http_send_outgoing(isc_nm_http_session_t *session, isc_nmhandle_t *httphandle, } else if (session->sending == 0 && total == 0 && session->pending_write_data != NULL) { - /* Case 3: There is no write in flight and we haven't got + /* + * Case 3: There is no write in flight and we haven't got * anything new from nghttp2, but there is some data pending - * in the write buffer. Let's flush the buffer. */ + * in the write buffer. Let's flush the buffer. + */ isc_region_t region = { 0 }; total = isc_buffer_usedlength(session->pending_write_data); INSIST(total > 0); isc_buffer_usedregion(session->pending_write_data, ®ion); INSIST(total == region.length); } else { - /* The other cases are, uninteresting, fall-through ones. */ - /* In the following cases (4-6) we will just bail out. */ - /* Case 4: There is nothing new to send, nor anything in the - * write buffer. */ - /* Case 5: There is nothing new to send and there is write - * request(s) in flight. */ - /* Case 6: There is nothing new to send nor there are any - * write requests in flight. */ - - /* Case 7: There is some new data to send and there are no any - * write requests in flight: Let's send the data.*/ + /* + * The other cases are uninteresting, fall-through ones. + * In the following cases (4-6) we will just bail out: + * + * Case 4: There is nothing new to send, nor anything in the + * write buffer. + * Case 5: There is nothing new to send and there are write + * request(s) in flight. 
+ * Case 6: There is nothing new to send nor are there any + * write requests in flight. + * + * Case 7: There is some new data to send and there are no + * write requests in flight: Let's send the data. + */ INSIST((total == 0 && session->pending_write_data == NULL) || (total == 0 && session->sending > 0) || (total == 0 && session->sending == 0) || @@ -1193,8 +1209,10 @@ http_send_outgoing(isc_nm_http_session_t *session, isc_nmhandle_t *httphandle, goto nothing_to_send; } - /* If we have reached the point it means that we need to send some - * data and flush the outgoing buffer. The code below does that. */ + /* + * If we have reached this point it means that we need to send some + * data and flush the outgoing buffer. The code below does that. + */ send = isc_mem_get(session->mctx, sizeof(*send)); *send = (isc_http_send_req_t){ .pending_write_data = @@ -1216,6 +1234,7 @@ http_send_outgoing(isc_nm_http_session_t *session, isc_nmhandle_t *httphandle, isc_buffer_usedregion(send->pending_write_data, &send_data); isc_nm_send(transphandle, &send_data, http_writecb, send); return (true); + nothing_to_send: isc_nmhandle_detach(&transphandle); return (false); @@ -1320,7 +1339,7 @@ http_call_connect_cb(isc_nmsocket_t *sock, isc_nm_http_session_t *session, REQUIRE(sock->connect_cb != NULL); if (result == ISC_R_SUCCESS) { - req = isc__nm_uvreq_get(sock->mgr, sock); + req = isc__nm_uvreq_get(sock->worker, sock); req->cb.connect = sock->connect_cb; req->cbarg = sock->connect_cbarg; if (session != NULL) { @@ -1358,11 +1377,10 @@ transport_connect_cb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) { REQUIRE(VALID_NMSOCK(transp_sock)); - mctx = transp_sock->mgr->mctx; + mctx = transp_sock->worker->mctx; INSIST(http_sock->h2.connect.uri != NULL); - http_sock->tid = transp_sock->tid; http_sock->h2.connect.tls_peer_verify_string = isc_nm_verify_tls_peer_result_string(handle); if (result != ISC_R_SUCCESS) { @@ -1429,7 +1447,8 @@ error: http_call_connect_cb(http_sock, 
session, result); if (http_sock->h2.connect.uri != NULL) { - isc_mem_free(mctx, http_sock->h2.connect.uri); + isc_mem_free(http_sock->worker->mctx, + http_sock->h2.connect.uri); } isc__nmsocket_prep_destroy(http_sock); @@ -1444,6 +1463,7 @@ isc_nm_httpconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, unsigned int timeout) { isc_sockaddr_t local_interface; isc_nmsocket_t *sock = NULL; + isc__networker_t *worker = &mgr->workers[isc_tid()]; REQUIRE(VALID_NM(mgr)); REQUIRE(cb != NULL); @@ -1456,17 +1476,16 @@ isc_nm_httpconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, local = &local_interface; } - sock = isc_mem_get(mgr->mctx, sizeof(*sock)); - isc__nmsocket_init(sock, mgr, isc_nm_httpsocket, local); + sock = isc_mem_get(worker->mctx, sizeof(*sock)); + isc__nmsocket_init(sock, worker, isc_nm_httpsocket, local); sock->connect_timeout = timeout; - sock->result = ISC_R_UNSET; sock->connect_cb = cb; sock->connect_cbarg = cbarg; atomic_init(&sock->client, true); - if (isc__nm_closing(sock)) { - isc__nm_uvreq_t *req = isc__nm_uvreq_get(mgr, sock); + if (isc__nm_closing(worker)) { + isc__nm_uvreq_t *req = isc__nm_uvreq_get(worker, sock); req->cb.connect = cb; req->cbarg = cbarg; @@ -1474,10 +1493,6 @@ isc_nm_httpconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, req->local = *local; req->handle = isc__nmhandle_get(sock, &req->peer, &sock->iface); - if (isc__nm_in_netthread()) { - sock->tid = isc_nm_tid(); - } - isc__nmsocket_clearcb(sock); isc__nm_connectcb(sock, req, ISC_R_SHUTTINGDOWN, true); isc__nmsocket_prep_destroy(sock); @@ -1485,8 +1500,8 @@ isc_nm_httpconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, return; } - sock->h2 = (isc_nmsocket_h2_t){ .connect.uri = isc_mem_strdup(mgr->mctx, - uri), + sock->h2 = (isc_nmsocket_h2_t){ .connect.uri = isc_mem_strdup( + sock->worker->mctx, uri), .connect.post = post, .connect.tlsctx = tlsctx }; ISC_LINK_INIT(&sock->h2, link); @@ -1513,7 +1528,7 @@ static 
isc_result_t client_send(isc_nmhandle_t *handle, const isc_region_t *region) { isc_result_t result = ISC_R_SUCCESS; isc_nmsocket_t *sock = handle->sock; - isc_mem_t *mctx = sock->mgr->mctx; + isc_mem_t *mctx = sock->worker->mctx; isc_nm_http_session_t *session = sock->h2.session; http_cstream_t *cstream = sock->h2.connect.cstream; @@ -1596,7 +1611,7 @@ isc__nm_http_request(isc_nmhandle_t *handle, isc_region_t *region, REQUIRE(VALID_NMHANDLE(handle)); REQUIRE(VALID_NMSOCK(handle->sock)); - REQUIRE(handle->sock->tid == isc_nm_tid()); + REQUIRE(handle->sock->tid == isc_tid()); REQUIRE(atomic_load(&handle->sock->client)); REQUIRE(cb != NULL); @@ -1628,6 +1643,7 @@ server_on_begin_headers_callback(nghttp2_session *ngsession, const nghttp2_frame *frame, void *user_data) { isc_nm_http_session_t *session = (isc_nm_http_session_t *)user_data; isc_nmsocket_t *socket = NULL; + isc__networker_t *worker = NULL; if (frame->hd.type != NGHTTP2_HEADERS || frame->headers.cat != NGHTTP2_HCAT_REQUEST) @@ -1641,9 +1657,11 @@ server_on_begin_headers_callback(nghttp2_session *ngsession, return (NGHTTP2_ERR_CALLBACK_FAILURE); } - socket = isc_mem_get(session->mctx, sizeof(isc_nmsocket_t)); - isc__nmsocket_init(socket, session->serversocket->mgr, - isc_nm_httpsocket, + INSIST(session->handle->sock->tid == isc_tid()); + + worker = session->handle->sock->worker; + socket = isc_mem_get(worker->mctx, sizeof(isc_nmsocket_t)); + isc__nmsocket_init(socket, worker, isc_nm_httpsocket, (isc_sockaddr_t *)&session->handle->sock->iface); socket->peer = session->handle->sock->peer; socket->h2 = (isc_nmsocket_h2_t){ @@ -1657,7 +1675,6 @@ server_on_begin_headers_callback(nghttp2_session *ngsession, isc_buffer_initnull(&socket->h2.wbuf); session->nsstreams++; isc__nm_httpsession_attach(session, &socket->h2.session); - socket->tid = session->handle->sock->tid; ISC_LINK_INIT(&socket->h2, link); ISC_LIST_APPEND(session->sstreams, &socket->h2, link); @@ -1694,13 +1711,13 @@ 
server_handle_path_header(isc_nmsocket_t *socket, const uint8_t *value, } if (socket->h2.request_path != NULL) { - isc_mem_free(socket->mgr->mctx, socket->h2.request_path); + isc_mem_free(socket->worker->mctx, socket->h2.request_path); } socket->h2.request_path = isc_mem_strndup( - socket->mgr->mctx, (const char *)value, vlen + 1); + socket->worker->mctx, (const char *)value, vlen + 1); if (!isc_nm_http_path_isvalid(socket->h2.request_path)) { - isc_mem_free(socket->mgr->mctx, socket->h2.request_path); + isc_mem_free(socket->worker->mctx, socket->h2.request_path); socket->h2.request_path = NULL; return (ISC_HTTP_ERROR_BAD_REQUEST); } @@ -1712,7 +1729,7 @@ server_handle_path_header(isc_nmsocket_t *socket, const uint8_t *value, socket->h2.cb = handler->cb; socket->h2.cbarg = handler->cbarg; } else { - isc_mem_free(socket->mgr->mctx, socket->h2.request_path); + isc_mem_free(socket->worker->mctx, socket->h2.request_path); socket->h2.request_path = NULL; return (ISC_HTTP_ERROR_NOT_FOUND); } @@ -1726,12 +1743,12 @@ server_handle_path_header(isc_nmsocket_t *socket, const uint8_t *value, const size_t decoded_size = dns_value_len / 4 * 3; if (decoded_size <= MAX_DNS_MESSAGE_SIZE) { if (socket->h2.query_data != NULL) { - isc_mem_free(socket->mgr->mctx, + isc_mem_free(socket->worker->mctx, socket->h2.query_data); } socket->h2.query_data = isc__nm_base64url_to_base64( - socket->mgr->mctx, dns_value, + socket->worker->mctx, dns_value, dns_value_len, &socket->h2.query_data_len); } else { @@ -2157,8 +2174,9 @@ isc__nm_http_send(isc_nmhandle_t *handle, const isc_region_t *region, sock = handle->sock; REQUIRE(VALID_NMSOCK(sock)); + REQUIRE(sock->tid == isc_tid()); - uvreq = isc__nm_uvreq_get(sock->mgr, sock); + uvreq = isc__nm_uvreq_get(sock->worker, sock); isc_nmhandle_attach(handle, &uvreq->handle); uvreq->cb.send = cb; uvreq->cbarg = cbarg; @@ -2166,9 +2184,8 @@ isc__nm_http_send(isc_nmhandle_t *handle, const isc_region_t *region, uvreq->uvbuf.base = (char *)region->base; 
uvreq->uvbuf.len = region->length; - ievent = isc__nm_get_netievent_httpsend(sock->mgr, sock, uvreq); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_httpsend(sock->worker, sock, uvreq); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } static void @@ -2216,7 +2233,7 @@ server_httpsend(isc_nmhandle_t *handle, isc_nmsocket_t *sock, return; } - INSIST(handle->httpsession->handle->sock->tid == isc_nm_tid()); + INSIST(handle->sock->tid == isc_tid()); INSIST(VALID_NMHANDLE(handle->httpsession->handle)); INSIST(VALID_NMSOCK(handle->httpsession->handle->sock)); @@ -2467,7 +2484,7 @@ httplisten_acceptcb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) { http_transpost_tcp_nodelay(handle); - new_session(httplistensock->mgr->mctx, NULL, &session); + new_session(handle->sock->worker->mctx, NULL, &session); session->max_concurrent_streams = atomic_load(&httplistensock->h2.max_concurrent_streams); initialize_nghttp2_server_session(session); @@ -2489,13 +2506,15 @@ isc_nm_listenhttp(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface, isc_nmsocket_t **sockp) { isc_nmsocket_t *sock = NULL; isc_result_t result; + isc__networker_t *worker = &mgr->workers[isc_tid()]; REQUIRE(!ISC_LIST_EMPTY(eps->handlers)); REQUIRE(!ISC_LIST_EMPTY(eps->handler_cbargs)); REQUIRE(atomic_load(&eps->in_use) == false); + REQUIRE(isc_tid() == 0); - sock = isc_mem_get(mgr->mctx, sizeof(*sock)); - isc__nmsocket_init(sock, mgr, isc_nm_httplistener, iface); + sock = isc_mem_get(worker->mctx, sizeof(*sock)); + isc__nmsocket_init(sock, worker, isc_nm_httplistener, iface); atomic_init(&sock->h2.max_concurrent_streams, NGHTTP2_INITIAL_MAX_CONCURRENT_STREAMS); @@ -2523,8 +2542,6 @@ isc_nm_listenhttp(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface, isc__nmsocket_attach(sock, &sock->outer->h2.httpserver); sock->nchildren = sock->outer->nchildren; - sock->result = ISC_R_UNSET; - sock->tid = 0; sock->fd = 
(uv_os_sock_t)-1; atomic_store(&sock->listening, true); @@ -2697,16 +2714,9 @@ isc__nm_http_stoplistening(isc_nmsocket_t *sock) { UNREACHABLE(); } - if (!isc__nm_in_netthread()) { - isc__netievent_httpstop_t *ievent = - isc__nm_get_netievent_httpstop(sock->mgr, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); - } else { - REQUIRE(isc_nm_tid() == sock->tid); - isc__netievent_httpstop_t ievent = { .sock = sock }; - isc__nm_async_httpstop(NULL, (isc__netievent_t *)&ievent); - } + REQUIRE(isc_tid() == sock->tid); + isc__netievent_httpstop_t ievent = { .sock = sock }; + isc__nm_async_httpstop(NULL, (isc__netievent_t *)&ievent); } void @@ -2762,11 +2772,11 @@ isc__nm_http_close(isc_nmsocket_t *sock) { } if (sock->h2.session != NULL && sock->h2.session->closed && - sock->tid == isc_nm_tid()) + sock->tid == isc_tid()) { isc__nm_httpsession_detach(&sock->h2.session); destroy = true; - } else if (sock->h2.session == NULL && sock->tid == isc_nm_tid()) { + } else if (sock->h2.session == NULL && sock->tid == isc_tid()) { destroy = true; } @@ -2777,10 +2787,9 @@ isc__nm_http_close(isc_nmsocket_t *sock) { } isc__netievent_httpclose_t *ievent = - isc__nm_get_netievent_httpclose(sock->mgr, sock); + isc__nm_get_netievent_httpclose(sock->worker, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } void @@ -2789,7 +2798,7 @@ isc__nm_async_httpclose(isc__networker_t *worker, isc__netievent_t *ev0) { isc_nmsocket_t *sock = ievent->sock; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); UNUSED(worker); @@ -2991,17 +3000,22 @@ isc__nm_http_set_max_streams(isc_nmsocket_t *listener, void isc_nm_http_set_endpoints(isc_nmsocket_t *listener, isc_nm_http_endpoints_t *eps) { + isc_loopmgr_t *loopmgr = NULL; + REQUIRE(VALID_NMSOCK(listener)); REQUIRE(listener->type == isc_nm_httplistener); 
REQUIRE(VALID_HTTP_ENDPOINTS(eps)); + loopmgr = listener->worker->netmgr->loopmgr; + atomic_store(&eps->in_use, true); - for (size_t i = 0; i < isc_nm_getnworkers(listener->mgr); i++) { + for (size_t i = 0; i < isc_loopmgr_nloops(loopmgr); i++) { isc__netievent__http_eps_t *ievent = - isc__nm_get_netievent_httpendpoints(listener->mgr, - listener, eps); - isc__nm_enqueue_ievent(&listener->mgr->workers[i], + isc__nm_get_netievent_httpendpoints( + &listener->worker->netmgr->workers[i], listener, + eps); + isc__nm_enqueue_ievent(&listener->worker->netmgr->workers[i], (isc__netievent_t *)ievent); } } @@ -3009,9 +3023,10 @@ isc_nm_http_set_endpoints(isc_nmsocket_t *listener, void isc__nm_async_httpendpoints(isc__networker_t *worker, isc__netievent_t *ev0) { isc__netievent__http_eps_t *ievent = (isc__netievent__http_eps_t *)ev0; - const int tid = isc_nm_tid(); + const int tid = isc_tid(); isc_nmsocket_t *listener = ievent->sock; isc_nm_http_endpoints_t *eps = ievent->endpoints; + UNUSED(worker); isc_nm_http_endpoints_detach(&listener->h2.listener_endpoints[tid]); @@ -3023,16 +3038,18 @@ static void http_init_listener_endpoints(isc_nmsocket_t *listener, isc_nm_http_endpoints_t *epset) { size_t nworkers; + isc_loopmgr_t *loopmgr = NULL; REQUIRE(VALID_NMSOCK(listener)); - REQUIRE(VALID_NM(listener->mgr)); + REQUIRE(listener->worker != NULL && VALID_NM(listener->worker->netmgr)); REQUIRE(VALID_HTTP_ENDPOINTS(epset)); - nworkers = (size_t)isc_nm_getnworkers(listener->mgr); + loopmgr = listener->worker->netmgr->loopmgr; + nworkers = (size_t)isc_loopmgr_nloops(loopmgr); INSIST(nworkers > 0); listener->h2.listener_endpoints = - isc_mem_get(listener->mgr->mctx, + isc_mem_get(listener->worker->mctx, sizeof(isc_nm_http_endpoints_t *) * nworkers); listener->h2.n_listener_endpoints = nworkers; for (size_t i = 0; i < nworkers; i++) { @@ -3044,7 +3061,7 @@ http_init_listener_endpoints(isc_nmsocket_t *listener, static void http_cleanup_listener_endpoints(isc_nmsocket_t *listener) { - 
REQUIRE(VALID_NM(listener->mgr)); + REQUIRE(listener->worker != NULL && VALID_NM(listener->worker->netmgr)); if (listener->h2.listener_endpoints == NULL) { return; @@ -3054,7 +3071,7 @@ http_cleanup_listener_endpoints(isc_nmsocket_t *listener) { isc_nm_http_endpoints_detach( &listener->h2.listener_endpoints[i]); } - isc_mem_put(listener->mgr->mctx, listener->h2.listener_endpoints, + isc_mem_put(listener->worker->mctx, listener->h2.listener_endpoints, sizeof(isc_nm_http_endpoints_t *) * listener->h2.n_listener_endpoints); listener->h2.n_listener_endpoints = 0; @@ -3229,12 +3246,12 @@ isc__nm_http_cleanup_data(isc_nmsocket_t *sock) { } if (sock->h2.request_path != NULL) { - isc_mem_free(sock->mgr->mctx, sock->h2.request_path); + isc_mem_free(sock->worker->mctx, sock->h2.request_path); sock->h2.request_path = NULL; } if (sock->h2.query_data != NULL) { - isc_mem_free(sock->mgr->mctx, sock->h2.query_data); + isc_mem_free(sock->worker->mctx, sock->h2.query_data); sock->h2.query_data = NULL; } @@ -3242,7 +3259,7 @@ isc__nm_http_cleanup_data(isc_nmsocket_t *sock) { if (isc_buffer_base(&sock->h2.rbuf) != NULL) { void *base = isc_buffer_base(&sock->h2.rbuf); - isc_mem_free(sock->mgr->mctx, base); + isc_mem_free(sock->worker->mctx, base); isc_buffer_initnull(&sock->h2.rbuf); } } @@ -3254,7 +3271,7 @@ isc__nm_http_cleanup_data(isc_nmsocket_t *sock) { sock->h2.session != NULL) { if (sock->h2.connect.uri != NULL) { - isc_mem_free(sock->mgr->mctx, sock->h2.connect.uri); + isc_mem_free(sock->worker->mctx, sock->h2.connect.uri); sock->h2.connect.uri = NULL; } isc__nm_httpsession_detach(&sock->h2.session); diff --git a/lib/isc/netmgr/netmgr-int.h b/lib/isc/netmgr/netmgr-int.h index 96f863fa4d..1479bfe1b5 100644 --- a/lib/isc/netmgr/netmgr-int.h +++ b/lib/isc/netmgr/netmgr-int.h @@ -34,12 +34,14 @@ #include #include #include +#include #include #include #include -/* Must be different from ISC_NETMGR_TID_UNKNOWN */ -#define ISC_NETMGR_NON_INTERLOCKED -2 +#include "../loop_p.h" + 
+#define ISC_NETMGR_TID_UNKNOWN -1 /* * Receive buffers @@ -129,7 +131,7 @@ isc__nm_dump_active(isc_nm_t *nm); #ifdef NETMGR_TRACE_VERBOSE #define NETMGR_TRACE_LOG(format, ...) \ fprintf(stderr, "%" PRIu32 ":%d:%s:%u:%s:" format, gettid(), \ - isc_nm_tid(), file, line, func, __VA_ARGS__) + isc_tid(), file, line, func, __VA_ARGS__) #else #define NETMGR_TRACE_LOG(format, ...) \ (void)file; \ @@ -189,48 +191,26 @@ isc__nm_dump_active(isc_nm_t *nm); #define isc__nmsocket_prep_destroy(sock) isc___nmsocket_prep_destroy(sock) #endif -/* - * Queue types in the order of processing priority. - */ -typedef enum { - NETIEVENT_PRIORITY = 0, - NETIEVENT_TASK = 1, - NETIEVENT_NORMAL = 2, - NETIEVENT_MAX = 3, -} netievent_type_t; - typedef struct isc__nm_uvreq isc__nm_uvreq_t; typedef struct isc__netievent isc__netievent_t; -typedef ISC_LIST(isc__netievent_t) isc__netievent_list_t; - -typedef struct ievent { - isc_mutex_t lock; - isc_condition_t cond; - isc__netievent_list_t list; -} ievent_t; - /* * Single network event loop worker. */ typedef struct isc__networker { - isc_nm_t *mgr; - int id; /* thread id */ - uv_loop_t loop; /* libuv loop structure */ - uv_async_t async; /* async channel to send - * data to this networker */ - bool paused; - bool finished; - isc_thread_t thread; - ievent_t ievents[NETIEVENT_MAX]; - + isc_mem_t *mctx; isc_refcount_t references; - atomic_int_fast64_t pktcount; + isc_loop_t *loop; + isc_nm_t *netmgr; + bool shuttingdown; + char *recvbuf; char *sendbuf; bool recvbuf_inuse; } isc__networker_t; +ISC_REFCOUNT_DECL(isc__networker); + /* * A general handle for a connection bound to a networker. 
For UDP * connections we have peer address here, so both TCP and UDP can be @@ -272,14 +252,8 @@ struct isc_nmhandle { }; typedef enum isc__netievent_type { - netievent_udpconnect, - netievent_udpclose, - netievent_udpsend, - netievent_udpread, netievent_udpcancel, - netievent_routeconnect, - netievent_tcpconnect, netievent_tcpclose, netievent_tcpsend, @@ -315,27 +289,16 @@ typedef enum isc__netievent_type { netievent_httpsend, netievent_httpendpoints, - netievent_shutdown, - netievent_stop, - netievent_pause, - netievent_connectcb, netievent_readcb, netievent_sendcb, - netievent_task, - netievent_settlsctx, - /* - * event type values higher than this will be treated - * as high-priority events, which can be processed - * while the netmgr is pausing or paused. - */ - netievent_prio = 0xff, - netievent_udplisten, netievent_udpstop, + netievent_udpread, + netievent_tcplisten, netievent_tcpstop, netievent_tcpdnslisten, @@ -344,9 +307,7 @@ typedef enum isc__netievent_type { netievent_tlsdnsstop, netievent_httpstop, - netievent_resume, netievent_detach, - netievent_close, } isc__netievent_type; typedef union { @@ -389,18 +350,17 @@ struct isc__nm_uvreq { uv_connect_t connect; uv_udp_send_t udp_send; uv_fs_t fs; - uv_work_t work; } uv_req; ISC_LINK(isc__nm_uvreq_t) link; }; void * -isc__nm_get_netievent(isc_nm_t *mgr, isc__netievent_type type); +isc__nm_get_netievent(isc__networker_t *worker, isc__netievent_type type); /*%< * Allocate an ievent and set the type. 
*/ void -isc__nm_put_netievent(isc_nm_t *mgr, void *ievent); +isc__nm_put_netievent(isc__networker_t *worker, void *ievent); /* * The macros here are used to simulate the "inheritance" in C, there's the base @@ -432,6 +392,7 @@ isc__nm_put_netievent(isc_nm_t *mgr, void *ievent); #define NETIEVENT__SOCKET \ isc__netievent_type type; \ ISC_LINK(isc__netievent_t) link; \ + isc__networker_t *worker; \ isc_nmsocket_t *sock; \ const char *file; \ unsigned int line; \ @@ -444,26 +405,26 @@ typedef struct isc__netievent__socket { #define NETIEVENT_SOCKET_TYPE(type) \ typedef isc__netievent__socket_t isc__netievent_##type##_t; -#define NETIEVENT_SOCKET_DECL(type) \ - isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock); \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ +#define NETIEVENT_SOCKET_DECL(type) \ + isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ + isc__networker_t *worker, isc_nmsocket_t *sock); \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent); #define NETIEVENT_SOCKET_DEF(type) \ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock) { \ + isc__networker_t *worker, isc_nmsocket_t *sock) { \ isc__netievent_##type##_t *ievent = \ - isc__nm_get_netievent(nm, netievent_##type); \ + isc__nm_get_netievent(worker, netievent_##type); \ isc__nmsocket_attach(sock, &ievent->sock); \ \ return (ievent); \ } \ \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent) { \ isc__nmsocket_detach(&ievent->sock); \ - isc__nm_put_netievent(nm, ievent); \ + isc__nm_put_netievent(worker, ievent); \ } typedef struct isc__netievent__socket_req { @@ -474,27 +435,29 @@ typedef struct isc__netievent__socket_req { #define NETIEVENT_SOCKET_REQ_TYPE(type) \ typedef isc__netievent__socket_req_t isc__netievent_##type##_t; -#define 
NETIEVENT_SOCKET_REQ_DECL(type) \ - isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req); \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ +#define NETIEVENT_SOCKET_REQ_DECL(type) \ + isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ + isc__nm_uvreq_t *req); \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent); #define NETIEVENT_SOCKET_REQ_DEF(type) \ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ + isc__nm_uvreq_t *req) { \ isc__netievent_##type##_t *ievent = \ - isc__nm_get_netievent(nm, netievent_##type); \ + isc__nm_get_netievent(worker, netievent_##type); \ isc__nmsocket_attach(sock, &ievent->sock); \ ievent->req = req; \ \ return (ievent); \ } \ \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent) { \ isc__nmsocket_detach(&ievent->sock); \ - isc__nm_put_netievent(nm, ievent); \ + isc__nm_put_netievent(worker, ievent); \ } typedef struct isc__netievent__socket_req_result { @@ -506,19 +469,19 @@ typedef struct isc__netievent__socket_req_result { #define NETIEVENT_SOCKET_REQ_RESULT_TYPE(type) \ typedef isc__netievent__socket_req_result_t isc__netievent_##type##_t; -#define NETIEVENT_SOCKET_REQ_RESULT_DECL(type) \ - isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req, \ - isc_result_t result); \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ +#define NETIEVENT_SOCKET_REQ_RESULT_DECL(type) \ + isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ + isc__nm_uvreq_t *req, isc_result_t result); \ + void 
isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent); #define NETIEVENT_SOCKET_REQ_RESULT_DEF(type) \ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, isc__nm_uvreq_t *req, \ - isc_result_t result) { \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ + isc__nm_uvreq_t *req, isc_result_t result) { \ isc__netievent_##type##_t *ievent = \ - isc__nm_get_netievent(nm, netievent_##type); \ + isc__nm_get_netievent(worker, netievent_##type); \ isc__nmsocket_attach(sock, &ievent->sock); \ ievent->req = req; \ ievent->result = result; \ @@ -526,10 +489,10 @@ typedef struct isc__netievent__socket_req_result { return (ievent); \ } \ \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent) { \ isc__nmsocket_detach(&ievent->sock); \ - isc__nm_put_netievent(nm, ievent); \ + isc__nm_put_netievent(worker, ievent); \ } typedef struct isc__netievent__socket_handle { @@ -540,28 +503,30 @@ typedef struct isc__netievent__socket_handle { #define NETIEVENT_SOCKET_HANDLE_TYPE(type) \ typedef isc__netievent__socket_handle_t isc__netievent_##type##_t; -#define NETIEVENT_SOCKET_HANDLE_DECL(type) \ - isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, isc_nmhandle_t *handle); \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ +#define NETIEVENT_SOCKET_HANDLE_DECL(type) \ + isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ + isc_nmhandle_t *handle); \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent); #define NETIEVENT_SOCKET_HANDLE_DEF(type) \ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, isc_nmhandle_t *handle) { \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ + isc_nmhandle_t *handle) { \ 
isc__netievent_##type##_t *ievent = \ - isc__nm_get_netievent(nm, netievent_##type); \ + isc__nm_get_netievent(worker, netievent_##type); \ isc__nmsocket_attach(sock, &ievent->sock); \ isc_nmhandle_attach(handle, &ievent->handle); \ \ return (ievent); \ } \ \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent) { \ isc__nmsocket_detach(&ievent->sock); \ isc_nmhandle_detach(&ievent->handle); \ - isc__nm_put_netievent(nm, ievent); \ + isc__nm_put_netievent(worker, ievent); \ } typedef struct isc__netievent__socket_quota { @@ -572,66 +537,31 @@ typedef struct isc__netievent__socket_quota { #define NETIEVENT_SOCKET_QUOTA_TYPE(type) \ typedef isc__netievent__socket_quota_t isc__netievent_##type##_t; -#define NETIEVENT_SOCKET_QUOTA_DECL(type) \ - isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, isc_quota_t *quota); \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ +#define NETIEVENT_SOCKET_QUOTA_DECL(type) \ + isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ + isc_quota_t *quota); \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent); #define NETIEVENT_SOCKET_QUOTA_DEF(type) \ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, isc_quota_t *quota) { \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ + isc_quota_t *quota) { \ isc__netievent_##type##_t *ievent = \ - isc__nm_get_netievent(nm, netievent_##type); \ + isc__nm_get_netievent(worker, netievent_##type); \ isc__nmsocket_attach(sock, &ievent->sock); \ ievent->quota = quota; \ \ return (ievent); \ } \ \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent) { \ isc__nmsocket_detach(&ievent->sock); \ - 
isc__nm_put_netievent(nm, ievent); \ + isc__nm_put_netievent(worker, ievent); \ } -typedef struct isc__netievent__task { - isc__netievent_type type; - ISC_LINK(isc__netievent_t) link; - isc_task_t *task; -} isc__netievent__task_t; - -#define NETIEVENT_TASK_TYPE(type) \ - typedef isc__netievent__task_t isc__netievent_##type##_t; - -#define NETIEVENT_TASK_DECL(type) \ - isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_task_t *task); \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ - isc__netievent_##type##_t *ievent); - -#define NETIEVENT_TASK_DEF(type) \ - isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_task_t *task) { \ - isc__netievent_##type##_t *ievent = \ - isc__nm_get_netievent(nm, netievent_##type); \ - ievent->task = task; \ - \ - return (ievent); \ - } \ - \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ - isc__netievent_##type##_t *ievent) { \ - ievent->task = NULL; \ - isc__nm_put_netievent(nm, ievent); \ - } - -typedef struct isc__netievent_udpsend { - NETIEVENT__SOCKET; - isc_sockaddr_t peer; - isc__nm_uvreq_t *req; -} isc__netievent_udpsend_t; - typedef struct isc__netievent_tlsconnect { NETIEVENT__SOCKET; SSL_CTX *ctx; @@ -642,27 +572,29 @@ typedef struct isc__netievent_tlsconnect { typedef struct isc__netievent { isc__netievent_type type; ISC_LINK(isc__netievent_t) link; + isc__networker_t *worker; } isc__netievent_t; #define NETIEVENT_TYPE(type) typedef isc__netievent_t isc__netievent_##type##_t; -#define NETIEVENT_DECL(type) \ - isc__netievent_##type##_t *isc__nm_get_netievent_##type(isc_nm_t *nm); \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ +#define NETIEVENT_DECL(type) \ + isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ + isc__networker_t *worker); \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent); #define NETIEVENT_DEF(type) \ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm) { \ 
+ isc__networker_t *worker) { \ isc__netievent_##type##_t *ievent = \ - isc__nm_get_netievent(nm, netievent_##type); \ + isc__nm_get_netievent(worker, netievent_##type); \ \ return (ievent); \ } \ \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent) { \ - isc__nm_put_netievent(nm, ievent); \ + isc__nm_put_netievent(worker, ievent); \ } typedef struct isc__netievent__tlsctx { @@ -673,28 +605,30 @@ typedef struct isc__netievent__tlsctx { #define NETIEVENT_SOCKET_TLSCTX_TYPE(type) \ typedef isc__netievent__tlsctx_t isc__netievent_##type##_t; -#define NETIEVENT_SOCKET_TLSCTX_DECL(type) \ - isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, isc_tlsctx_t *tlsctx); \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ +#define NETIEVENT_SOCKET_TLSCTX_DECL(type) \ + isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ + isc_tlsctx_t *tlsctx); \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent); #define NETIEVENT_SOCKET_TLSCTX_DEF(type) \ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, isc_tlsctx_t *tlsctx) { \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ + isc_tlsctx_t *tlsctx) { \ isc__netievent_##type##_t *ievent = \ - isc__nm_get_netievent(nm, netievent_##type); \ + isc__nm_get_netievent(worker, netievent_##type); \ isc__nmsocket_attach(sock, &ievent->sock); \ isc_tlsctx_attach(tlsctx, &ievent->tlsctx); \ \ return (ievent); \ } \ \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent) { \ isc_tlsctx_free(&ievent->tlsctx); \ isc__nmsocket_detach(&ievent->sock); \ - isc__nm_put_netievent(nm, ievent); \ + isc__nm_put_netievent(worker, ievent); \ } #ifdef HAVE_LIBNGHTTP2 @@ 
-706,30 +640,30 @@ typedef struct isc__netievent__http_eps { #define NETIEVENT_SOCKET_HTTP_EPS_TYPE(type) \ typedef isc__netievent__http_eps_t isc__netievent_##type##_t; -#define NETIEVENT_SOCKET_HTTP_EPS_DECL(type) \ - isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, \ - isc_nm_http_endpoints_t *endpoints); \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ +#define NETIEVENT_SOCKET_HTTP_EPS_DECL(type) \ + isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ + isc_nm_http_endpoints_t *endpoints); \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent); #define NETIEVENT_SOCKET_HTTP_EPS_DEF(type) \ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ - isc_nm_t *nm, isc_nmsocket_t *sock, \ + isc__networker_t *worker, isc_nmsocket_t *sock, \ isc_nm_http_endpoints_t *endpoints) { \ isc__netievent_##type##_t *ievent = \ - isc__nm_get_netievent(nm, netievent_##type); \ + isc__nm_get_netievent(worker, netievent_##type); \ isc__nmsocket_attach(sock, &ievent->sock); \ isc_nm_http_endpoints_attach(endpoints, &ievent->endpoints); \ \ return (ievent); \ } \ \ - void isc__nm_put_netievent_##type(isc_nm_t *nm, \ + void isc__nm_put_netievent_##type(isc__networker_t *worker, \ isc__netievent_##type##_t *ievent) { \ isc_nm_http_endpoints_detach(&ievent->endpoints); \ isc__nmsocket_detach(&ievent->sock); \ - isc__nm_put_netievent(nm, ievent); \ + isc__nm_put_netievent(worker, ievent); \ } #endif /* HAVE_LIBNGHTTP2 */ @@ -737,7 +671,6 @@ typedef union { isc__netievent_t ni; isc__netievent__socket_t nis; isc__netievent__socket_req_t nisr; - isc__netievent_udpsend_t nius; isc__netievent__socket_quota_t nisq; isc__netievent_tlsconnect_t nitc; isc__netievent__tlsctx_t nitls; @@ -746,17 +679,6 @@ typedef union { #endif /* HAVE_LIBNGHTTP2 */ } isc__netievent_storage_t; -/* - * Work item for a uv_work threadpool. 
- */ -typedef struct isc__nm_work { - isc_nm_t *netmgr; - uv_work_t req; - isc_nm_workcb_t cb; - isc_nm_after_workcb_t after_cb; - void *data; -} isc__nm_work_t; - /* * Network manager */ @@ -767,35 +689,22 @@ struct isc_nm { int magic; isc_refcount_t references; isc_mem_t *mctx; - int nworkers; + isc_loopmgr_t *loopmgr; + uint32_t nloops; isc_mutex_t lock; - isc_condition_t wkstatecond; - isc_condition_t wkpausecond; isc__networker_t *workers; isc_stats_t *stats; - uint_fast32_t workers_running; - atomic_uint_fast32_t workers_paused; atomic_uint_fast32_t maxudp; bool load_balance_sockets; - atomic_bool paused; - /* * Active connections are being closed and new connections are * no longer allowed. */ - atomic_bool closing; - - /* - * A worker is actively waiting for other workers, for example to - * stop listening; that means no other thread can do the same thing - * or pause, or we'll deadlock. We have to either re-enqueue our - * event or wait for the other one to finish if we want to pause. 
- */ - atomic_int interlocked; + atomic_bool shuttingdown; /* * Timeout values for TCP connections, corresponding to @@ -809,9 +718,6 @@ struct isc_nm { atomic_uint_fast32_t keepalive; atomic_uint_fast32_t advertised; - isc_barrier_t pausing; - isc_barrier_t resuming; - /* * Socket SO_RCVBUF and SO_SNDBUF values */ @@ -957,9 +863,12 @@ typedef void (*isc_nm_closehandlecb_t)(void *arg); struct isc_nmsocket { /*% Unlocked, RO */ int magic; - int tid; + uint32_t tid; isc_nmsocket_type type; - isc_nm_t *mgr; + isc__networker_t *worker; + + isc_mutex_t lock; + isc_barrier_t barrier; /*% Parent socket for multithreaded listeners */ isc_nmsocket_t *parent; @@ -968,9 +877,6 @@ struct isc_nmsocket { /*% Self socket */ isc_nmsocket_t *self; - isc_barrier_t startlistening; - isc_barrier_t stoplistening; - /*% TLS stuff */ struct tls { isc_tls_t *tls; @@ -1134,41 +1040,6 @@ struct isc_nmsocket { isc_astack_t *inactivehandles; isc_astack_t *inactivereqs; - /*% - * Used to wait for TCP listening events to complete, and - * for the number of running children to reach zero during - * shutdown. - * - * We use two condition variables to prevent the race where the netmgr - * threads would be able to finish and destroy the socket before it's - * unlocked by the isc_nm_listen() function. So, the flow is as - * follows: - * - * 1. parent thread creates all children sockets and passes then to - * netthreads, looks at the signaling variable and WAIT(cond) until - * the childrens are done initializing - * - * 2. the events get picked by netthreads, calls the libuv API (and - * either succeeds or fails) and WAIT(scond) until all other - * children sockets in netthreads are initialized and the listening - * socket lock is unlocked - * - * 3. 
the control is given back to the parent thread which now either - * returns success or shutdowns the listener if an error has - * occured in the children netthread - * - * NOTE: The other approach would be doing an extra attach to the parent - * listening socket, and then detach it in the parent thread, but that - * breaks the promise that once the libuv socket is initialized on the - * nmsocket, the nmsocket needs to be handled only by matching - * netthread, so in fact that would add a complexity in a way that - * isc__nmsocket_detach would have to be converted to use an - * asynchrounous netievent. - */ - isc_mutex_t lock; - isc_condition_t cond; - isc_condition_t scond; - /*% * Used to pass a result back from listen or connect events. */ @@ -1209,13 +1080,14 @@ struct isc_nmsocket { int backtrace_size; LINK(isc_nmsocket_t) active_link; ISC_LIST(isc_nmhandle_t) active_handles; + isc_mutex_t tracelock; #endif }; -bool -isc__nm_in_netthread(void); +void +isc__nm_process_ievent(isc__networker_t *worker, isc__netievent_t *event); /*%< - * Returns 'true' if we're in the network thread. + * If the call knows it's in the matching loop, process the netievent directly. */ void @@ -1260,7 +1132,7 @@ isc___nmhandle_get(isc_nmsocket_t *sock, isc_sockaddr_t *peer, */ isc__nm_uvreq_t * -isc___nm_uvreq_get(isc_nm_t *mgr, isc_nmsocket_t *sock FLARG); +isc___nm_uvreq_get(isc__networker_t *worker, isc_nmsocket_t *sock FLARG); /*%< * Get a UV request structure for the socket 'sock', allocating a * new one if there isn't one available in 'sock->inactivereqs'. 
@@ -1276,8 +1148,8 @@ isc___nm_uvreq_put(isc__nm_uvreq_t **req, isc_nmsocket_t *sock FLARG); */ void -isc___nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type, - isc_sockaddr_t *iface FLARG); +isc___nmsocket_init(isc_nmsocket_t *sock, isc__networker_t *worker, + isc_nmsocket_type type, isc_sockaddr_t *iface FLARG); /*%< * Initialize socket 'sock', attach it to 'mgr', and set it to type 'type' * and its interface to 'iface'. @@ -1439,27 +1311,15 @@ isc__nm_udp_settimeout(isc_nmhandle_t *handle, uint32_t timeout); void isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0); void -isc__nm_async_udpconnect(isc__networker_t *worker, isc__netievent_t *ev0); -void isc__nm_async_udpstop(isc__networker_t *worker, isc__netievent_t *ev0); void -isc__nm_async_udpsend(isc__networker_t *worker, isc__netievent_t *ev0); -void -isc__nm_async_udpread(isc__networker_t *worker, isc__netievent_t *ev0); -void isc__nm_async_udpcancel(isc__networker_t *worker, isc__netievent_t *ev0); void -isc__nm_async_udpclose(isc__networker_t *worker, isc__netievent_t *ev0); +isc__nm_async_udpread(isc__networker_t *worker, isc__netievent_t *ev0); /*%< * Callback handlers for asynchronous UDP events (listen, stoplisten, send). */ -void -isc__nm_async_routeconnect(isc__networker_t *worker, isc__netievent_t *ev0); -/*%< - * Callback handler for route socket events. - */ - void isc__nm_tcp_send(isc_nmhandle_t *handle, const isc_region_t *region, isc_nm_cb_t cb, void *cbarg); @@ -1871,28 +1731,10 @@ void isc__nm_http_set_max_streams(isc_nmsocket_t *listener, const uint32_t max_concurrent_streams); -#endif - void isc__nm_async_settlsctx(isc__networker_t *worker, isc__netievent_t *ev0); -bool -isc__nm_acquire_interlocked(isc_nm_t *mgr); -/*%< - * Try to acquire interlocked state; return true if successful. - */ - -void -isc__nm_drop_interlocked(isc_nm_t *mgr); -/*%< - * Drop interlocked state; signal waiters. 
- */ - -void -isc__nm_acquire_interlocked_force(isc_nm_t *mgr); -/*%< - * Actively wait for interlocked state. - */ +#endif void isc__nm_incstats(isc_nmsocket_t *sock, isc__nm_statid_t id); @@ -1984,7 +1826,6 @@ isc__nm_set_network_buffers(isc_nm_t *nm, uv_handle_t *handle); * typedef all the netievent types */ -NETIEVENT_SOCKET_TYPE(close); NETIEVENT_SOCKET_TYPE(tcpclose); NETIEVENT_SOCKET_TYPE(tcplisten); NETIEVENT_SOCKET_TYPE(tcppauseread); @@ -1995,11 +1836,9 @@ NETIEVENT_SOCKET_TYPE(tlsclose); NETIEVENT_SOCKET_TYPE(tlsdobio); NETIEVENT_SOCKET_TYPE(tlsstartread); NETIEVENT_SOCKET_HANDLE_TYPE(tlscancel); -NETIEVENT_SOCKET_TYPE(udpclose); NETIEVENT_SOCKET_TYPE(udplisten); -NETIEVENT_SOCKET_TYPE(udpread); -/* NETIEVENT_SOCKET_TYPE(udpsend); */ /* unique type, defined independently */ NETIEVENT_SOCKET_TYPE(udpstop); +NETIEVENT_SOCKET_TYPE(udpread); NETIEVENT_SOCKET_TYPE(tcpdnsclose); NETIEVENT_SOCKET_TYPE(tcpdnsread); @@ -2032,9 +1871,6 @@ NETIEVENT_SOCKET_REQ_TYPE(tcpconnect); NETIEVENT_SOCKET_REQ_TYPE(tcpsend); NETIEVENT_SOCKET_TYPE(tcpstartread); NETIEVENT_SOCKET_REQ_TYPE(tlssend); -NETIEVENT_SOCKET_REQ_TYPE(udpconnect); - -NETIEVENT_SOCKET_REQ_TYPE(routeconnect); NETIEVENT_SOCKET_REQ_RESULT_TYPE(connectcb); NETIEVENT_SOCKET_REQ_RESULT_TYPE(readcb); @@ -2046,18 +1882,10 @@ NETIEVENT_SOCKET_HANDLE_TYPE(udpcancel); NETIEVENT_SOCKET_QUOTA_TYPE(tcpaccept); -NETIEVENT_TYPE(pause); -NETIEVENT_TYPE(resume); -NETIEVENT_TYPE(shutdown); -NETIEVENT_TYPE(stop); - -NETIEVENT_TASK_TYPE(task); - NETIEVENT_SOCKET_TLSCTX_TYPE(settlsctx); /* Now declared the helper functions */ -NETIEVENT_SOCKET_DECL(close); NETIEVENT_SOCKET_DECL(tcpclose); NETIEVENT_SOCKET_DECL(tcplisten); NETIEVENT_SOCKET_DECL(tcppauseread); @@ -2068,11 +1896,9 @@ NETIEVENT_SOCKET_DECL(tlsconnect); NETIEVENT_SOCKET_DECL(tlsdobio); NETIEVENT_SOCKET_DECL(tlsstartread); NETIEVENT_SOCKET_HANDLE_DECL(tlscancel); -NETIEVENT_SOCKET_DECL(udpclose); NETIEVENT_SOCKET_DECL(udplisten); -NETIEVENT_SOCKET_DECL(udpread); 
-NETIEVENT_SOCKET_DECL(udpsend); NETIEVENT_SOCKET_DECL(udpstop); +NETIEVENT_SOCKET_DECL(udpread); NETIEVENT_SOCKET_DECL(tcpdnsclose); NETIEVENT_SOCKET_DECL(tcpdnsread); @@ -2104,27 +1930,17 @@ NETIEVENT_SOCKET_HTTP_EPS_DECL(httpendpoints); NETIEVENT_SOCKET_REQ_DECL(tcpconnect); NETIEVENT_SOCKET_REQ_DECL(tcpsend); NETIEVENT_SOCKET_REQ_DECL(tlssend); -NETIEVENT_SOCKET_REQ_DECL(udpconnect); - -NETIEVENT_SOCKET_REQ_DECL(routeconnect); NETIEVENT_SOCKET_REQ_RESULT_DECL(connectcb); NETIEVENT_SOCKET_REQ_RESULT_DECL(readcb); NETIEVENT_SOCKET_REQ_RESULT_DECL(sendcb); -NETIEVENT_SOCKET_HANDLE_DECL(udpcancel); NETIEVENT_SOCKET_HANDLE_DECL(tcpcancel); +NETIEVENT_SOCKET_HANDLE_DECL(udpcancel); NETIEVENT_SOCKET_DECL(detach); NETIEVENT_SOCKET_QUOTA_DECL(tcpaccept); -NETIEVENT_DECL(pause); -NETIEVENT_DECL(resume); -NETIEVENT_DECL(shutdown); -NETIEVENT_DECL(stop); - -NETIEVENT_TASK_DECL(task); - NETIEVENT_SOCKET_TLSCTX_DECL(settlsctx); void @@ -2169,7 +1985,7 @@ isc__nm_resume_processing(void *arg); bool isc__nmsocket_closing(isc_nmsocket_t *sock); bool -isc__nm_closing(isc_nmsocket_t *sock); +isc__nm_closing(isc__networker_t *worker); void isc__nm_alloc_dnsbuf(isc_nmsocket_t *sock, size_t len); diff --git a/lib/isc/netmgr/netmgr.c b/lib/isc/netmgr/netmgr.c index f1a0b94186..974f882914 100644 --- a/lib/isc/netmgr/netmgr.c +++ b/lib/isc/netmgr/netmgr.c @@ -11,9 +11,11 @@ * information regarding copyright ownership. */ +#include #include #include +#include #include #include #include @@ -22,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -36,12 +39,13 @@ #include #include #include +#include #include #include #include +#include "../loop_p.h" #include "netmgr-int.h" -#include "netmgr_p.h" #include "openssl_shim.h" #include "trampoline_p.h" @@ -119,17 +123,6 @@ static const isc_statscounter_t unixstatsindex[] = { }; #endif /* if 0 */ -/* - * libuv is not thread safe, but has mechanisms to pass messages - * between threads. Each socket is owned by a thread. 
For UDP - * sockets we have a set of sockets for each interface and we can - * choose a sibling and send the message directly. For TCP, or if - * we're calling from a non-networking thread, we need to pass the - * request using async_cb. - */ - -static thread_local int isc__nm_tid_v = ISC_NETMGR_TID_UNKNOWN; - /* * Set by the -T dscp option on the command line. If set to a value * other than -1, we check to make sure DSCP values match it, and @@ -141,37 +134,12 @@ static void nmsocket_maybe_destroy(isc_nmsocket_t *sock FLARG); static void nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle); -static isc_threadresult_t -nm_thread(isc_threadarg_t worker0); -static void -async_cb(uv_async_t *handle); - -static bool -process_netievent(isc__networker_t *worker, isc__netievent_t *ievent); -static isc_result_t -process_queue(isc__networker_t *worker, netievent_type_t type); -static void -wait_for_priority_queue(isc__networker_t *worker); -static void -drain_queue(isc__networker_t *worker, netievent_type_t type); static void -isc__nm_async_stop(isc__networker_t *worker, isc__netievent_t *ev0); -static void -isc__nm_async_pause(isc__networker_t *worker, isc__netievent_t *ev0); -static void -isc__nm_async_resume(isc__networker_t *worker, isc__netievent_t *ev0); +process_netievent(void *arg); + static void isc__nm_async_detach(isc__networker_t *worker, isc__netievent_t *ev0); -static void -isc__nm_async_close(isc__networker_t *worker, isc__netievent_t *ev0); - -static void -isc__nm_threadpool_initialize(uint32_t workers); -static void -isc__nm_work_cb(uv_work_t *req); -static void -isc__nm_after_work_cb(uv_work_t *req, int status); /*%< * Issue a 'handle closed' callback on the socket. 
@@ -180,29 +148,36 @@ isc__nm_after_work_cb(uv_work_t *req, int status); static void nmhandle_detach_cb(isc_nmhandle_t **handlep FLARG); -int -isc_nm_tid(void) { - return (isc__nm_tid_v); -} +static void +shutdown_walk_cb(uv_handle_t *handle, void *arg); -bool -isc__nm_in_netthread(void) { - return (isc__nm_tid_v >= 0); -} +static void +networker_teardown(void *arg) { + isc__networker_t *worker = arg; + isc_loop_t *loop = worker->loop; -void -isc__nm_force_tid(int tid) { - isc__nm_tid_v = tid; + worker->shuttingdown = true; + + isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_NETMGR, + ISC_LOG_DEBUG(1), + "Shutting down network manager worker on loop %p(%d)", + loop, isc_tid()); + + uv_walk(&loop->loop, shutdown_walk_cb, NULL); + + isc__networker_detach(&worker); } static void -isc__nm_threadpool_initialize(uint32_t workers) { - char buf[11]; - int r = uv_os_getenv("UV_THREADPOOL_SIZE", buf, - &(size_t){ sizeof(buf) }); - if (r == UV_ENOENT) { - snprintf(buf, sizeof(buf), "%" PRIu32, workers); - uv_os_setenv("UV_THREADPOOL_SIZE", buf); +netmgr_teardown(void *arg) { + isc_nm_t *netmgr = (void *)arg; + + if (atomic_compare_exchange_strong(&netmgr->shuttingdown, + &(bool){ false }, true)) + { + isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL, + ISC_LOGMODULE_NETMGR, ISC_LOG_DEBUG(1), + "Shutting down network manager"); } } @@ -219,11 +194,8 @@ isc__nm_threadpool_initialize(uint32_t workers) { #endif void -isc__netmgr_create(isc_mem_t *mctx, uint32_t workers, isc_nm_t **netmgrp) { - isc_nm_t *mgr = NULL; - char name[32]; - - REQUIRE(workers > 0); +isc_netmgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, isc_nm_t **netmgrp) { + isc_nm_t *netmgr = NULL; if (uv_version() < MINIMAL_UV_VERSION) { isc_error_fatal(__FILE__, __LINE__, @@ -233,88 +205,68 @@ isc__netmgr_create(isc_mem_t *mctx, uint32_t workers, isc_nm_t **netmgrp) { uv_version_string(), UV_VERSION_STRING); } - isc__nm_threadpool_initialize(workers); + netmgr = isc_mem_get(mctx, sizeof(*netmgr)); 
+ *netmgr = (isc_nm_t){ + .loopmgr = loopmgr, + .nloops = isc_loopmgr_nloops(loopmgr), + }; - mgr = isc_mem_get(mctx, sizeof(*mgr)); - *mgr = (isc_nm_t){ .nworkers = workers }; - - isc_mem_attach(mctx, &mgr->mctx); - isc_mutex_init(&mgr->lock); - isc_condition_init(&mgr->wkstatecond); - isc_condition_init(&mgr->wkpausecond); - isc_refcount_init(&mgr->references, 1); - atomic_init(&mgr->maxudp, 0); - atomic_init(&mgr->interlocked, ISC_NETMGR_NON_INTERLOCKED); - atomic_init(&mgr->workers_paused, 0); - atomic_init(&mgr->paused, false); - atomic_init(&mgr->closing, false); - atomic_init(&mgr->recv_tcp_buffer_size, 0); - atomic_init(&mgr->send_tcp_buffer_size, 0); - atomic_init(&mgr->recv_udp_buffer_size, 0); - atomic_init(&mgr->send_udp_buffer_size, 0); + isc_mem_attach(mctx, &netmgr->mctx); + isc_mutex_init(&netmgr->lock); + isc_refcount_init(&netmgr->references, 1); + atomic_init(&netmgr->maxudp, 0); + atomic_init(&netmgr->shuttingdown, false); + atomic_init(&netmgr->recv_tcp_buffer_size, 0); + atomic_init(&netmgr->send_tcp_buffer_size, 0); + atomic_init(&netmgr->recv_udp_buffer_size, 0); + atomic_init(&netmgr->send_udp_buffer_size, 0); #if HAVE_SO_REUSEPORT_LB - mgr->load_balance_sockets = true; + netmgr->load_balance_sockets = true; #else - mgr->load_balance_sockets = false; + netmgr->load_balance_sockets = false; #endif #ifdef NETMGR_TRACE - ISC_LIST_INIT(mgr->active_sockets); + ISC_LIST_INIT(netmgr->active_sockets); #endif /* * Default TCP timeout values. * May be updated by isc_nm_tcptimeouts(). 
*/ - atomic_init(&mgr->init, 30000); - atomic_init(&mgr->idle, 30000); - atomic_init(&mgr->keepalive, 30000); - atomic_init(&mgr->advertised, 30000); + atomic_init(&netmgr->init, 30000); + atomic_init(&netmgr->idle, 30000); + atomic_init(&netmgr->keepalive, 30000); + atomic_init(&netmgr->advertised, 30000); - isc_barrier_init(&mgr->pausing, workers); - isc_barrier_init(&mgr->resuming, workers); + netmgr->workers = + isc_mem_get(mctx, netmgr->nloops * sizeof(netmgr->workers[0])); - mgr->workers = isc_mem_get(mctx, workers * sizeof(isc__networker_t)); - for (size_t i = 0; i < workers; i++) { - isc__networker_t *worker = &mgr->workers[i]; - int r; + isc_loopmgr_teardown(loopmgr, netmgr_teardown, netmgr); + + netmgr->magic = NM_MAGIC; + + for (size_t i = 0; i < netmgr->nloops; i++) { + isc_loop_t *loop = isc_loop_get(netmgr->loopmgr, i); + isc__networker_t *worker = &netmgr->workers[i]; *worker = (isc__networker_t){ - .mgr = mgr, - .id = i, + .recvbuf = isc_mem_get(loop->mctx, + ISC_NETMGR_RECVBUF_SIZE), + .sendbuf = isc_mem_get(loop->mctx, + ISC_NETMGR_SENDBUF_SIZE), }; - r = uv_loop_init(&worker->loop); - UV_RUNTIME_CHECK(uv_loop_init, r); + isc_nm_attach(netmgr, &worker->netmgr); - worker->loop.data = &mgr->workers[i]; + isc_mem_attach(loop->mctx, &worker->mctx); - r = uv_async_init(&worker->loop, &worker->async, async_cb); - UV_RUNTIME_CHECK(uv_async_init, r); - - for (size_t type = 0; type < NETIEVENT_MAX; type++) { - isc_mutex_init(&worker->ievents[type].lock); - isc_condition_init(&worker->ievents[type].cond); - ISC_LIST_INIT(worker->ievents[type].list); - } - - worker->recvbuf = isc_mem_get(mctx, ISC_NETMGR_RECVBUF_SIZE); - worker->sendbuf = isc_mem_get(mctx, ISC_NETMGR_SENDBUF_SIZE); - - /* - * We need to do this here and not in nm_thread to avoid a - * race - we could exit isc_nm_start, launch nm_destroy, - * and nm_thread would still not be up. 
- */ - mgr->workers_running++; - isc_thread_create(nm_thread, &mgr->workers[i], &worker->thread); - - snprintf(name, sizeof(name), "isc-net-%04zu", i); - isc_thread_setname(worker->thread, name); + isc_loop_attach(loop, &worker->loop); + isc_loop_teardown(loop, networker_teardown, worker); + isc_refcount_init(&worker->references, 1); } - mgr->magic = NM_MAGIC; - *netmgrp = mgr; + *netmgrp = netmgr; } /* @@ -323,7 +275,6 @@ isc__netmgr_create(isc_mem_t *mctx, uint32_t workers, isc_nm_t **netmgrp) { static void nm_destroy(isc_nm_t **mgr0) { REQUIRE(VALID_NM(*mgr0)); - REQUIRE(!isc__nm_in_netthread()); isc_nm_t *mgr = *mgr0; *mgr0 = NULL; @@ -332,154 +283,17 @@ nm_destroy(isc_nm_t **mgr0) { mgr->magic = 0; - for (int i = 0; i < mgr->nworkers; i++) { - isc__networker_t *worker = &mgr->workers[i]; - isc__netievent_t *event = isc__nm_get_netievent_stop(mgr); - isc__nm_enqueue_ievent(worker, event); - } - - LOCK(&mgr->lock); - while (mgr->workers_running > 0) { - WAIT(&mgr->wkstatecond, &mgr->lock); - } - UNLOCK(&mgr->lock); - - for (int i = 0; i < mgr->nworkers; i++) { - isc__networker_t *worker = &mgr->workers[i]; - int r; - - r = uv_loop_close(&worker->loop); - UV_RUNTIME_CHECK(uv_loop_close, r); - - for (size_t type = 0; type < NETIEVENT_MAX; type++) { - INSIST(ISC_LIST_EMPTY(worker->ievents[type].list)); - isc_condition_destroy(&worker->ievents[type].cond); - isc_mutex_destroy(&worker->ievents[type].lock); - } - - isc_mem_put(mgr->mctx, worker->sendbuf, - ISC_NETMGR_SENDBUF_SIZE); - isc_mem_put(mgr->mctx, worker->recvbuf, - ISC_NETMGR_RECVBUF_SIZE); - isc_thread_join(worker->thread, NULL); - } - if (mgr->stats != NULL) { isc_stats_detach(&mgr->stats); } - isc_barrier_destroy(&mgr->resuming); - isc_barrier_destroy(&mgr->pausing); - - isc_condition_destroy(&mgr->wkstatecond); - isc_condition_destroy(&mgr->wkpausecond); isc_mutex_destroy(&mgr->lock); isc_mem_put(mgr->mctx, mgr->workers, - mgr->nworkers * sizeof(isc__networker_t)); + mgr->nloops * 
sizeof(mgr->workers[0])); isc_mem_putanddetach(&mgr->mctx, mgr, sizeof(*mgr)); } -static void -enqueue_pause(isc__networker_t *worker) { - isc__netievent_pause_t *event = - isc__nm_get_netievent_pause(worker->mgr); - isc__nm_enqueue_ievent(worker, (isc__netievent_t *)event); -} - -static void -isc__nm_async_pause(isc__networker_t *worker, isc__netievent_t *ev0) { - UNUSED(ev0); - REQUIRE(worker->paused == false); - - worker->paused = true; - uv_stop(&worker->loop); -} - -void -isc_nm_pause(isc_nm_t *mgr) { - REQUIRE(VALID_NM(mgr)); - REQUIRE(!atomic_load(&mgr->paused)); - - isc__nm_acquire_interlocked_force(mgr); - - if (isc__nm_in_netthread()) { - REQUIRE(isc_nm_tid() == 0); - } - - for (int i = 0; i < mgr->nworkers; i++) { - isc__networker_t *worker = &mgr->workers[i]; - if (i == isc_nm_tid()) { - isc__nm_async_pause(worker, NULL); - } else { - enqueue_pause(worker); - } - } - - if (isc__nm_in_netthread()) { - atomic_fetch_add(&mgr->workers_paused, 1); - isc_barrier_wait(&mgr->pausing); - } - - LOCK(&mgr->lock); - while (atomic_load(&mgr->workers_paused) != mgr->workers_running) { - WAIT(&mgr->wkstatecond, &mgr->lock); - } - UNLOCK(&mgr->lock); - - atomic_compare_exchange_enforced(&mgr->paused, &(bool){ false }, true); -} - -static void -enqueue_resume(isc__networker_t *worker) { - isc__netievent_resume_t *event = - isc__nm_get_netievent_resume(worker->mgr); - isc__nm_enqueue_ievent(worker, (isc__netievent_t *)event); -} - -static void -isc__nm_async_resume(isc__networker_t *worker, isc__netievent_t *ev0) { - UNUSED(ev0); - REQUIRE(worker->paused == true); - - worker->paused = false; -} - -void -isc_nm_resume(isc_nm_t *mgr) { - REQUIRE(VALID_NM(mgr)); - REQUIRE(atomic_load(&mgr->paused)); - - if (isc__nm_in_netthread()) { - REQUIRE(isc_nm_tid() == 0); - drain_queue(&mgr->workers[isc_nm_tid()], NETIEVENT_PRIORITY); - } - - for (int i = 0; i < mgr->nworkers; i++) { - isc__networker_t *worker = &mgr->workers[i]; - if (i == isc_nm_tid()) { - 
isc__nm_async_resume(worker, NULL); - } else { - enqueue_resume(worker); - } - } - - if (isc__nm_in_netthread()) { - atomic_fetch_sub(&mgr->workers_paused, 1); - isc_barrier_wait(&mgr->resuming); - } - - LOCK(&mgr->lock); - while (atomic_load(&mgr->workers_paused) != 0) { - WAIT(&mgr->wkstatecond, &mgr->lock); - } - UNLOCK(&mgr->lock); - - atomic_compare_exchange_enforced(&mgr->paused, &(bool){ true }, false); - - isc__nm_drop_interlocked(mgr); -} - void isc_nm_attach(isc_nm_t *mgr, isc_nm_t **dst) { REQUIRE(VALID_NM(mgr)); @@ -506,56 +320,16 @@ isc_nm_detach(isc_nm_t **mgr0) { } void -isc__netmgr_shutdown(isc_nm_t *mgr) { - REQUIRE(VALID_NM(mgr)); - - atomic_store(&mgr->closing, true); - for (int i = 0; i < mgr->nworkers; i++) { - isc__netievent_t *event = NULL; - event = isc__nm_get_netievent_shutdown(mgr); - isc__nm_enqueue_ievent(&mgr->workers[i], event); - } -} - -void -isc__netmgr_destroy(isc_nm_t **netmgrp) { +isc_netmgr_destroy(isc_nm_t **netmgrp) { isc_nm_t *mgr = NULL; - int counter = 0; REQUIRE(VALID_NM(*netmgrp)); mgr = *netmgrp; + *netmgrp = NULL; - /* - * Close active connections. - */ - isc__netmgr_shutdown(mgr); - - /* - * Wait for the manager to be dereferenced elsewhere. - */ - while (isc_refcount_current(&mgr->references) > 1 && counter++ < 1000) { - uv_sleep(10); - } - -#ifdef NETMGR_TRACE - if (isc_refcount_current(&mgr->references) > 1) { - isc__nm_dump_active(mgr); - UNREACHABLE(); - } -#endif - - /* - * Now just patiently wait - */ - while (isc_refcount_current(&mgr->references) > 1) { - uv_sleep(10); - } - - /* - * Detach final reference. 
- */ - isc_nm_detach(netmgrp); + REQUIRE(isc_refcount_decrement(&mgr->references) == 1); + nm_destroy(&mgr); } void @@ -569,7 +343,7 @@ void isc_nmhandle_setwritetimeout(isc_nmhandle_t *handle, uint64_t write_timeout) { REQUIRE(VALID_NMHANDLE(handle)); REQUIRE(VALID_NMSOCK(handle->sock)); - REQUIRE(handle->sock->tid == isc_nm_tid()); + REQUIRE(handle->sock->tid == isc_tid()); switch (handle->sock->type) { case isc_nm_tcpsocket: @@ -651,208 +425,6 @@ isc_nm_gettimeouts(isc_nm_t *mgr, uint32_t *initial, uint32_t *idle, } } -/* - * nm_thread is a single worker thread, that runs uv_run event loop - * until asked to stop. - * - * There are four queues for asynchronous events: - * - * 1. priority queue - netievents on the priority queue are run even when - * the taskmgr enters exclusive mode and the netmgr is paused. This - * is needed to properly start listening on the interfaces, free - * resources on shutdown, or resume from a pause. - * - * 2. task queue - only (traditional) tasks are scheduled here, and this queue - * is processed when the netmgr workers are finishing. This is needed to - * process the task shutdown events. - * - * 3. normal queue - this is the queue with netmgr events, e.g. reading, - * sending, callbacks, etc. - */ - -static isc_threadresult_t -nm_thread(isc_threadarg_t worker0) { - isc__networker_t *worker = (isc__networker_t *)worker0; - isc_nm_t *mgr = worker->mgr; - - isc__nm_tid_v = worker->id; - - while (true) { - /* - * uv_run() runs async_cb() in a loop, which processes - * all four event queues until a "pause" or "stop" event - * is encountered. On pause, we process only priority - * events until resuming. 
- */ - int r = uv_run(&worker->loop, UV_RUN_DEFAULT); - INSIST(r > 0 || worker->finished); - - if (worker->paused) { - INSIST(atomic_load(&mgr->interlocked) != isc_nm_tid()); - - atomic_fetch_add(&mgr->workers_paused, 1); - if (isc_barrier_wait(&mgr->pausing) != 0) { - LOCK(&mgr->lock); - SIGNAL(&mgr->wkstatecond); - UNLOCK(&mgr->lock); - } - - while (worker->paused) { - wait_for_priority_queue(worker); - } - - atomic_fetch_sub(&mgr->workers_paused, 1); - if (isc_barrier_wait(&mgr->resuming) != 0) { - LOCK(&mgr->lock); - SIGNAL(&mgr->wkstatecond); - UNLOCK(&mgr->lock); - } - } - - if (r == 0) { - INSIST(worker->finished); - break; - } - - INSIST(!worker->finished); - } - - /* - * We are shutting down. Drain the queues. - */ - drain_queue(worker, NETIEVENT_TASK); - - for (size_t type = 0; type < NETIEVENT_MAX; type++) { - LOCK(&worker->ievents[type].lock); - INSIST(ISC_LIST_EMPTY(worker->ievents[type].list)); - UNLOCK(&worker->ievents[type].lock); - } - - LOCK(&mgr->lock); - mgr->workers_running--; - SIGNAL(&mgr->wkstatecond); - UNLOCK(&mgr->lock); - - return ((isc_threadresult_t)0); -} - -static bool -process_all_queues(isc__networker_t *worker) { - bool reschedule = false; - /* - * The queue processing functions will return false when the - * system is pausing or stopping and we don't want to process - * the other queues in such case, but we need the async event - * to be rescheduled in the next uv_run(). - */ - for (size_t type = 0; type < NETIEVENT_MAX; type++) { - isc_result_t result = process_queue(worker, type); - switch (result) { - case ISC_R_SUSPEND: - reschedule = true; - break; - case ISC_R_EMPTY: - /* empty queue */ - break; - case ISC_R_SUCCESS: - reschedule = true; - break; - default: - UNREACHABLE(); - } - } - - return (reschedule); -} - -/* - * async_cb() is a universal callback for 'async' events sent to event loop. - * It's the only way to safely pass data to the libuv event loop. 
We use a - * single async event and a set of lockless queues of 'isc__netievent_t' - * structures passed from other threads. - */ -static void -async_cb(uv_async_t *handle) { - isc__networker_t *worker = (isc__networker_t *)handle->loop->data; - - if (process_all_queues(worker)) { - /* - * If we didn't process all the events, we need to enqueue - * async_cb to be run in the next iteration of the uv_loop - */ - uv_async_send(handle); - } -} - -static void -isc__nm_async_stop(isc__networker_t *worker, isc__netievent_t *ev0) { - UNUSED(ev0); - worker->finished = true; - /* Close the async handler */ - uv_close((uv_handle_t *)&worker->async, NULL); -} - -void -isc_nm_task_enqueue(isc_nm_t *nm, isc_task_t *task, int tid) { - isc__netievent_t *event = NULL; - isc__networker_t *worker = NULL; - REQUIRE(tid >= 0 && tid < nm->nworkers); - - worker = &nm->workers[tid]; - - event = (isc__netievent_t *)isc__nm_get_netievent_task(nm, task); - - isc__nm_enqueue_ievent(worker, event); -} - -static void -isc__nm_async_task(isc__networker_t *worker, isc__netievent_t *ev0) { - isc__netievent_task_t *ievent = (isc__netievent_task_t *)ev0; - isc_result_t result; - - UNUSED(worker); - - result = isc_task_run(ievent->task); - - switch (result) { - case ISC_R_QUOTA: - isc_task_ready(ievent->task); - return; - case ISC_R_SUCCESS: - return; - default: - UNREACHABLE(); - } -} - -static void -wait_for_priority_queue(isc__networker_t *worker) { - isc_condition_t *cond = &worker->ievents[NETIEVENT_PRIORITY].cond; - isc_mutex_t *lock = &worker->ievents[NETIEVENT_PRIORITY].lock; - isc__netievent_list_t *list = - &(worker->ievents[NETIEVENT_PRIORITY].list); - - LOCK(lock); - while (ISC_LIST_EMPTY(*list)) { - WAIT(cond, lock); - } - UNLOCK(lock); - - drain_queue(worker, NETIEVENT_PRIORITY); -} - -static void -drain_queue(isc__networker_t *worker, netievent_type_t type) { - bool empty = false; - while (!empty) { - if (process_queue(worker, type) == ISC_R_EMPTY) { - 
LOCK(&worker->ievents[type].lock); - empty = ISC_LIST_EMPTY(worker->ievents[type].list); - UNLOCK(&worker->ievents[type].lock); - } - } -} - /* * The two macros here generate the individual cases for the process_netievent() * function. The NETIEVENT_CASE(type) macro is the common case, and @@ -860,40 +432,24 @@ drain_queue(isc__networker_t *worker, netievent_type_t type) { * process_queue() to stop, e.g. it's only used for the netievent that * stops/pauses processing the enqueued netievents. */ -#define NETIEVENT_CASE(type) \ - case netievent_##type: { \ - isc__nm_async_##type(worker, ievent); \ - isc__nm_put_netievent_##type( \ - worker->mgr, (isc__netievent_##type##_t *)ievent); \ - return (true); \ +#define NETIEVENT_CASE(type) \ + case netievent_##type: { \ + isc__nm_async_##type(worker, ievent); \ + isc__nm_put_netievent_##type( \ + worker, (isc__netievent_##type##_t *)ievent); \ + return; \ } -#define NETIEVENT_CASE_NOMORE(type) \ - case netievent_##type: { \ - isc__nm_async_##type(worker, ievent); \ - isc__nm_put_netievent_##type(worker->mgr, ievent); \ - return (false); \ - } - -static bool -process_netievent(isc__networker_t *worker, isc__netievent_t *ievent) { - REQUIRE(worker->id == isc_nm_tid()); +static void +process_netievent(void *arg) { + isc__netievent_t *ievent = (isc__netievent_t *)arg; + isc__networker_t *worker = ievent->worker; switch (ievent->type) { - /* Don't process more ievents when we are stopping */ - NETIEVENT_CASE_NOMORE(stop); - - NETIEVENT_CASE(task); - - NETIEVENT_CASE(udpconnect); NETIEVENT_CASE(udplisten); NETIEVENT_CASE(udpstop); - NETIEVENT_CASE(udpsend); - NETIEVENT_CASE(udpread); NETIEVENT_CASE(udpcancel); - NETIEVENT_CASE(udpclose); - - NETIEVENT_CASE(routeconnect); + NETIEVENT_CASE(udpread); NETIEVENT_CASE(tcpaccept); NETIEVENT_CASE(tcpconnect); @@ -936,81 +492,36 @@ process_netievent(isc__networker_t *worker, isc__netievent_t *ievent) { NETIEVENT_CASE(httpsend); NETIEVENT_CASE(httpclose); NETIEVENT_CASE(httpendpoints); 
-#endif NETIEVENT_CASE(settlsctx); +#endif NETIEVENT_CASE(connectcb); NETIEVENT_CASE(readcb); NETIEVENT_CASE(sendcb); - NETIEVENT_CASE(close); NETIEVENT_CASE(detach); - - NETIEVENT_CASE(shutdown); - NETIEVENT_CASE(resume); - NETIEVENT_CASE_NOMORE(pause); default: UNREACHABLE(); } - return (true); -} - -static isc_result_t -process_queue(isc__networker_t *worker, netievent_type_t type) { - isc__netievent_t *ievent = NULL; - isc__netievent_list_t list; - - ISC_LIST_INIT(list); - - LOCK(&worker->ievents[type].lock); - ISC_LIST_MOVE(list, worker->ievents[type].list); - UNLOCK(&worker->ievents[type].lock); - - ievent = ISC_LIST_HEAD(list); - if (ievent == NULL) { - /* There's nothing scheduled */ - return (ISC_R_EMPTY); - } - - while (ievent != NULL) { - isc__netievent_t *next = ISC_LIST_NEXT(ievent, link); - ISC_LIST_DEQUEUE(list, ievent, link); - - if (!process_netievent(worker, ievent)) { - /* The netievent told us to stop */ - if (!ISC_LIST_EMPTY(list)) { - /* - * Reschedule the rest of the unprocessed - * events. 
- */ - LOCK(&worker->ievents[type].lock); - ISC_LIST_PREPENDLIST(worker->ievents[type].list, - list, link); - UNLOCK(&worker->ievents[type].lock); - } - return (ISC_R_SUSPEND); - } - - ievent = next; - } - - /* We processed at least one */ - return (ISC_R_SUCCESS); } void * -isc__nm_get_netievent(isc_nm_t *mgr, isc__netievent_type type) { - isc__netievent_storage_t *event = isc_mem_get(mgr->mctx, +isc__nm_get_netievent(isc__networker_t *worker, isc__netievent_type type) { + isc__netievent_storage_t *event = isc_mem_get(worker->mctx, sizeof(*event)); *event = (isc__netievent_storage_t){ .ni.type = type }; ISC_LINK_INIT(&(event->ni), link); + + isc__networker_ref(worker); + return (event); } void -isc__nm_put_netievent(isc_nm_t *mgr, void *ievent) { - isc_mem_put(mgr->mctx, ievent, sizeof(isc__netievent_storage_t)); +isc__nm_put_netievent(isc__networker_t *worker, void *ievent) { + isc_mem_put(worker->mctx, ievent, sizeof(isc__netievent_storage_t)); + isc__networker_unref(worker); } NETIEVENT_SOCKET_DEF(tcpclose); @@ -1023,11 +534,10 @@ NETIEVENT_SOCKET_DEF(tlsconnect); NETIEVENT_SOCKET_DEF(tlsdobio); NETIEVENT_SOCKET_DEF(tlsstartread); NETIEVENT_SOCKET_HANDLE_DEF(tlscancel); -NETIEVENT_SOCKET_DEF(udpclose); NETIEVENT_SOCKET_DEF(udplisten); -NETIEVENT_SOCKET_DEF(udpread); -NETIEVENT_SOCKET_DEF(udpsend); NETIEVENT_SOCKET_DEF(udpstop); +NETIEVENT_SOCKET_HANDLE_DEF(udpcancel); +NETIEVENT_SOCKET_DEF(udpread); NETIEVENT_SOCKET_DEF(tcpdnsclose); NETIEVENT_SOCKET_DEF(tcpdnsread); @@ -1059,28 +569,23 @@ NETIEVENT_SOCKET_HTTP_EPS_DEF(httpendpoints); NETIEVENT_SOCKET_REQ_DEF(tcpconnect); NETIEVENT_SOCKET_REQ_DEF(tcpsend); NETIEVENT_SOCKET_REQ_DEF(tlssend); -NETIEVENT_SOCKET_REQ_DEF(udpconnect); -NETIEVENT_SOCKET_REQ_DEF(routeconnect); NETIEVENT_SOCKET_REQ_RESULT_DEF(connectcb); NETIEVENT_SOCKET_REQ_RESULT_DEF(readcb); NETIEVENT_SOCKET_REQ_RESULT_DEF(sendcb); NETIEVENT_SOCKET_DEF(detach); NETIEVENT_SOCKET_HANDLE_DEF(tcpcancel); -NETIEVENT_SOCKET_HANDLE_DEF(udpcancel); 
NETIEVENT_SOCKET_QUOTA_DEF(tcpaccept); -NETIEVENT_SOCKET_DEF(close); -NETIEVENT_DEF(pause); -NETIEVENT_DEF(resume); -NETIEVENT_DEF(shutdown); -NETIEVENT_DEF(stop); - -NETIEVENT_TASK_DEF(task); - NETIEVENT_SOCKET_TLSCTX_DEF(settlsctx); +void +isc__nm_process_ievent(isc__networker_t *worker, isc__netievent_t *event) { + event->worker = worker; + process_netievent(event); +} + void isc__nm_maybe_enqueue_ievent(isc__networker_t *worker, isc__netievent_t *event) { @@ -1088,8 +593,8 @@ isc__nm_maybe_enqueue_ievent(isc__networker_t *worker, * If we are already in the matching nmthread, process the ievent * directly. */ - if (worker->id == isc_nm_tid()) { - process_netievent(worker, event); + if (worker->loop == isc_loop_current(worker->netmgr->loopmgr)) { + isc__nm_process_ievent(worker, event); return; } @@ -1098,36 +603,9 @@ isc__nm_maybe_enqueue_ievent(isc__networker_t *worker, void isc__nm_enqueue_ievent(isc__networker_t *worker, isc__netievent_t *event) { - netievent_type_t type; + event->worker = worker; - if (event->type > netievent_prio) { - type = NETIEVENT_PRIORITY; - } else { - switch (event->type) { - case netievent_prio: - UNREACHABLE(); - break; - case netievent_task: - type = NETIEVENT_TASK; - break; - default: - type = NETIEVENT_NORMAL; - break; - } - } - - /* - * We need to make sure this signal will be delivered and - * the queue will be processed. - */ - LOCK(&worker->ievents[type].lock); - ISC_LIST_ENQUEUE(worker->ievents[type].list, event, link); - if (type == NETIEVENT_PRIORITY) { - SIGNAL(&worker->ievents[type].cond); - } - UNLOCK(&worker->ievents[type].lock); - - uv_async_send(&worker->async); + isc_async_run(worker->loop, process_netievent, event); } bool @@ -1207,17 +685,10 @@ nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree FLARG) { } } - /* - * This was a parent socket: destroy the listening - * barriers that synchronized the children. 
- */ - isc_barrier_destroy(&sock->startlistening); - isc_barrier_destroy(&sock->stoplistening); - /* * Now free them. */ - isc_mem_put(sock->mgr->mctx, sock->children, + isc_mem_put(sock->worker->mctx, sock->children, sock->nchildren * sizeof(*sock)); sock->children = NULL; sock->nchildren = 0; @@ -1238,7 +709,7 @@ nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree FLARG) { } if (sock->buf != NULL) { - isc_mem_put(sock->mgr->mctx, sock->buf, sock->buf_size); + isc_mem_put(sock->worker->mctx, sock->buf, sock->buf_size); } if (sock->quota != NULL) { @@ -1250,31 +721,35 @@ nmsocket_cleanup(isc_nmsocket_t *sock, bool dofree FLARG) { isc_astack_destroy(sock->inactivehandles); while ((uvreq = isc_astack_pop(sock->inactivereqs)) != NULL) { - isc_mem_put(sock->mgr->mctx, uvreq, sizeof(*uvreq)); + isc_mem_put(sock->worker->mctx, uvreq, sizeof(*uvreq)); } isc_astack_destroy(sock->inactivereqs); - sock->magic = 0; - isc_condition_destroy(&sock->scond); - isc_condition_destroy(&sock->cond); - isc_mutex_destroy(&sock->lock); isc__nm_tlsdns_cleanup_data(sock); #if HAVE_LIBNGHTTP2 isc__nm_tls_cleanup_data(sock); isc__nm_http_cleanup_data(sock); #endif + + sock->magic = 0; + #ifdef NETMGR_TRACE - LOCK(&sock->mgr->lock); - ISC_LIST_UNLINK(sock->mgr->active_sockets, sock, active_link); - UNLOCK(&sock->mgr->lock); + LOCK(&sock->worker->netmgr->lock); + ISC_LIST_UNLINK(sock->worker->netmgr->active_sockets, sock, + active_link); + UNLOCK(&sock->worker->netmgr->lock); + isc_mutex_destroy(&sock->tracelock); #endif + + isc_mutex_destroy(&sock->lock); + if (dofree) { - isc_nm_t *mgr = sock->mgr; - isc_mem_put(mgr->mctx, sock, sizeof(*sock)); - isc_nm_detach(&mgr); + isc__networker_t *worker = sock->worker; + isc_mem_put(worker->mctx, sock, sizeof(*sock)); + isc__networker_detach(&worker); } else { - isc_nm_detach(&sock->mgr); + isc__networker_detach(&sock->worker); } } @@ -1301,20 +776,16 @@ nmsocket_maybe_destroy(isc_nmsocket_t *sock FLARG) { * children have active handles before deciding 
whether to * accept destruction. */ - LOCK(&sock->lock); if (atomic_load(&sock->active) || atomic_load(&sock->destroying) || !atomic_load(&sock->closed) || atomic_load(&sock->references) != 0) { - UNLOCK(&sock->lock); return; } active_handles = atomic_load(&sock->ah); if (sock->children != NULL) { for (size_t i = 0; i < sock->nchildren; i++) { - LOCK(&sock->children[i].lock); active_handles += atomic_load(&sock->children[i].ah); - UNLOCK(&sock->children[i].lock); } } @@ -1327,10 +798,7 @@ nmsocket_maybe_destroy(isc_nmsocket_t *sock FLARG) { if (destroy) { atomic_store(&sock->destroying, true); - UNLOCK(&sock->lock); nmsocket_cleanup(sock, true FLARG_PASS); - } else { - UNLOCK(&sock->lock); } } @@ -1438,21 +906,26 @@ isc_nmsocket_close(isc_nmsocket_t **sockp) { } void -isc___nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type, - isc_sockaddr_t *iface FLARG) { +isc___nmsocket_init(isc_nmsocket_t *sock, isc__networker_t *worker, + isc_nmsocket_type type, isc_sockaddr_t *iface FLARG) { uint16_t family; REQUIRE(sock != NULL); - REQUIRE(mgr != NULL); + REQUIRE(worker != NULL); - *sock = (isc_nmsocket_t){ .type = type, - .fd = -1, - .inactivehandles = isc_astack_new( - mgr->mctx, ISC_NM_HANDLES_STACK_SIZE), - .inactivereqs = isc_astack_new( - mgr->mctx, ISC_NM_REQS_STACK_SIZE) }; + *sock = (isc_nmsocket_t){ + .type = type, + .tid = worker->loop->tid, + .fd = -1, + .inactivehandles = isc_astack_new(worker->mctx, + ISC_NM_HANDLES_STACK_SIZE), + .inactivereqs = isc_astack_new(worker->mctx, + ISC_NM_REQS_STACK_SIZE), + .result = ISC_R_UNSET, + }; ISC_LIST_INIT(sock->tls.sendreqs); + isc_mutex_init(&sock->lock); if (iface != NULL) { family = iface->type.sa.sa_family; @@ -1465,12 +938,13 @@ isc___nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type, sock->backtrace_size = isc_backtrace(sock->backtrace, TRACE_SIZE); ISC_LINK_INIT(sock, active_link); ISC_LIST_INIT(sock->active_handles); - LOCK(&mgr->lock); - 
ISC_LIST_APPEND(mgr->active_sockets, sock, active_link); - UNLOCK(&mgr->lock); + LOCK(&worker->netmgr->lock); + ISC_LIST_APPEND(worker->netmgr->active_sockets, sock, active_link); + UNLOCK(&worker->netmgr->lock); + isc_mutex_init(&sock->tracelock); #endif - isc_nm_attach(mgr, &sock->mgr); + isc__networker_attach(worker, &sock->worker); sock->uv_handle.handle.data = sock; ISC_LINK_INIT(&sock->quotacb, link); @@ -1518,9 +992,6 @@ isc___nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type, break; } - isc_mutex_init(&sock->lock); - isc_condition_init(&sock->cond); - isc_condition_init(&sock->scond); isc_refcount_init(&sock->references, 1); #if HAVE_LIBNGHTTP2 @@ -1558,7 +1029,7 @@ isc___nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type, void isc__nmsocket_clearcb(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(!isc__nm_in_netthread() || sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); sock->recv_cb = NULL; sock->recv_cbarg = NULL; @@ -1570,19 +1041,16 @@ isc__nmsocket_clearcb(isc_nmsocket_t *sock) { void isc__nm_free_uvbuf(isc_nmsocket_t *sock, const uv_buf_t *buf) { - isc__networker_t *worker = NULL; - REQUIRE(VALID_NMSOCK(sock)); - worker = &sock->mgr->workers[sock->tid]; - REQUIRE(buf->base == worker->recvbuf); + REQUIRE(buf->base == sock->worker->recvbuf); - worker->recvbuf_inuse = false; + sock->worker->recvbuf_inuse = false; } static isc_nmhandle_t * alloc_handle(isc_nmsocket_t *sock) { - isc_nmhandle_t *handle = isc_mem_get(sock->mgr->mctx, + isc_nmhandle_t *handle = isc_mem_get(sock->worker->mctx, sizeof(isc_nmhandle_t)); *handle = (isc_nmhandle_t){ .magic = NMHANDLE_MAGIC }; @@ -1635,9 +1103,9 @@ isc___nmhandle_get(isc_nmsocket_t *sock, isc_sockaddr_t *peer, (void)atomic_fetch_add(&sock->ah, 1); #ifdef NETMGR_TRACE - LOCK(&sock->lock); + LOCK(&sock->tracelock); ISC_LIST_APPEND(sock->active_handles, handle, active_link); - UNLOCK(&sock->lock); + UNLOCK(&sock->tracelock); #endif switch 
(sock->type) { @@ -1708,7 +1176,7 @@ nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle) { *handle = (isc_nmhandle_t){ .magic = 0 }; - isc_mem_put(sock->mgr->mctx, handle, sizeof(isc_nmhandle_t)); + isc_mem_put(sock->worker->mctx, handle, sizeof(isc_nmhandle_t)); } static void @@ -1721,8 +1189,6 @@ nmhandle_deactivate(isc_nmsocket_t *sock, isc_nmhandle_t *handle) { * destruction. We have to do this now, because at this point the * socket is either unused or still attached to event->sock. */ - LOCK(&sock->lock); - #ifdef NETMGR_TRACE ISC_LIST_UNLINK(sock->active_handles, handle, active_link); #endif @@ -1738,7 +1204,6 @@ nmhandle_deactivate(isc_nmsocket_t *sock, isc_nmhandle_t *handle) { if (!reuse) { nmhandle_free(sock, handle); } - UNLOCK(&sock->lock); } void @@ -1757,19 +1222,18 @@ isc__nmhandle_detach(isc_nmhandle_t **handlep FLARG) { * ensure correct ordering of the isc__nm_process_sock_buffer(). */ sock = handle->sock; - if (sock->tid == isc_nm_tid() && sock->closehandle_cb == NULL) { + if (sock->tid == isc_tid() && sock->closehandle_cb == NULL) { nmhandle_detach_cb(&handle FLARG_PASS); } else { isc__netievent_detach_t *event = - isc__nm_get_netievent_detach(sock->mgr, sock); + isc__nm_get_netievent_detach(sock->worker, sock); /* * we are using implicit "attach" as the last reference * need to be destroyed explicitly in the async callback */ event->handle = handle; FLARG_IEVENT_PASS(event); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)event); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)event); } } @@ -1813,17 +1277,10 @@ nmhandle_detach_cb(isc_nmhandle_t **handlep FLARG) { /* * The handle is gone now. If the socket has a callback configured * for that (e.g., to perform cleanup after request processing), - * call it now, or schedule it to run asynchronously. + * call it now. 
*/ if (sock->closehandle_cb != NULL) { - if (sock->tid == isc_nm_tid()) { - sock->closehandle_cb(sock); - } else { - isc__netievent_close_t *event = - isc__nm_get_netievent_close(sock->mgr, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)event); - } + sock->closehandle_cb(sock); } if (handle == sock->statichandle) { @@ -1858,11 +1315,11 @@ isc__nm_alloc_dnsbuf(isc_nmsocket_t *sock, size_t len) { if (sock->buf == NULL) { /* We don't have the buffer at all */ size_t alloc_len = len < NM_REG_BUF ? NM_REG_BUF : NM_BIG_BUF; - sock->buf = isc_mem_get(sock->mgr->mctx, alloc_len); + sock->buf = isc_mem_get(sock->worker->mctx, alloc_len); sock->buf_size = alloc_len; } else { /* We have the buffer but it's too small */ - sock->buf = isc_mem_reget(sock->mgr->mctx, sock->buf, + sock->buf = isc_mem_reget(sock->worker->mctx, sock->buf, sock->buf_size, NM_BIG_BUF); sock->buf_size = NM_BIG_BUF; } @@ -1916,7 +1373,7 @@ isc__nm_failed_connect_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req, isc_result_t eresult, bool async) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_UVREQ(req)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(req->cb.connect != NULL); isc__nm_incstats(sock, STATID_CONNECTFAIL); @@ -1961,7 +1418,7 @@ isc__nmsocket_connecttimeout_cb(uv_timer_t *timer) { isc__nm_uvreq_t *req = uv_handle_get_data((uv_handle_t *)uvreq); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->connecting)); REQUIRE(VALID_UVREQ(req)); REQUIRE(VALID_NMHANDLE(req->handle)); @@ -2028,7 +1485,7 @@ isc__nmsocket_readtimeout_cb(uv_timer_t *timer) { isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)timer); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->reading)); if (atomic_load(&sock->client)) { @@ -2118,7 +1575,7 @@ isc__nm_uvreq_t * 
isc__nm_get_read_req(isc_nmsocket_t *sock, isc_sockaddr_t *sockaddr) { isc__nm_uvreq_t *req = NULL; - req = isc__nm_uvreq_get(sock->mgr, sock); + req = isc__nm_uvreq_get(sock->worker, sock); req->cb.recv = sock->recv_cb; req->cbarg = sock->recv_cbarg; @@ -2151,7 +1608,6 @@ isc__nm_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf) { isc__networker_t *worker = NULL; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(isc__nm_in_netthread()); /* * The size provided by libuv is only suggested size, and it always * defaults to 64 * 1024 in the current versions of libuv (see @@ -2159,7 +1615,7 @@ isc__nm_alloc_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf) { */ UNUSED(size); - worker = &sock->mgr->workers[sock->tid]; + worker = sock->worker; INSIST(!worker->recvbuf_inuse); INSIST(worker->recvbuf != NULL); @@ -2246,14 +1702,14 @@ isc__nm_stop_reading(isc_nmsocket_t *sock) { } bool -isc__nm_closing(isc_nmsocket_t *sock) { - return (atomic_load(&sock->mgr->closing)); +isc__nm_closing(isc__networker_t *worker) { + return (worker->shuttingdown); } bool isc__nmsocket_closing(isc_nmsocket_t *sock) { return (!isc__nmsocket_active(sock) || atomic_load(&sock->closing) || - isc__nm_closing(sock) || + isc__nm_closing(sock->worker) || (sock->server != NULL && !isc__nmsocket_active(sock->server))); } @@ -2333,7 +1789,7 @@ isc__nm_resume_processing(void *arg) { isc_nmsocket_t *sock = (isc_nmsocket_t *)arg; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(!atomic_load(&sock->client)); if (isc__nmsocket_closing(sock)) { @@ -2389,21 +1845,23 @@ isc_nmhandle_settimeout(isc_nmhandle_t *handle, uint32_t timeout) { void isc_nmhandle_keepalive(isc_nmhandle_t *handle, bool value) { isc_nmsocket_t *sock = NULL; + isc_nm_t *netmgr = NULL; REQUIRE(VALID_NMHANDLE(handle)); REQUIRE(VALID_NMSOCK(handle->sock)); sock = handle->sock; + netmgr = sock->worker->netmgr; switch (sock->type) { case isc_nm_tcpsocket: case isc_nm_tcpdnssocket: case 
isc_nm_tlsdnssocket: atomic_store(&sock->keepalive, value); - sock->read_timeout = value ? atomic_load(&sock->mgr->keepalive) - : atomic_load(&sock->mgr->idle); - sock->write_timeout = value ? atomic_load(&sock->mgr->keepalive) - : atomic_load(&sock->mgr->idle); + sock->read_timeout = value ? atomic_load(&netmgr->keepalive) + : atomic_load(&netmgr->idle); + sock->write_timeout = value ? atomic_load(&netmgr->keepalive) + : atomic_load(&netmgr->idle); break; #if HAVE_LIBNGHTTP2 case isc_nm_tlssocket: @@ -2448,14 +1906,15 @@ isc_nmhandle_netmgr(isc_nmhandle_t *handle) { REQUIRE(VALID_NMHANDLE(handle)); REQUIRE(VALID_NMSOCK(handle->sock)); - return (handle->sock->mgr); + return (handle->sock->worker->netmgr); } +/* FIXME: Use per-worker mempool */ isc__nm_uvreq_t * -isc___nm_uvreq_get(isc_nm_t *mgr, isc_nmsocket_t *sock FLARG) { +isc___nm_uvreq_get(isc__networker_t *worker, isc_nmsocket_t *sock FLARG) { isc__nm_uvreq_t *req = NULL; - REQUIRE(VALID_NM(mgr)); + REQUIRE(worker != NULL); REQUIRE(VALID_NMSOCK(sock)); if (sock != NULL && isc__nmsocket_active(sock)) { @@ -2464,7 +1923,7 @@ isc___nm_uvreq_get(isc_nm_t *mgr, isc_nmsocket_t *sock FLARG) { } if (req == NULL) { - req = isc_mem_get(mgr->mctx, sizeof(*req)); + req = isc_mem_get(worker->mctx, sizeof(*req)); } *req = (isc__nm_uvreq_t){ @@ -2504,10 +1963,10 @@ isc___nm_uvreq_put(isc__nm_uvreq_t **req0, isc_nmsocket_t *sock FLARG) { #if !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__ if (!isc__nmsocket_active(sock) || !isc_astack_trypush(sock->inactivereqs, req)) { - isc_mem_put(sock->mgr->mctx, req, sizeof(*req)); + isc_mem_put(sock->worker->mctx, req, sizeof(*req)); } #else /* !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__ */ - isc_mem_put(sock->mgr->mctx, req, sizeof(*req)); + isc_mem_put(sock->worker->mctx, req, sizeof(*req)); #endif /* !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__ */ if (handle != NULL) { @@ -2690,9 +2149,9 @@ isc__nm_connectcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq, 
isc__nm_async_connectcb(NULL, (isc__netievent_t *)&ievent); } else { isc__netievent_connectcb_t *ievent = - isc__nm_get_netievent_connectcb(sock->mgr, sock, uvreq, - eresult); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], + isc__nm_get_netievent_connectcb(sock->worker, sock, + uvreq, eresult); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } } @@ -2709,7 +2168,7 @@ isc__nm_async_connectcb(isc__networker_t *worker, isc__netievent_t *ev0) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_UVREQ(uvreq)); REQUIRE(VALID_NMHANDLE(uvreq->handle)); - REQUIRE(ievent->sock->tid == isc_nm_tid()); + REQUIRE(ievent->sock->tid == isc_tid()); REQUIRE(uvreq->cb.connect != NULL); uvreq->cb.connect(uvreq->handle, eresult, uvreq->cbarg); @@ -2725,15 +2184,16 @@ isc__nm_readcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq, REQUIRE(VALID_NMHANDLE(uvreq->handle)); if (eresult == ISC_R_SUCCESS || eresult == ISC_R_TIMEDOUT) { - isc__netievent_readcb_t ievent = { .sock = sock, + isc__netievent_readcb_t ievent = { .type = netievent_readcb, + .sock = sock, .req = uvreq, .result = eresult }; isc__nm_async_readcb(NULL, (isc__netievent_t *)&ievent); } else { isc__netievent_readcb_t *ievent = isc__nm_get_netievent_readcb( - sock->mgr, sock, uvreq, eresult); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], + sock->worker, sock, uvreq, eresult); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } } @@ -2751,7 +2211,7 @@ isc__nm_async_readcb(isc__networker_t *worker, isc__netievent_t *ev0) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_UVREQ(uvreq)); REQUIRE(VALID_NMHANDLE(uvreq->handle)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); region.base = (unsigned char *)uvreq->uvbuf.base; region.length = uvreq->uvbuf.len; @@ -2776,10 +2236,9 @@ isc__nm_sendcb(isc_nmsocket_t *sock, isc__nm_uvreq_t *uvreq, return; } - isc__netievent_sendcb_t *ievent = - isc__nm_get_netievent_sendcb(sock->mgr, sock, uvreq, eresult); - 
isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__netievent_sendcb_t *ievent = isc__nm_get_netievent_sendcb( + sock->worker, sock, uvreq, eresult); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } void @@ -2794,27 +2253,13 @@ isc__nm_async_sendcb(isc__networker_t *worker, isc__netievent_t *ev0) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_UVREQ(uvreq)); REQUIRE(VALID_NMHANDLE(uvreq->handle)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); uvreq->cb.send(uvreq->handle, eresult, uvreq->cbarg); isc__nm_uvreq_put(&uvreq, sock); } -static void -isc__nm_async_close(isc__networker_t *worker, isc__netievent_t *ev0) { - isc__netievent_close_t *ievent = (isc__netievent_close_t *)ev0; - isc_nmsocket_t *sock = ievent->sock; - - REQUIRE(VALID_NMSOCK(ievent->sock)); - REQUIRE(sock->tid == isc_nm_tid()); - REQUIRE(sock->closehandle_cb != NULL); - - UNUSED(worker); - - ievent->sock->closehandle_cb(sock); -} - void isc__nm_async_detach(isc__networker_t *worker, isc__netievent_t *ev0) { isc__netievent_detach_t *ievent = (isc__netievent_detach_t *)ev0; @@ -2822,7 +2267,7 @@ isc__nm_async_detach(isc__networker_t *worker, isc__netievent_t *ev0) { REQUIRE(VALID_NMSOCK(ievent->sock)); REQUIRE(VALID_NMHANDLE(ievent->handle)); - REQUIRE(ievent->sock->tid == isc_nm_tid()); + REQUIRE(ievent->sock->tid == isc_tid()); UNUSED(worker); @@ -2900,13 +2345,15 @@ isc__nmsocket_shutdown(isc_nmsocket_t *sock) { static void shutdown_walk_cb(uv_handle_t *handle, void *arg) { - isc_nmsocket_t *sock = uv_handle_get_data(handle); + isc_nmsocket_t *sock = NULL; UNUSED(arg); if (uv_is_closing(handle)) { return; } + sock = uv_handle_get_data(handle); + switch (handle->type) { case UV_UDP: isc__nmsocket_shutdown(sock); @@ -2932,58 +2379,6 @@ shutdown_walk_cb(uv_handle_t *handle, void *arg) { } } -void -isc__nm_async_shutdown(isc__networker_t *worker, isc__netievent_t *ev0) { - UNUSED(ev0); - - uv_walk(&worker->loop, 
shutdown_walk_cb, NULL); -} - -bool -isc__nm_acquire_interlocked(isc_nm_t *mgr) { - if (!isc__nm_in_netthread()) { - return (false); - } - - LOCK(&mgr->lock); - bool success = atomic_compare_exchange_strong( - &mgr->interlocked, &(int){ ISC_NETMGR_NON_INTERLOCKED }, - isc_nm_tid()); - - UNLOCK(&mgr->lock); - return (success); -} - -void -isc__nm_drop_interlocked(isc_nm_t *mgr) { - if (!isc__nm_in_netthread()) { - return; - } - - LOCK(&mgr->lock); - int tid = atomic_exchange(&mgr->interlocked, - ISC_NETMGR_NON_INTERLOCKED); - INSIST(tid != ISC_NETMGR_NON_INTERLOCKED); - BROADCAST(&mgr->wkstatecond); - UNLOCK(&mgr->lock); -} - -void -isc__nm_acquire_interlocked_force(isc_nm_t *mgr) { - if (!isc__nm_in_netthread()) { - return; - } - - LOCK(&mgr->lock); - while (!atomic_compare_exchange_strong( - &mgr->interlocked, &(int){ ISC_NETMGR_NON_INTERLOCKED }, - isc_nm_tid())) - { - WAIT(&mgr->wkstatecond, &mgr->lock); - } - UNLOCK(&mgr->lock); -} - void isc_nm_setstats(isc_nm_t *mgr, isc_stats_t *stats) { REQUIRE(VALID_NM(mgr)); @@ -2998,8 +2393,9 @@ isc__nm_incstats(isc_nmsocket_t *sock, isc__nm_statid_t id) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(id < STATID_MAX); - if (sock->statsindex != NULL && sock->mgr->stats != NULL) { - isc_stats_increment(sock->mgr->stats, sock->statsindex[id]); + if (sock->statsindex != NULL && sock->worker->netmgr->stats != NULL) { + isc_stats_increment(sock->worker->netmgr->stats, + sock->statsindex[id]); } } @@ -3008,8 +2404,9 @@ isc__nm_decstats(isc_nmsocket_t *sock, isc__nm_statid_t id) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(id < STATID_MAX); - if (sock->statsindex != NULL && sock->mgr->stats != NULL) { - isc_stats_decrement(sock->mgr->stats, sock->statsindex[id]); + if (sock->statsindex != NULL && sock->worker->netmgr->stats != NULL) { + isc_stats_decrement(sock->worker->netmgr->stats, + sock->statsindex[id]); } } @@ -3103,73 +2500,6 @@ isc__nm_set_network_buffers(isc_nm_t *nm, uv_handle_t *handle) { } } -static isc_threadresult_t 
-isc__nm_work_run(isc_threadarg_t arg) { - isc__nm_work_t *work = (isc__nm_work_t *)arg; - - work->cb(work->data); - - return ((isc_threadresult_t)0); -} - -static void -isc__nm_work_cb(uv_work_t *req) { - isc__nm_work_t *work = uv_req_get_data((uv_req_t *)req); - - if (isc_tid_v == SIZE_MAX) { - isc__trampoline_t *trampoline_arg = - isc__trampoline_get(isc__nm_work_run, work); - (void)isc__trampoline_run(trampoline_arg); - } else { - (void)isc__nm_work_run((isc_threadarg_t)work); - } -} - -static void -isc__nm_after_work_cb(uv_work_t *req, int status) { - isc_result_t result = ISC_R_SUCCESS; - isc__nm_work_t *work = uv_req_get_data((uv_req_t *)req); - isc_nm_t *netmgr = work->netmgr; - - if (status != 0) { - result = isc_uverr2result(status); - } - - work->after_cb(work->data, result); - - isc_mem_put(netmgr->mctx, work, sizeof(*work)); - - isc_nm_detach(&netmgr); -} - -void -isc_nm_work_offload(isc_nm_t *netmgr, isc_nm_workcb_t work_cb, - isc_nm_after_workcb_t after_work_cb, void *data) { - isc__networker_t *worker = NULL; - isc__nm_work_t *work = NULL; - int r; - - REQUIRE(isc__nm_in_netthread()); - REQUIRE(VALID_NM(netmgr)); - - worker = &netmgr->workers[isc_nm_tid()]; - - work = isc_mem_get(netmgr->mctx, sizeof(*work)); - *work = (isc__nm_work_t){ - .cb = work_cb, - .after_cb = after_work_cb, - .data = data, - }; - - isc_nm_attach(netmgr, &work->netmgr); - - uv_req_set_data((uv_req_t *)&work->req, work); - - r = uv_queue_work(&worker->loop, &work->req, isc__nm_work_cb, - isc__nm_after_work_cb); - UV_RUNTIME_CHECK(uv_queue_work, r); -} - void isc_nm_bad_request(isc_nmhandle_t *handle) { isc_nmsocket_t *sock = NULL; @@ -3204,7 +2534,7 @@ isc_nm_bad_request(isc_nmhandle_t *handle) { bool isc_nm_xfr_allowed(isc_nmhandle_t *handle) { - isc_nmsocket_t *sock; + isc_nmsocket_t *sock = NULL; REQUIRE(VALID_NMHANDLE(handle)); REQUIRE(VALID_NMSOCK(handle->sock)); @@ -3298,16 +2628,9 @@ isc_nm_has_encryption(const isc_nmhandle_t *handle) { return (false); } -uint32_t 
-isc_nm_getnworkers(const isc_nm_t *netmgr) { - REQUIRE(VALID_NM(netmgr)); - - return (netmgr->nworkers); -} - const char * isc_nm_verify_tls_peer_result_string(const isc_nmhandle_t *handle) { - isc_nmsocket_t *sock; + isc_nmsocket_t *sock = NULL; REQUIRE(VALID_NMHANDLE(handle)); REQUIRE(VALID_NMSOCK(handle->sock)); @@ -3335,7 +2658,7 @@ isc_nm_verify_tls_peer_result_string(const isc_nmhandle_t *handle) { void isc__nm_async_settlsctx(isc__networker_t *worker, isc__netievent_t *ev0) { isc__netievent__tlsctx_t *ev_tlsctx = (isc__netievent__tlsctx_t *)ev0; - const int tid = isc_nm_tid(); + const int tid = isc_tid(); isc_nmsocket_t *listener = ev_tlsctx->sock; isc_tlsctx_t *tlsctx = ev_tlsctx->tlsctx; @@ -3358,12 +2681,13 @@ isc__nm_async_settlsctx(isc__networker_t *worker, isc__netievent_t *ev0) { static void set_tlsctx_workers(isc_nmsocket_t *listener, isc_tlsctx_t *tlsctx) { + uint32_t nloops = isc_loopmgr_nloops(listener->worker->netmgr->loopmgr); /* Update the TLS context reference for every worker thread. 
*/ - for (size_t i = 0; i < isc_nm_getnworkers(listener->mgr); i++) { + for (size_t i = 0; i < nloops; i++) { isc__netievent__tlsctx_t *ievent = - isc__nm_get_netievent_settlsctx(listener->mgr, listener, - tlsctx); - isc__nm_enqueue_ievent(&listener->mgr->workers[i], + isc__nm_get_netievent_settlsctx(listener->worker, + listener, tlsctx); + isc__nm_enqueue_ievent(listener->worker, (isc__netievent_t *)ievent); } } @@ -3435,6 +2759,26 @@ isc__nmsocket_log_tls_session_reuse(isc_nmsocket_t *sock, isc_tls_t *tls) { client_sabuf, local_sabuf); } +static void +isc__networker_destroy(isc__networker_t *worker) { + isc_nm_t *netmgr = worker->netmgr; + worker->netmgr = NULL; + + isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_NETMGR, + ISC_LOG_DEBUG(1), + "Destroying down network manager worker on loop %p(%d)", + worker->loop, isc_tid()); + + isc_loop_detach(&worker->loop); + + isc_mem_put(worker->mctx, worker->sendbuf, ISC_NETMGR_SENDBUF_SIZE); + isc_mem_putanddetach(&worker->mctx, worker->recvbuf, + ISC_NETMGR_RECVBUF_SIZE); + isc_nm_detach(&netmgr); +} + +ISC_REFCOUNT_IMPL(isc__networker, isc__networker_destroy); + #ifdef NETMGR_TRACE /* * Dump all active sockets in netmgr. We output to stderr @@ -3487,7 +2831,7 @@ static void nmsocket_dump(isc_nmsocket_t *sock) { isc_nmhandle_t *handle = NULL; - LOCK(&sock->lock); + LOCK(&sock->tracelock); fprintf(stderr, "\n=================\n"); fprintf(stderr, "Active %s socket %p, type %s, refs %" PRIuFAST32 "\n", atomic_load(&sock->client) ? 
"client" : "server", sock, @@ -3520,7 +2864,7 @@ nmsocket_dump(isc_nmsocket_t *sock) { } fprintf(stderr, "\n"); - UNLOCK(&sock->lock); + UNLOCK(&sock->tracelock); } void diff --git a/lib/isc/netmgr/tcp.c b/lib/isc/netmgr/tcp.c index 6b417963e1..60b7a46eb0 100644 --- a/lib/isc/netmgr/tcp.c +++ b/lib/isc/netmgr/tcp.c @@ -34,6 +34,7 @@ #include #include +#include "../loop_p.h" #include "netmgr-int.h" static atomic_uint_fast32_t last_tcpquota_log = 0; @@ -77,11 +78,6 @@ quota_accept_cb(isc_quota_t *quota, void *sock0); static void failed_accept_cb(isc_nmsocket_t *sock, isc_result_t eresult); -static void -stop_tcp_parent(isc_nmsocket_t *sock); -static void -stop_tcp_child(isc_nmsocket_t *sock); - static void failed_accept_cb(isc_nmsocket_t *sock, isc_result_t eresult) { REQUIRE(atomic_load(&sock->accepting)); @@ -121,10 +117,9 @@ tcp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_UVREQ(req)); - REQUIRE(isc__nm_in_netthread()); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); - worker = &sock->mgr->workers[sock->tid]; + worker = sock->worker; atomic_store(&sock->connecting, true); @@ -132,11 +127,11 @@ tcp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { result = isc__nm_socket_connectiontimeout(sock->fd, 120 * 1000); RUNTIME_CHECK(result == ISC_R_SUCCESS); - r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp); + r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp); UV_RUNTIME_CHECK(uv_tcp_init, r); uv_handle_set_data(&sock->uv_handle.handle, sock); - r = uv_timer_init(&worker->loop, &sock->read_timer); + r = uv_timer_init(&worker->loop->loop, &sock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock); @@ -144,7 +139,7 @@ tcp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { if (r != 0) { isc__nm_closesocket(sock->fd); isc__nm_incstats(sock, STATID_OPENFAIL); - goto done; + return (isc_uverr2result(r)); } 
isc__nm_incstats(sock, STATID_OPEN); @@ -152,18 +147,19 @@ tcp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { r = uv_tcp_bind(&sock->uv_handle.tcp, &req->local.type.sa, 0); if (r != 0) { isc__nm_incstats(sock, STATID_BINDFAIL); - goto done; + return (isc_uverr2result(r)); } } - isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle); + isc__nm_set_network_buffers(sock->worker->netmgr, + &sock->uv_handle.handle); uv_handle_set_data(&req->uv_req.handle, req); r = uv_tcp_connect(&req->uv_req.connect, &sock->uv_handle.tcp, &req->peer.type.sa, tcp_connect_cb); if (r != 0) { isc__nm_incstats(sock, STATID_CONNECTFAIL); - goto done; + return (isc_uverr2result(r)); } uv_handle_set_data((uv_handle_t *)&sock->read_timer, @@ -172,18 +168,7 @@ tcp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { atomic_store(&sock->connected, true); -done: - result = isc_uverr2result(r); - LOCK(&sock->lock); - sock->result = result; - SIGNAL(&sock->cond); - if (!atomic_load(&sock->active)) { - WAIT(&sock->scond, &sock->lock); - } - INSIST(atomic_load(&sock->active)); - UNLOCK(&sock->lock); - - return (result); + return (ISC_R_SUCCESS); } void @@ -199,7 +184,7 @@ isc__nm_async_tcpconnect(isc__networker_t *worker, isc__netievent_t *ev0) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_tcpsocket); REQUIRE(sock->parent == NULL); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); result = tcp_connect_direct(sock, req); if (result != ISC_R_SUCCESS) { @@ -222,10 +207,13 @@ tcp_connect_cb(uv_connect_t *uvreq, int status) { isc__nm_uvreq_t *req = NULL; isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)uvreq->handle); struct sockaddr_storage ss; + isc__networker_t *worker = NULL; int r; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); + + worker = sock->worker; req = uv_handle_get_data((uv_handle_t *)uvreq); @@ -242,7 +230,7 @@ tcp_connect_cb(uv_connect_t *uvreq, int status) { */ 
isc__nm_uvreq_put(&req, sock); return; - } else if (isc__nm_closing(sock)) { + } else if (isc__nm_closing(worker)) { /* Network manager shutting down */ result = ISC_R_SHUTTINGDOWN; goto error; @@ -308,68 +296,51 @@ isc_nm_tcpconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, isc__netievent_tcpconnect_t *ievent = NULL; isc__nm_uvreq_t *req = NULL; sa_family_t sa_family; + isc__networker_t *worker = &mgr->workers[isc_tid()]; + uv_os_sock_t fd = -1; REQUIRE(VALID_NM(mgr)); REQUIRE(local != NULL); REQUIRE(peer != NULL); + if (isc__nm_closing(worker)) { + cb(NULL, ISC_R_SHUTTINGDOWN, cbarg); + return; + } + sa_family = peer->type.sa.sa_family; - sock = isc_mem_get(mgr->mctx, sizeof(*sock)); - isc__nmsocket_init(sock, mgr, isc_nm_tcpsocket, local); + result = isc__nm_socket(sa_family, SOCK_STREAM, 0, &fd); + if (result != ISC_R_SUCCESS) { + cb(NULL, result, cbarg); + return; + } + + sock = isc_mem_get(worker->mctx, sizeof(*sock)); + isc__nmsocket_init(sock, worker, isc_nm_tcpsocket, local); sock->connect_timeout = timeout; - sock->result = ISC_R_UNSET; - sock->fd = (uv_os_sock_t)-1; + sock->fd = fd; atomic_init(&sock->client, true); - req = isc__nm_uvreq_get(mgr, sock); + req = isc__nm_uvreq_get(worker, sock); req->cb.connect = cb; req->cbarg = cbarg; req->peer = *peer; req->local = *local; req->handle = isc__nmhandle_get(sock, &req->peer, &sock->iface); - result = isc__nm_socket(sa_family, SOCK_STREAM, 0, &sock->fd); - if (result != ISC_R_SUCCESS) { - if (isc__nm_in_netthread()) { - sock->tid = isc_nm_tid(); - isc__nmsocket_clearcb(sock); - isc__nm_connectcb(sock, req, result, false); - } else { - isc__nmsocket_clearcb(sock); - sock->tid = isc_random_uniform(mgr->nworkers); - isc__nm_connectcb(sock, req, result, true); - } - atomic_store(&sock->closed, true); - isc__nmsocket_detach(&sock); - return; - } - (void)isc__nm_socket_min_mtu(sock->fd, sa_family); (void)isc__nm_socket_tcp_maxseg(sock->fd, NM_MAXSEG); - ievent = 
isc__nm_get_netievent_tcpconnect(mgr, sock, req); + ievent = isc__nm_get_netievent_tcpconnect(worker, sock, req); + + atomic_store(&sock->active, true); + isc__nm_async_tcpconnect(&mgr->workers[sock->tid], + (isc__netievent_t *)ievent); + isc__nm_put_netievent_tcpconnect(worker, ievent); - if (isc__nm_in_netthread()) { - atomic_store(&sock->active, true); - sock->tid = isc_nm_tid(); - isc__nm_async_tcpconnect(&mgr->workers[sock->tid], - (isc__netievent_t *)ievent); - isc__nm_put_netievent_tcpconnect(mgr, ievent); - } else { - atomic_init(&sock->active, false); - sock->tid = isc_random_uniform(mgr->nworkers); - isc__nm_enqueue_ievent(&mgr->workers[sock->tid], - (isc__netievent_t *)ievent); - } - LOCK(&sock->lock); - while (sock->result == ISC_R_UNSET) { - WAIT(&sock->cond, &sock->lock); - } atomic_store(&sock->active, true); - BROADCAST(&sock->scond); - UNLOCK(&sock->lock); } static uv_os_sock_t @@ -401,13 +372,14 @@ start_tcp_child(isc_nm_t *mgr, isc_sockaddr_t *iface, isc_nmsocket_t *sock, uv_os_sock_t fd, int tid) { isc__netievent_tcplisten_t *ievent = NULL; isc_nmsocket_t *csock = &sock->children[tid]; + isc__networker_t *worker = &mgr->workers[tid]; - isc__nmsocket_init(csock, mgr, isc_nm_tcpsocket, iface); + isc__nmsocket_init(csock, worker, isc_nm_tcpsocket, iface); csock->parent = sock; csock->accept_cb = sock->accept_cb; csock->accept_cbarg = sock->accept_cbarg; csock->backlog = sock->backlog; - csock->tid = tid; + /* * We don't attach to quota, just assign - to avoid * increasing quota unnecessarily. 
@@ -424,92 +396,84 @@ start_tcp_child(isc_nm_t *mgr, isc_sockaddr_t *iface, isc_nmsocket_t *sock, } REQUIRE(csock->fd >= 0); - ievent = isc__nm_get_netievent_tcplisten(mgr, csock); - isc__nm_maybe_enqueue_ievent(&mgr->workers[tid], - (isc__netievent_t *)ievent); -} + ievent = isc__nm_get_netievent_tcplisten(csock->worker, csock); -static void -enqueue_stoplistening(isc_nmsocket_t *sock) { - isc__netievent_tcpstop_t *ievent = - isc__nm_get_netievent_tcpstop(sock->mgr, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + if (tid == 0) { + isc__nm_process_ievent(csock->worker, + (isc__netievent_t *)ievent); + } else { + isc__nm_enqueue_ievent(csock->worker, + (isc__netievent_t *)ievent); + } } isc_result_t isc_nm_listentcp(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface, isc_nm_accept_cb_t accept_cb, void *accept_cbarg, int backlog, isc_quota_t *quota, isc_nmsocket_t **sockp) { - isc_result_t result = ISC_R_SUCCESS; isc_nmsocket_t *sock = NULL; size_t children_size = 0; uv_os_sock_t fd = -1; + isc_result_t result = ISC_R_UNSET; + isc__networker_t *worker = &mgr->workers[0]; REQUIRE(VALID_NM(mgr)); + REQUIRE(isc_tid() == 0); - sock = isc_mem_get(mgr->mctx, sizeof(*sock)); - isc__nmsocket_init(sock, mgr, isc_nm_tcplistener, iface); + if (workers == 0) { + workers = mgr->nloops; + } + REQUIRE(workers <= mgr->nloops); + + sock = isc_mem_get(worker->mctx, sizeof(*sock)); + isc__nmsocket_init(sock, worker, isc_nm_tcplistener, iface); atomic_init(&sock->rchildren, 0); - sock->nchildren = (workers == ISC_NM_LISTEN_ALL) - ? (uint32_t)mgr->nworkers - : workers; + sock->nchildren = (workers == ISC_NM_LISTEN_ALL) ? 
(uint32_t)mgr->nloops + : workers; children_size = sock->nchildren * sizeof(sock->children[0]); - sock->children = isc_mem_get(mgr->mctx, children_size); + sock->children = isc_mem_get(worker->mctx, children_size); memset(sock->children, 0, children_size); - sock->result = ISC_R_UNSET; + isc_barrier_init(&sock->barrier, sock->nchildren); sock->accept_cb = accept_cb; sock->accept_cbarg = accept_cbarg; sock->backlog = backlog; sock->pquota = quota; - sock->tid = 0; - sock->fd = -1; - if (!mgr->load_balance_sockets) { fd = isc__nm_tcp_lb_socket(mgr, iface->type.sa.sa_family); } - isc_barrier_init(&sock->startlistening, sock->nchildren); - - for (size_t i = 0; i < sock->nchildren; i++) { - if ((int)i == isc_nm_tid()) { - continue; - } + for (size_t i = 1; i < sock->nchildren; i++) { start_tcp_child(mgr, iface, sock, fd, i); } - if (isc__nm_in_netthread()) { - start_tcp_child(mgr, iface, sock, fd, isc_nm_tid()); - } + start_tcp_child(mgr, iface, sock, fd, 0); if (!mgr->load_balance_sockets) { isc__nm_closesocket(fd); } LOCK(&sock->lock); - while (atomic_load(&sock->rchildren) != sock->nchildren) { - WAIT(&sock->cond, &sock->lock); - } result = sock->result; - atomic_store(&sock->active, true); UNLOCK(&sock->lock); - INSIST(result != ISC_R_UNSET); - if (result == ISC_R_SUCCESS) { - REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren); - *sockp = sock; - } else { + atomic_store(&sock->active, true); + + if (result != ISC_R_SUCCESS) { atomic_store(&sock->active, false); - enqueue_stoplistening(sock); + isc__nm_tcp_stoplistening(sock); isc_nmsocket_close(&sock); + + return (result); } - return (result); + REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren); + *sockp = sock; + return (ISC_R_SUCCESS); } void @@ -519,37 +483,32 @@ isc__nm_async_tcplisten(isc__networker_t *worker, isc__netievent_t *ev0) { int r; int flags = 0; isc_nmsocket_t *sock = NULL; - isc_result_t result; - isc_nm_t *mgr; + isc_result_t result = ISC_R_UNSET; REQUIRE(VALID_NMSOCK(ievent->sock)); 
- REQUIRE(ievent->sock->tid == isc_nm_tid()); + REQUIRE(ievent->sock->tid == isc_tid()); REQUIRE(VALID_NMSOCK(ievent->sock->parent)); sock = ievent->sock; sa_family = sock->iface.type.sa.sa_family; - mgr = sock->mgr; REQUIRE(sock->type == isc_nm_tcpsocket); REQUIRE(sock->parent != NULL); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); (void)isc__nm_socket_min_mtu(sock->fd, sa_family); (void)isc__nm_socket_tcp_maxseg(sock->fd, NM_MAXSEG); - r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp); + r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp); UV_RUNTIME_CHECK(uv_tcp_init, r); - uv_handle_set_data(&sock->uv_handle.handle, sock); /* This keeps the socket alive after everything else is gone */ isc__nmsocket_attach(sock, &(isc_nmsocket_t *){ NULL }); - r = uv_timer_init(&worker->loop, &sock->read_timer); + r = uv_timer_init(&worker->loop->loop, &sock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock); - LOCK(&sock->parent->lock); - r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd); if (r < 0) { isc__nm_closesocket(sock->fd); @@ -562,7 +521,7 @@ isc__nm_async_tcplisten(isc__networker_t *worker, isc__netievent_t *ev0) { flags = UV_TCP_IPV6ONLY; } - if (mgr->load_balance_sockets) { + if (sock->worker->netmgr->load_balance_sockets) { r = isc__nm_tcp_freebind(&sock->uv_handle.tcp, &sock->iface.type.sa, flags); if (r < 0) { @@ -570,11 +529,13 @@ isc__nm_async_tcplisten(isc__networker_t *worker, isc__netievent_t *ev0) { goto done; } } else { + LOCK(&sock->parent->lock); if (sock->parent->fd == -1) { r = isc__nm_tcp_freebind(&sock->uv_handle.tcp, &sock->iface.type.sa, flags); if (r < 0) { isc__nm_incstats(sock, STATID_BINDFAIL); + UNLOCK(&sock->parent->lock); goto done; } sock->parent->uv_handle.tcp.flags = @@ -585,9 +546,11 @@ isc__nm_async_tcplisten(isc__networker_t *worker, isc__netievent_t *ev0) { sock->uv_handle.tcp.flags = sock->parent->uv_handle.tcp.flags; } + 
UNLOCK(&sock->parent->lock); } - isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle); + isc__nm_set_network_buffers(sock->worker->netmgr, + &sock->uv_handle.handle); /* * The callback will run in the same thread uv_listen() was called @@ -608,18 +571,22 @@ isc__nm_async_tcplisten(isc__networker_t *worker, isc__netievent_t *ev0) { done: result = isc_uverr2result(r); + atomic_fetch_add(&sock->parent->rchildren, 1); + if (result != ISC_R_SUCCESS) { sock->pquota = NULL; } - atomic_fetch_add(&sock->parent->rchildren, 1); + LOCK(&sock->parent->lock); if (sock->parent->result == ISC_R_UNSET) { sock->parent->result = result; + } else { + REQUIRE(sock->parent->result == result); } - SIGNAL(&sock->parent->cond); UNLOCK(&sock->parent->lock); - isc_barrier_wait(&sock->parent->startlistening); + REQUIRE(!worker->loop->paused); + isc_barrier_wait(&sock->parent->barrier); } static void @@ -634,7 +601,7 @@ tcp_connection_cb(uv_stream_t *server, int status) { } REQUIRE(VALID_NMSOCK(ssock)); - REQUIRE(ssock->tid == isc_nm_tid()); + REQUIRE(ssock->tid == isc_tid()); if (isc__nmsocket_closing(ssock)) { result = ISC_R_CANCELED; @@ -655,21 +622,50 @@ done: isc__nm_accept_connection_log(result, can_log_tcp_quota()); } +static void +stop_tcp_child(isc_nmsocket_t *sock, uint32_t tid) { + isc_nmsocket_t *csock = NULL; + isc__netievent_tcpstop_t *ievent = NULL; + + csock = &sock->children[tid]; + REQUIRE(VALID_NMSOCK(csock)); + + atomic_store(&csock->active, false); + ievent = isc__nm_get_netievent_tcpstop(csock->worker, csock); + + if (tid == 0) { + isc__nm_process_ievent(csock->worker, + (isc__netievent_t *)ievent); + } else { + isc__nm_enqueue_ievent(csock->worker, + (isc__netievent_t *)ievent); + } +} + +static void +stop_tcp_parent(isc_nmsocket_t *sock) { + /* Stop the parent */ + atomic_store(&sock->closed, true); + isc__nmsocket_prep_destroy(sock); +} + void isc__nm_tcp_stoplistening(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == 
isc_nm_tcplistener); + REQUIRE(sock->tid == isc_tid()); + REQUIRE(sock->tid == 0); - if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false }, - true)) { - UNREACHABLE(); + RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing, + &(bool){ false }, true)); + + for (size_t i = 1; i < sock->nchildren; i++) { + stop_tcp_child(sock, i); } - if (!isc__nm_in_netthread()) { - enqueue_stoplistening(sock); - } else { - stop_tcp_parent(sock); - } + stop_tcp_child(sock, 0); + + stop_tcp_parent(sock); } void @@ -680,14 +676,19 @@ isc__nm_async_tcpstop(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); + REQUIRE(sock->parent != NULL); + REQUIRE(sock->type == isc_nm_tcpsocket); - if (sock->parent != NULL) { - stop_tcp_child(sock); - return; - } + RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing, + &(bool){ false }, true)); - stop_tcp_parent(sock); + tcp_close_direct(sock); + + (void)atomic_fetch_sub(&sock->parent->rchildren, 1); + + REQUIRE(!worker->loop->paused); + isc_barrier_wait(&sock->parent->barrier); } void @@ -728,6 +729,7 @@ isc__nm_tcp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { isc_nmsocket_t *sock = handle->sock; isc__netievent_tcpstartread_t *ievent = NULL; + isc_nm_t *netmgr = sock->worker->netmgr; REQUIRE(sock->type == isc_nm_tcpsocket); REQUIRE(sock->statichandle == handle); @@ -736,13 +738,12 @@ isc__nm_tcp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { sock->recv_cbarg = cbarg; sock->recv_read = true; if (sock->read_timeout == 0) { - sock->read_timeout = - (atomic_load(&sock->keepalive) - ? atomic_load(&sock->mgr->keepalive) - : atomic_load(&sock->mgr->idle)); + sock->read_timeout = (atomic_load(&sock->keepalive) + ? 
atomic_load(&netmgr->keepalive) + : atomic_load(&netmgr->idle)); } - ievent = isc__nm_get_netievent_tcpstartread(sock->mgr, sock); + ievent = isc__nm_get_netievent_tcpstartread(sock->worker, sock); /* * This MUST be done asynchronously, no matter which thread we're @@ -750,8 +751,7 @@ isc__nm_tcp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { * isc_nm_read() again; if we tried to do that synchronously * we'd clash in processbuffer() and grow the stack indefinitely. */ - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); return; } @@ -764,7 +764,7 @@ isc__nm_async_tcpstartread(isc__networker_t *worker, isc__netievent_t *ev0) { isc_result_t result; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); UNUSED(worker); if (isc__nmsocket_closing(sock)) { @@ -798,10 +798,9 @@ isc__nm_tcp_pauseread(isc_nmhandle_t *handle) { return; } - ievent = isc__nm_get_netievent_tcppauseread(sock->mgr, sock); + ievent = isc__nm_get_netievent_tcppauseread(sock->worker, sock); - isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); return; } @@ -813,7 +812,7 @@ isc__nm_async_tcppauseread(isc__networker_t *worker, isc__netievent_t *ev0) { isc_nmsocket_t *sock = ievent->sock; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); UNUSED(worker); isc__nmsocket_timer_stop(sock); @@ -828,7 +827,7 @@ isc__nm_tcp_resumeread(isc_nmhandle_t *handle) { isc__netievent_tcpstartread_t *ievent = NULL; isc_nmsocket_t *sock = handle->sock; - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); if (sock->recv_cb == NULL) { /* We are no longer reading */ @@ -846,22 +845,24 @@ isc__nm_tcp_resumeread(isc_nmhandle_t *handle) { return; } - ievent = 
isc__nm_get_netievent_tcpstartread(sock->mgr, sock); + ievent = isc__nm_get_netievent_tcpstartread(sock->worker, sock); - isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } void isc__nm_tcp_read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) { isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)stream); isc__nm_uvreq_t *req = NULL; + isc_nm_t *netmgr = NULL; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->reading)); REQUIRE(buf != NULL); + netmgr = sock->worker->netmgr; + if (isc__nmsocket_closing(sock)) { isc__nm_tcp_failed_read_cb(sock, ISC_R_CANCELED); goto free; @@ -888,10 +889,9 @@ isc__nm_tcp_read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) { req->uvbuf.len = nread; if (!atomic_load(&sock->client)) { - sock->read_timeout = - (atomic_load(&sock->keepalive) - ? atomic_load(&sock->mgr->keepalive) - : atomic_load(&sock->mgr->idle)); + sock->read_timeout = (atomic_load(&sock->keepalive) + ? atomic_load(&netmgr->keepalive) + : atomic_load(&netmgr->idle)); } isc__nm_readcb(sock, req, ISC_R_SUCCESS); @@ -925,9 +925,8 @@ quota_accept_cb(isc_quota_t *quota, void *sock0) { /* * Create a tcpaccept event and pass it using the async channel. 
*/ - ievent = isc__nm_get_netievent_tcpaccept(sock->mgr, sock, quota); - isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tcpaccept(sock->worker, sock, quota); + isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } /* @@ -942,7 +941,7 @@ isc__nm_async_tcpaccept(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); result = accept_connection(sock, ievent->quota); isc__nm_accept_connection_log(result, can_log_tcp_quota()); @@ -959,7 +958,7 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { isc_nmhandle_t *handle = NULL; REQUIRE(VALID_NMSOCK(ssock)); - REQUIRE(ssock->tid == isc_nm_tid()); + REQUIRE(ssock->tid == isc_tid()); if (isc__nmsocket_closing(ssock)) { if (quota != NULL) { @@ -968,22 +967,22 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { return (ISC_R_CANCELED); } - csock = isc_mem_get(ssock->mgr->mctx, sizeof(isc_nmsocket_t)); - isc__nmsocket_init(csock, ssock->mgr, isc_nm_tcpsocket, &ssock->iface); - csock->tid = ssock->tid; + csock = isc_mem_get(ssock->worker->mctx, sizeof(isc_nmsocket_t)); + isc__nmsocket_init(csock, ssock->worker, isc_nm_tcpsocket, + &ssock->iface); isc__nmsocket_attach(ssock, &csock->server); csock->recv_cb = ssock->recv_cb; csock->recv_cbarg = ssock->recv_cbarg; csock->quota = quota; atomic_init(&csock->accepting, true); - worker = &csock->mgr->workers[isc_nm_tid()]; + worker = csock->worker; - r = uv_tcp_init(&worker->loop, &csock->uv_handle.tcp); + r = uv_tcp_init(&worker->loop->loop, &csock->uv_handle.tcp); UV_RUNTIME_CHECK(uv_tcp_init, r); uv_handle_set_data(&csock->uv_handle.handle, csock); - r = uv_timer_init(&worker->loop, &csock->read_timer); + r = uv_timer_init(&worker->loop->loop, &csock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t 
*)&csock->read_timer, csock); @@ -1030,7 +1029,7 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { isc__nm_incstats(csock, STATID_ACCEPT); - csock->read_timeout = atomic_load(&csock->mgr->init); + csock->read_timeout = atomic_load(&csock->worker->netmgr->init); atomic_fetch_add(&ssock->parent->active_child_connections, 1); @@ -1071,7 +1070,7 @@ isc__nm_tcp_send(isc_nmhandle_t *handle, const isc_region_t *region, REQUIRE(sock->type == isc_nm_tcpsocket); - uvreq = isc__nm_uvreq_get(sock->mgr, sock); + uvreq = isc__nm_uvreq_get(sock->worker, sock); uvreq->uvbuf.base = (char *)region->base; uvreq->uvbuf.len = region->length; @@ -1080,9 +1079,8 @@ isc__nm_tcp_send(isc_nmhandle_t *handle, const isc_region_t *region, uvreq->cb.send = cb; uvreq->cbarg = cbarg; - ievent = isc__nm_get_netievent_tcpsend(sock->mgr, sock, uvreq); - isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tcpsend(sock->worker, sock, uvreq); + isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); return; } @@ -1118,16 +1116,16 @@ isc__nm_async_tcpsend(isc__networker_t *worker, isc__netievent_t *ev0) { isc__netievent_tcpsend_t *ievent = (isc__netievent_tcpsend_t *)ev0; isc_nmsocket_t *sock = ievent->sock; isc__nm_uvreq_t *uvreq = ievent->req; + isc_nm_t *netmgr = sock->worker->netmgr; REQUIRE(sock->type == isc_nm_tcpsocket); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); UNUSED(worker); if (sock->write_timeout == 0) { - sock->write_timeout = - (atomic_load(&sock->keepalive) - ? atomic_load(&sock->mgr->keepalive) - : atomic_load(&sock->mgr->idle)); + sock->write_timeout = (atomic_load(&sock->keepalive) + ? 
atomic_load(&netmgr->keepalive) + : atomic_load(&netmgr->idle)); } result = tcp_send_direct(sock, uvreq); @@ -1141,7 +1139,7 @@ static isc_result_t tcp_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_UVREQ(req)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(sock->type == isc_nm_tcpsocket); int r; @@ -1171,8 +1169,9 @@ tcp_stop_cb(uv_handle_t *handle) { uv_handle_set_data(handle, NULL); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->closing)); + REQUIRE(sock->type == isc_nm_tcpsocket); if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false }, true)) { @@ -1189,7 +1188,7 @@ tcp_stop_cb(uv_handle_t *handle) { static void tcp_close_sock(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->closing)); if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false }, @@ -1230,60 +1229,10 @@ read_timer_close_cb(uv_handle_t *handle) { } } -static void -stop_tcp_child(isc_nmsocket_t *sock) { - REQUIRE(sock->type == isc_nm_tcpsocket); - REQUIRE(sock->tid == isc_nm_tid()); - - if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false }, - true)) { - return; - } - - tcp_close_direct(sock); - - atomic_fetch_sub(&sock->parent->rchildren, 1); - - isc_barrier_wait(&sock->parent->stoplistening); -} - -static void -stop_tcp_parent(isc_nmsocket_t *sock) { - isc_nmsocket_t *csock = NULL; - - REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); - REQUIRE(sock->type == isc_nm_tcplistener); - - isc_barrier_init(&sock->stoplistening, sock->nchildren); - - for (size_t i = 0; i < sock->nchildren; i++) { - csock = &sock->children[i]; - REQUIRE(VALID_NMSOCK(csock)); - - if ((int)i == isc_nm_tid()) { - /* - * We need to schedule closing the other sockets first - */ - continue; - } - - 
atomic_store(&csock->active, false); - enqueue_stoplistening(csock); - } - - csock = &sock->children[isc_nm_tid()]; - atomic_store(&csock->active, false); - stop_tcp_child(csock); - - atomic_store(&sock->closed, true); - isc__nmsocket_prep_destroy(sock); -} - static void tcp_close_direct(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->closing)); if (sock->server != NULL) { @@ -1318,16 +1267,16 @@ isc__nm_tcp_close(isc_nmsocket_t *sock) { return; } - if (sock->tid == isc_nm_tid()) { + if (sock->tid == isc_tid()) { tcp_close_direct(sock); } else { /* * We need to create an event and pass it using async channel */ isc__netievent_tcpclose_t *ievent = - isc__nm_get_netievent_tcpclose(sock->mgr, sock); + isc__nm_get_netievent_tcpclose(sock->worker, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } } @@ -1338,7 +1287,7 @@ isc__nm_async_tcpclose(isc__networker_t *worker, isc__netievent_t *ev0) { isc_nmsocket_t *sock = ievent->sock; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); UNUSED(worker); @@ -1351,8 +1300,7 @@ tcp_close_connect_cb(uv_handle_t *handle) { REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(isc__nm_in_netthread()); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); isc__nmsocket_prep_destroy(sock); isc__nmsocket_detach(&sock); @@ -1360,10 +1308,14 @@ tcp_close_connect_cb(uv_handle_t *handle) { void isc__nm_tcp_shutdown(isc_nmsocket_t *sock) { + isc__networker_t *worker = NULL; + REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(sock->type == isc_nm_tcpsocket); + worker = sock->worker; + /* * If the socket is active, mark it inactive and * continue. If it isn't active, stop now. 
@@ -1384,7 +1336,7 @@ isc__nm_tcp_shutdown(isc_nmsocket_t *sock) { } if (sock->statichandle != NULL) { - if (isc__nm_closing(sock)) { + if (isc__nm_closing(worker)) { isc__nm_failed_read_cb(sock, ISC_R_SHUTTINGDOWN, false); } else { isc__nm_failed_read_cb(sock, ISC_R_CANCELED, false); @@ -1412,9 +1364,8 @@ isc__nm_tcp_cancelread(isc_nmhandle_t *handle) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_tcpsocket); - ievent = isc__nm_get_netievent_tcpcancel(sock->mgr, sock, handle); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tcpcancel(sock->worker, sock, handle); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } void @@ -1423,7 +1374,7 @@ isc__nm_async_tcpcancel(isc__networker_t *worker, isc__netievent_t *ev0) { isc_nmsocket_t *sock = ievent->sock; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); UNUSED(worker); uv_timer_stop(&sock->read_timer); diff --git a/lib/isc/netmgr/tcpdns.c b/lib/isc/netmgr/tcpdns.c index ce281a118f..203d18f8c4 100644 --- a/lib/isc/netmgr/tcpdns.c +++ b/lib/isc/netmgr/tcpdns.c @@ -72,45 +72,37 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota); static void quota_accept_cb(isc_quota_t *quota, void *sock0); -static void -stop_tcpdns_parent(isc_nmsocket_t *sock); -static void -stop_tcpdns_child(isc_nmsocket_t *sock); - static isc_result_t tcpdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { isc__networker_t *worker = NULL; - isc_result_t result = ISC_R_UNSET; int r; REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_UVREQ(req)); - REQUIRE(isc__nm_in_netthread()); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); - worker = &sock->mgr->workers[sock->tid]; + worker = sock->worker; atomic_store(&sock->connecting, true); - r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp); + r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp); 
UV_RUNTIME_CHECK(uv_tcp_init, r); uv_handle_set_data(&sock->uv_handle.handle, sock); - r = uv_timer_init(&worker->loop, &sock->read_timer); + r = uv_timer_init(&worker->loop->loop, &sock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock); - if (isc__nm_closing(sock)) { - result = ISC_R_SHUTTINGDOWN; - goto error; + if (isc__nm_closing(worker)) { + return (ISC_R_SHUTTINGDOWN); } r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd); if (r != 0) { isc__nm_closesocket(sock->fd); isc__nm_incstats(sock, STATID_OPENFAIL); - goto done; + return (isc_uverr2result(r)); } isc__nm_incstats(sock, STATID_OPEN); @@ -122,18 +114,19 @@ tcpdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { */ if (r != 0 && r != UV_EINVAL) { isc__nm_incstats(sock, STATID_BINDFAIL); - goto done; + return (isc_uverr2result(r)); } } - isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle); + isc__nm_set_network_buffers(sock->worker->netmgr, + &sock->uv_handle.handle); uv_handle_set_data(&req->uv_req.handle, req); r = uv_tcp_connect(&req->uv_req.connect, &sock->uv_handle.tcp, &req->peer.type.sa, tcpdns_connect_cb); if (r != 0) { isc__nm_incstats(sock, STATID_CONNECTFAIL); - goto done; + return (isc_uverr2result(r)); } uv_handle_set_data((uv_handle_t *)&sock->read_timer, @@ -142,19 +135,7 @@ tcpdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { atomic_store(&sock->connected, true); -done: - result = isc_uverr2result(r); -error: - LOCK(&sock->lock); - sock->result = result; - SIGNAL(&sock->cond); - if (!atomic_load(&sock->active)) { - WAIT(&sock->scond, &sock->lock); - } - INSIST(atomic_load(&sock->active)); - UNLOCK(&sock->lock); - - return (result); + return (ISC_R_SUCCESS); } void @@ -170,7 +151,7 @@ isc__nm_async_tcpdnsconnect(isc__networker_t *worker, isc__netievent_t *ev0) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_tcpdnssocket); REQUIRE(sock->parent == NULL); - REQUIRE(sock->tid == 
isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); result = tcpdns_connect_direct(sock, req); if (result != ISC_R_SUCCESS) { @@ -192,10 +173,13 @@ tcpdns_connect_cb(uv_connect_t *uvreq, int status) { isc__nm_uvreq_t *req = NULL; isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)uvreq->handle); struct sockaddr_storage ss; + isc__networker_t *worker = NULL; int r; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); + + worker = sock->worker; req = uv_handle_get_data((uv_handle_t *)uvreq); @@ -205,7 +189,7 @@ tcpdns_connect_cb(uv_connect_t *uvreq, int status) { if (atomic_load(&sock->timedout)) { result = ISC_R_TIMEDOUT; goto error; - } else if (isc__nm_closing(sock)) { + } else if (isc__nm_closing(worker)) { /* Network manager shutting down */ result = ISC_R_SHUTTINGDOWN; goto error; @@ -271,6 +255,7 @@ isc_nm_tcpdnsconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, isc__netievent_tcpdnsconnect_t *ievent = NULL; isc__nm_uvreq_t *req = NULL; sa_family_t sa_family; + isc__networker_t *worker = &mgr->workers[isc_tid()]; REQUIRE(VALID_NM(mgr)); REQUIRE(local != NULL); @@ -278,14 +263,13 @@ isc_nm_tcpdnsconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, sa_family = peer->type.sa.sa_family; - sock = isc_mem_get(mgr->mctx, sizeof(*sock)); - isc__nmsocket_init(sock, mgr, isc_nm_tcpdnssocket, local); + sock = isc_mem_get(worker->mctx, sizeof(*sock)); + isc__nmsocket_init(sock, worker, isc_nm_tcpdnssocket, local); sock->connect_timeout = timeout; - sock->result = ISC_R_UNSET; atomic_init(&sock->client, true); - req = isc__nm_uvreq_get(mgr, sock); + req = isc__nm_uvreq_get(worker, sock); req->cb.connect = cb; req->cbarg = cbarg; req->peer = *peer; @@ -294,9 +278,6 @@ isc_nm_tcpdnsconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, result = isc__nm_socket(sa_family, SOCK_STREAM, 0, &sock->fd); if (result != ISC_R_SUCCESS) { - if (isc__nm_in_netthread()) { - sock->tid = 
isc_nm_tid(); - } isc__nmsocket_clearcb(sock); isc__nm_connectcb(sock, req, result, true); atomic_store(&sock->closed, true); @@ -311,28 +292,13 @@ isc_nm_tcpdnsconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, result = isc__nm_socket_connectiontimeout(sock->fd, 120 * 1000); RUNTIME_CHECK(result == ISC_R_SUCCESS); - ievent = isc__nm_get_netievent_tcpdnsconnect(mgr, sock, req); + ievent = isc__nm_get_netievent_tcpdnsconnect(sock->worker, sock, req); + + atomic_store(&sock->active, true); + isc__nm_async_tcpdnsconnect(sock->worker, (isc__netievent_t *)ievent); + isc__nm_put_netievent_tcpdnsconnect(sock->worker, ievent); - if (isc__nm_in_netthread()) { - atomic_store(&sock->active, true); - sock->tid = isc_nm_tid(); - isc__nm_async_tcpdnsconnect(&mgr->workers[sock->tid], - (isc__netievent_t *)ievent); - isc__nm_put_netievent_tcpdnsconnect(mgr, ievent); - } else { - atomic_init(&sock->active, false); - sock->tid = isc_random_uniform(mgr->nworkers); - isc__nm_enqueue_ievent(&mgr->workers[sock->tid], - (isc__netievent_t *)ievent); - } - - LOCK(&sock->lock); - while (sock->result == ISC_R_UNSET) { - WAIT(&sock->cond, &sock->lock); - } atomic_store(&sock->active, true); - BROADCAST(&sock->scond); - UNLOCK(&sock->lock); } static uv_os_sock_t @@ -359,28 +325,20 @@ isc__nm_tcpdns_lb_socket(isc_nm_t *mgr, sa_family_t sa_family) { return (sock); } -static void -enqueue_stoplistening(isc_nmsocket_t *sock) { - isc__netievent_tcpdnsstop_t *ievent = - isc__nm_get_netievent_tcpdnsstop(sock->mgr, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); -} - static void start_tcpdns_child(isc_nm_t *mgr, isc_sockaddr_t *iface, isc_nmsocket_t *sock, uv_os_sock_t fd, int tid) { isc__netievent_tcpdnslisten_t *ievent = NULL; isc_nmsocket_t *csock = &sock->children[tid]; + isc__networker_t *worker = &mgr->workers[tid]; - isc__nmsocket_init(csock, mgr, isc_nm_tcpdnssocket, iface); + isc__nmsocket_init(csock, worker, isc_nm_tcpdnssocket, 
iface); csock->parent = sock; csock->accept_cb = sock->accept_cb; csock->accept_cbarg = sock->accept_cbarg; csock->recv_cb = sock->recv_cb; csock->recv_cbarg = sock->recv_cbarg; csock->backlog = sock->backlog; - csock->tid = tid; /* * We don't attach to quota, just assign - to avoid * increasing quota unnecessarily. @@ -389,7 +347,7 @@ start_tcpdns_child(isc_nm_t *mgr, isc_sockaddr_t *iface, isc_nmsocket_t *sock, isc_quota_cb_init(&csock->quotacb, quota_accept_cb, csock); if (mgr->load_balance_sockets) { - UNUSED(fd); + REQUIRE(fd == -1); csock->fd = isc__nm_tcpdns_lb_socket(mgr, iface->type.sa.sa_family); } else { @@ -397,34 +355,48 @@ start_tcpdns_child(isc_nm_t *mgr, isc_sockaddr_t *iface, isc_nmsocket_t *sock, } REQUIRE(csock->fd >= 0); - ievent = isc__nm_get_netievent_tcpdnslisten(mgr, csock); - isc__nm_maybe_enqueue_ievent(&mgr->workers[tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tcpdnslisten(csock->worker, csock); + + if (tid == 0) { + isc__nm_process_ievent(csock->worker, + (isc__netievent_t *)ievent); + } else { + isc__nm_enqueue_ievent(csock->worker, + (isc__netievent_t *)ievent); + } } + isc_result_t isc_nm_listentcpdns(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface, isc_nm_recv_cb_t recv_cb, void *recv_cbarg, isc_nm_accept_cb_t accept_cb, void *accept_cbarg, int backlog, isc_quota_t *quota, isc_nmsocket_t **sockp) { - isc_result_t result = ISC_R_SUCCESS; isc_nmsocket_t *sock = NULL; size_t children_size = 0; uv_os_sock_t fd = -1; + isc_result_t result = ISC_R_UNSET; + isc__networker_t *worker = &mgr->workers[0]; REQUIRE(VALID_NM(mgr)); + REQUIRE(isc_tid() == 0); - sock = isc_mem_get(mgr->mctx, sizeof(*sock)); - isc__nmsocket_init(sock, mgr, isc_nm_tcpdnslistener, iface); + if (workers == 0) { + workers = mgr->nloops; + } + REQUIRE(workers <= mgr->nloops); + + sock = isc_mem_get(worker->mctx, sizeof(*sock)); + isc__nmsocket_init(sock, worker, isc_nm_tcpdnslistener, iface); atomic_init(&sock->rchildren, 0); - 
sock->nchildren = (workers == ISC_NM_LISTEN_ALL) - ? (uint32_t)mgr->nworkers - : workers; + sock->nchildren = (workers == ISC_NM_LISTEN_ALL) ? (uint32_t)mgr->nloops + : workers; children_size = sock->nchildren * sizeof(sock->children[0]); - sock->children = isc_mem_get(mgr->mctx, children_size); + sock->children = isc_mem_get(worker->mctx, children_size); memset(sock->children, 0, children_size); - sock->result = ISC_R_UNSET; + isc_barrier_init(&sock->barrier, sock->nchildren); + sock->accept_cb = accept_cb; sock->accept_cbarg = accept_cbarg; sock->recv_cb = recv_cb; @@ -432,50 +404,38 @@ isc_nm_listentcpdns(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface, sock->backlog = backlog; sock->pquota = quota; - sock->tid = 0; - sock->fd = -1; - if (!mgr->load_balance_sockets) { fd = isc__nm_tcpdns_lb_socket(mgr, iface->type.sa.sa_family); } - isc_barrier_init(&sock->startlistening, sock->nchildren); - - for (size_t i = 0; i < sock->nchildren; i++) { - if ((int)i == isc_nm_tid()) { - continue; - } + for (size_t i = 1; i < sock->nchildren; i++) { start_tcpdns_child(mgr, iface, sock, fd, i); } - if (isc__nm_in_netthread()) { - start_tcpdns_child(mgr, iface, sock, fd, isc_nm_tid()); - } + start_tcpdns_child(mgr, iface, sock, fd, 0); if (!mgr->load_balance_sockets) { isc__nm_closesocket(fd); } LOCK(&sock->lock); - while (atomic_load(&sock->rchildren) != sock->nchildren) { - WAIT(&sock->cond, &sock->lock); - } result = sock->result; - atomic_store(&sock->active, true); UNLOCK(&sock->lock); - INSIST(result != ISC_R_UNSET); - if (result == ISC_R_SUCCESS) { - REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren); - *sockp = sock; - } else { + atomic_store(&sock->active, true); + + if (result != ISC_R_SUCCESS) { atomic_store(&sock->active, false); - enqueue_stoplistening(sock); + isc__nm_tcpdns_stoplistening(sock); isc_nmsocket_close(&sock); + + return (result); } - return (result); + REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren); + *sockp = sock; + return 
(ISC_R_SUCCESS); } void @@ -490,32 +450,30 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) { isc_nm_t *mgr = NULL; REQUIRE(VALID_NMSOCK(ievent->sock)); - REQUIRE(ievent->sock->tid == isc_nm_tid()); + REQUIRE(ievent->sock->tid == isc_tid()); REQUIRE(VALID_NMSOCK(ievent->sock->parent)); sock = ievent->sock; sa_family = sock->iface.type.sa.sa_family; - mgr = sock->mgr; + mgr = sock->worker->netmgr; REQUIRE(sock->type == isc_nm_tcpdnssocket); REQUIRE(sock->parent != NULL); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); (void)isc__nm_socket_min_mtu(sock->fd, sa_family); (void)isc__nm_socket_tcp_maxseg(sock->fd, NM_MAXSEG); - r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp); + r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp); UV_RUNTIME_CHECK(uv_tcp_init, r); uv_handle_set_data(&sock->uv_handle.handle, sock); /* This keeps the socket alive after everything else is gone */ isc__nmsocket_attach(sock, &(isc_nmsocket_t *){ NULL }); - r = uv_timer_init(&worker->loop, &sock->read_timer); + r = uv_timer_init(&worker->loop->loop, &sock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock); - LOCK(&sock->parent->lock); - r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd); if (r < 0) { isc__nm_closesocket(sock->fd); @@ -536,11 +494,13 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) { goto done; } } else { + LOCK(&sock->parent->lock); if (sock->parent->fd == -1) { r = isc__nm_tcp_freebind(&sock->uv_handle.tcp, &sock->iface.type.sa, flags); if (r < 0) { isc__nm_incstats(sock, STATID_BINDFAIL); + UNLOCK(&sock->parent->lock); goto done; } sock->parent->uv_handle.tcp.flags = @@ -551,9 +511,11 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) { sock->uv_handle.tcp.flags = sock->parent->uv_handle.tcp.flags; } + UNLOCK(&sock->parent->lock); } - isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle); 
+ isc__nm_set_network_buffers(sock->worker->netmgr, + &sock->uv_handle.handle); /* * The callback will run in the same thread uv_listen() was called @@ -574,18 +536,22 @@ isc__nm_async_tcpdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) { done: result = isc_uverr2result(r); + atomic_fetch_add(&sock->parent->rchildren, 1); + if (result != ISC_R_SUCCESS) { sock->pquota = NULL; } - atomic_fetch_add(&sock->parent->rchildren, 1); + LOCK(&sock->parent->lock); if (sock->parent->result == ISC_R_UNSET) { sock->parent->result = result; + } else { + REQUIRE(sock->parent->result == result); } - SIGNAL(&sock->parent->cond); UNLOCK(&sock->parent->lock); - isc_barrier_wait(&sock->parent->startlistening); + REQUIRE(!worker->loop->paused); + isc_barrier_wait(&sock->parent->barrier); } static void @@ -600,7 +566,7 @@ tcpdns_connection_cb(uv_stream_t *server, int status) { } REQUIRE(VALID_NMSOCK(ssock)); - REQUIRE(ssock->tid == isc_nm_tid()); + REQUIRE(ssock->tid == isc_tid()); if (isc__nmsocket_closing(ssock)) { result = ISC_R_CANCELED; @@ -621,21 +587,48 @@ done: isc__nm_accept_connection_log(result, can_log_tcpdns_quota()); } +static void +stop_tcpdns_child(isc_nmsocket_t *sock, uint32_t tid) { + isc_nmsocket_t *csock = NULL; + isc__netievent_tcpstop_t *ievent = NULL; + + csock = &sock->children[tid]; + REQUIRE(VALID_NMSOCK(csock)); + + atomic_store(&csock->active, false); + ievent = isc__nm_get_netievent_tcpdnsstop(csock->worker, csock); + + if (tid == 0) { + isc__nm_process_ievent(csock->worker, + (isc__netievent_t *)ievent); + } else { + isc__nm_enqueue_ievent(csock->worker, + (isc__netievent_t *)ievent); + } +} + +static void +stop_tcpdns_parent(isc_nmsocket_t *sock) { + /* Stop the parent */ + atomic_store(&sock->closed, true); + isc__nmsocket_prep_destroy(sock); +} + void isc__nm_tcpdns_stoplistening(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_tcpdnslistener); - if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false 
}, - true)) { - UNREACHABLE(); + RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing, + &(bool){ false }, true)); + + for (size_t i = 1; i < sock->nchildren; i++) { + stop_tcpdns_child(sock, i); } - if (!isc__nm_in_netthread()) { - enqueue_stoplistening(sock); - } else { - stop_tcpdns_parent(sock); - } + stop_tcpdns_child(sock, 0); + + stop_tcpdns_parent(sock); } void @@ -647,14 +640,18 @@ isc__nm_async_tcpdnsstop(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); + REQUIRE(sock->parent != NULL); - if (sock->parent != NULL) { - stop_tcpdns_child(sock); - return; - } + RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing, + &(bool){ false }, true)); - stop_tcpdns_parent(sock); + tcpdns_close_direct(sock); + + (void)atomic_fetch_sub(&sock->parent->rchildren, 1); + + REQUIRE(!worker->loop->paused); + isc_barrier_wait(&sock->parent->barrier); } void @@ -695,6 +692,7 @@ isc__nm_tcpdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { isc_nmsocket_t *sock = handle->sock; isc__netievent_tcpdnsread_t *ievent = NULL; + isc_nm_t *netmgr = sock->worker->netmgr; REQUIRE(sock->type == isc_nm_tcpdnssocket); REQUIRE(sock->statichandle == handle); @@ -703,13 +701,12 @@ isc__nm_tcpdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { sock->recv_cbarg = cbarg; sock->recv_read = true; if (sock->read_timeout == 0) { - sock->read_timeout = - (atomic_load(&sock->keepalive) - ? atomic_load(&sock->mgr->keepalive) - : atomic_load(&sock->mgr->idle)); + sock->read_timeout = (atomic_load(&sock->keepalive) + ? 
atomic_load(&netmgr->keepalive) + : atomic_load(&netmgr->idle)); } - ievent = isc__nm_get_netievent_tcpdnsread(sock->mgr, sock); + ievent = isc__nm_get_netievent_tcpdnsread(sock->worker, sock); /* * This MUST be done asynchronously, no matter which thread we're @@ -717,8 +714,7 @@ isc__nm_tcpdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { * isc_nm_read() again; if we tried to do that synchronously * we'd clash in processbuffer() and grow the stack indefinitely. */ - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); return; } @@ -733,7 +729,7 @@ isc__nm_async_tcpdnsread(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); if (isc__nmsocket_closing(sock)) { result = ISC_R_CANCELED; @@ -763,7 +759,7 @@ isc__nm_tcpdns_processbuffer(isc_nmsocket_t *sock) { isc_nmhandle_t *handle = NULL; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); if (isc__nmsocket_closing(sock)) { return (ISC_R_CANCELED); @@ -857,7 +853,7 @@ isc__nm_tcpdns_read_cb(uv_stream_t *stream, ssize_t nread, isc_result_t result; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->reading)); REQUIRE(buf != NULL); @@ -894,7 +890,7 @@ isc__nm_tcpdns_read_cb(uv_stream_t *stream, ssize_t nread, sock->buf_len += len; if (!atomic_load(&sock->client)) { - sock->read_timeout = atomic_load(&sock->mgr->idle); + sock->read_timeout = atomic_load(&sock->worker->netmgr->idle); } result = isc__nm_process_sock_buffer(sock); @@ -925,9 +921,8 @@ quota_accept_cb(isc_quota_t *quota, void *sock0) { */ isc__netievent_tcpdnsaccept_t *ievent = - isc__nm_get_netievent_tcpdnsaccept(sock->mgr, sock, quota); - isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid], - 
(isc__netievent_t *)ievent); + isc__nm_get_netievent_tcpdnsaccept(sock->worker, sock, quota); + isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } /* @@ -942,7 +937,7 @@ isc__nm_async_tcpdnsaccept(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(ievent->sock)); - REQUIRE(ievent->sock->tid == isc_nm_tid()); + REQUIRE(ievent->sock->tid == isc_tid()); result = accept_connection(ievent->sock, ievent->quota); isc__nm_accept_connection_log(result, can_log_tcpdns_quota()); @@ -960,7 +955,7 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { isc_nmhandle_t *handle = NULL; REQUIRE(VALID_NMSOCK(ssock)); - REQUIRE(ssock->tid == isc_nm_tid()); + REQUIRE(ssock->tid == isc_tid()); if (isc__nmsocket_closing(ssock)) { if (quota != NULL) { @@ -971,23 +966,22 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { REQUIRE(ssock->accept_cb != NULL); - csock = isc_mem_get(ssock->mgr->mctx, sizeof(isc_nmsocket_t)); - isc__nmsocket_init(csock, ssock->mgr, isc_nm_tcpdnssocket, + csock = isc_mem_get(ssock->worker->mctx, sizeof(isc_nmsocket_t)); + isc__nmsocket_init(csock, ssock->worker, isc_nm_tcpdnssocket, &ssock->iface); - csock->tid = ssock->tid; isc__nmsocket_attach(ssock, &csock->server); csock->recv_cb = ssock->recv_cb; csock->recv_cbarg = ssock->recv_cbarg; csock->quota = quota; atomic_init(&csock->accepting, true); - worker = &csock->mgr->workers[csock->tid]; + worker = csock->worker; - r = uv_tcp_init(&worker->loop, &csock->uv_handle.tcp); + r = uv_tcp_init(&worker->loop->loop, &csock->uv_handle.tcp); UV_RUNTIME_CHECK(uv_tcp_init, r); uv_handle_set_data(&csock->uv_handle.handle, csock); - r = uv_timer_init(&worker->loop, &csock->read_timer); + r = uv_timer_init(&worker->loop->loop, &csock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t *)&csock->read_timer, csock); @@ -1041,7 +1035,7 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { 
isc__nm_incstats(csock, STATID_ACCEPT); - csock->read_timeout = atomic_load(&csock->mgr->init); + csock->read_timeout = atomic_load(&csock->worker->netmgr->init); csock->closehandle_cb = isc__nm_resume_processing; @@ -1062,9 +1056,10 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { * The initial timer has been set, update the read timeout for the next * reads. */ - csock->read_timeout = (atomic_load(&csock->keepalive) - ? atomic_load(&csock->mgr->keepalive) - : atomic_load(&csock->mgr->idle)); + csock->read_timeout = + (atomic_load(&csock->keepalive) + ? atomic_load(&csock->worker->netmgr->keepalive) + : atomic_load(&csock->worker->netmgr->idle)); isc_nmhandle_detach(&handle); @@ -1102,7 +1097,7 @@ isc__nm_tcpdns_send(isc_nmhandle_t *handle, isc_region_t *region, REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_tcpdnssocket); - uvreq = isc__nm_uvreq_get(sock->mgr, sock); + uvreq = isc__nm_uvreq_get(sock->worker, sock); *(uint16_t *)uvreq->tcplen = htons(region->length); uvreq->uvbuf.base = (char *)region->base; uvreq->uvbuf.len = region->length; @@ -1112,9 +1107,8 @@ isc__nm_tcpdns_send(isc_nmhandle_t *handle, isc_region_t *region, uvreq->cb.send = cb; uvreq->cbarg = cbarg; - ievent = isc__nm_get_netievent_tcpdnssend(sock->mgr, sock, uvreq); - isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tcpdnssend(sock->worker, sock, uvreq); + isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); return; } @@ -1158,7 +1152,7 @@ isc__nm_async_tcpdnssend(isc__networker_t *worker, isc__netievent_t *ev0) { REQUIRE(VALID_UVREQ(ievent->req)); REQUIRE(VALID_NMSOCK(ievent->sock)); REQUIRE(ievent->sock->type == isc_nm_tcpdnssocket); - REQUIRE(ievent->sock->tid == isc_nm_tid()); + REQUIRE(ievent->sock->tid == isc_tid()); sock = ievent->sock; uvreq = ievent->req; @@ -1166,8 +1160,8 @@ isc__nm_async_tcpdnssend(isc__networker_t *worker, isc__netievent_t *ev0) { if 
(sock->write_timeout == 0) { sock->write_timeout = (atomic_load(&sock->keepalive) - ? atomic_load(&sock->mgr->keepalive) - : atomic_load(&sock->mgr->idle)); + ? atomic_load(&sock->worker->netmgr->keepalive) + : atomic_load(&sock->worker->netmgr->idle)); } uv_buf_t bufs[2] = { { .base = uvreq->tcplen, .len = 2 }, @@ -1228,7 +1222,7 @@ tcpdns_stop_cb(uv_handle_t *handle) { isc_nmsocket_t *sock = uv_handle_get_data(handle); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->closing)); uv_handle_set_data(handle, NULL); @@ -1248,7 +1242,7 @@ tcpdns_stop_cb(uv_handle_t *handle) { static void tcpdns_close_sock(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->closing)); if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false }, @@ -1292,60 +1286,10 @@ read_timer_close_cb(uv_handle_t *timer) { } } -static void -stop_tcpdns_child(isc_nmsocket_t *sock) { - REQUIRE(sock->type == isc_nm_tcpdnssocket); - REQUIRE(sock->tid == isc_nm_tid()); - - if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false }, - true)) { - return; - } - - tcpdns_close_direct(sock); - - atomic_fetch_sub(&sock->parent->rchildren, 1); - - isc_barrier_wait(&sock->parent->stoplistening); -} - -static void -stop_tcpdns_parent(isc_nmsocket_t *sock) { - isc_nmsocket_t *csock = NULL; - - REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); - REQUIRE(sock->type == isc_nm_tcpdnslistener); - - isc_barrier_init(&sock->stoplistening, sock->nchildren); - - for (size_t i = 0; i < sock->nchildren; i++) { - csock = &sock->children[i]; - REQUIRE(VALID_NMSOCK(csock)); - - if ((int)i == isc_nm_tid()) { - /* - * We need to schedule closing the other sockets first - */ - continue; - } - - atomic_store(&csock->active, false); - enqueue_stoplistening(csock); - } - - csock = &sock->children[isc_nm_tid()]; - 
atomic_store(&csock->active, false); - stop_tcpdns_child(csock); - - atomic_store(&sock->closed, true); - isc__nmsocket_prep_destroy(sock); -} - static void tcpdns_close_direct(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->closing)); if (sock->quota != NULL) { @@ -1374,16 +1318,16 @@ isc__nm_tcpdns_close(isc_nmsocket_t *sock) { return; } - if (sock->tid == isc_nm_tid()) { + if (sock->tid == isc_tid()) { tcpdns_close_direct(sock); } else { /* * We need to create an event and pass it using async channel */ isc__netievent_tcpdnsclose_t *ievent = - isc__nm_get_netievent_tcpdnsclose(sock->mgr, sock); + isc__nm_get_netievent_tcpdnsclose(sock->worker, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } } @@ -1397,7 +1341,7 @@ isc__nm_async_tcpdnsclose(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); tcpdns_close_direct(sock); } @@ -1408,8 +1352,7 @@ tcpdns_close_connect_cb(uv_handle_t *handle) { REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(isc__nm_in_netthread()); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); isc__nmsocket_prep_destroy(sock); isc__nmsocket_detach(&sock); @@ -1417,10 +1360,14 @@ tcpdns_close_connect_cb(uv_handle_t *handle) { void isc__nm_tcpdns_shutdown(isc_nmsocket_t *sock) { + isc__networker_t *worker = NULL; + REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(sock->type == isc_nm_tcpdnssocket); + worker = sock->worker; + /* * If the socket is active, mark it inactive and * continue. If it isn't active, stop now. 
@@ -1441,7 +1388,7 @@ isc__nm_tcpdns_shutdown(isc_nmsocket_t *sock) { } if (sock->statichandle != NULL) { - if (isc__nm_closing(sock)) { + if (isc__nm_closing(worker)) { isc__nm_failed_read_cb(sock, ISC_R_SHUTTINGDOWN, false); } else { isc__nm_failed_read_cb(sock, ISC_R_CANCELED, false); @@ -1469,9 +1416,8 @@ isc__nm_tcpdns_cancelread(isc_nmhandle_t *handle) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_tcpdnssocket); - ievent = isc__nm_get_netievent_tcpdnscancel(sock->mgr, sock, handle); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tcpdnscancel(sock->worker, sock, handle); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } void @@ -1483,7 +1429,7 @@ isc__nm_async_tcpdnscancel(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); isc__nm_failed_read_cb(sock, ISC_R_EOF, false); } diff --git a/lib/isc/netmgr/timer.c b/lib/isc/netmgr/timer.c index 7cfa433f05..0e21382ace 100644 --- a/lib/isc/netmgr/timer.c +++ b/lib/isc/netmgr/timer.c @@ -33,22 +33,20 @@ isc_nm_timer_create(isc_nmhandle_t *handle, isc_nm_timer_cb cb, void *cbarg, isc_nm_timer_t *timer = NULL; int r; - REQUIRE(isc__nm_in_netthread()); REQUIRE(VALID_NMHANDLE(handle)); REQUIRE(VALID_NMSOCK(handle->sock)); sock = handle->sock; - worker = &sock->mgr->workers[isc_nm_tid()]; + worker = sock->worker; /* TODO: per-loop object cache */ - timer = isc_mem_get(sock->mgr->mctx, sizeof(*timer)); + timer = isc_mem_get(worker->mctx, sizeof(*timer)); *timer = (isc_nm_timer_t){ .cb = cb, .cbarg = cbarg }; isc_refcount_init(&timer->references, 1); isc_nmhandle_attach(handle, &timer->handle); - r = uv_timer_init(&worker->loop, &timer->timer); + r = uv_timer_init(&worker->loop->loop, &timer->timer); UV_RUNTIME_CHECK(uv_timer_init, r); - uv_handle_set_data((uv_handle_t *)&timer->timer, timer); *timerp = 
timer; @@ -67,7 +65,7 @@ static void timer_destroy(uv_handle_t *uvhandle) { isc_nm_timer_t *timer = uv_handle_get_data(uvhandle); isc_nmhandle_t *handle = timer->handle; - isc_mem_t *mctx = timer->handle->sock->mgr->mctx; + isc_mem_t *mctx = timer->handle->sock->worker->mctx; isc_mem_put(mctx, timer, sizeof(*timer)); @@ -86,7 +84,6 @@ isc_nm_timer_detach(isc_nm_timer_t **timerp) { handle = timer->handle; - REQUIRE(isc__nm_in_netthread()); REQUIRE(VALID_NMHANDLE(handle)); REQUIRE(VALID_NMSOCK(handle->sock)); diff --git a/lib/isc/netmgr/tlsdns.c b/lib/isc/netmgr/tlsdns.c index 73f153bb9c..9bd88e0fe7 100644 --- a/lib/isc/netmgr/tlsdns.c +++ b/lib/isc/netmgr/tlsdns.c @@ -65,11 +65,6 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota); static void quota_accept_cb(isc_quota_t *quota, void *sock0); -static void -stop_tlsdns_parent(isc_nmsocket_t *sock); -static void -stop_tlsdns_child(isc_nmsocket_t *sock); - static void async_tlsdns_cycle(isc_nmsocket_t *sock) __attribute__((unused)); @@ -120,10 +115,9 @@ tlsdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_UVREQ(req)); - REQUIRE(isc__nm_in_netthread()); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); - worker = &sock->mgr->workers[sock->tid]; + worker = sock->worker; atomic_store(&sock->connecting, true); @@ -131,24 +125,23 @@ tlsdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { result = isc__nm_socket_connectiontimeout(sock->fd, 120 * 1000); RUNTIME_CHECK(result == ISC_R_SUCCESS); - r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp); + r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp); UV_RUNTIME_CHECK(uv_tcp_init, r); uv_handle_set_data(&sock->uv_handle.handle, sock); - r = uv_timer_init(&worker->loop, &sock->read_timer); + r = uv_timer_init(&worker->loop->loop, &sock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock); - if 
(isc__nm_closing(sock)) { - result = ISC_R_SHUTTINGDOWN; - goto error; + if (isc__nm_closing(worker)) { + return (ISC_R_SHUTTINGDOWN); } r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd); if (r != 0) { isc__nm_closesocket(sock->fd); isc__nm_incstats(sock, STATID_OPENFAIL); - goto done; + return (isc_uverr2result(r)); } isc__nm_incstats(sock, STATID_OPEN); @@ -160,18 +153,19 @@ tlsdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { */ if (r != 0 && r != UV_EINVAL) { isc__nm_incstats(sock, STATID_BINDFAIL); - goto done; + return (isc_uverr2result(r)); } } - isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle); + isc__nm_set_network_buffers(sock->worker->netmgr, + &sock->uv_handle.handle); uv_handle_set_data(&req->uv_req.handle, req); r = uv_tcp_connect(&req->uv_req.connect, &sock->uv_handle.tcp, &req->peer.type.sa, tlsdns_connect_cb); if (r != 0) { isc__nm_incstats(sock, STATID_CONNECTFAIL); - goto done; + return (isc_uverr2result(r)); } uv_handle_set_data((uv_handle_t *)&sock->read_timer, @@ -180,19 +174,7 @@ tlsdns_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { atomic_store(&sock->connected, true); -done: - result = isc_uverr2result(r); -error: - LOCK(&sock->lock); - sock->result = result; - SIGNAL(&sock->cond); - if (!atomic_load(&sock->active)) { - WAIT(&sock->scond, &sock->lock); - } - INSIST(atomic_load(&sock->active)); - UNLOCK(&sock->lock); - - return (result); + return (ISC_R_SUCCESS); } void @@ -208,7 +190,7 @@ isc__nm_async_tlsdnsconnect(isc__networker_t *worker, isc__netievent_t *ev0) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_tlsdnssocket); REQUIRE(sock->parent == NULL); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); result = tlsdns_connect_direct(sock, req); if (result != ISC_R_SUCCESS) { @@ -232,10 +214,13 @@ tlsdns_connect_cb(uv_connect_t *uvreq, int status) { isc__nm_uvreq_t *req = NULL; isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)uvreq->handle); struct 
sockaddr_storage ss; + isc__networker_t *worker = NULL; int r; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); + + worker = sock->worker; req = uv_handle_get_data((uv_handle_t *)uvreq); @@ -245,7 +230,7 @@ tlsdns_connect_cb(uv_connect_t *uvreq, int status) { if (atomic_load(&sock->timedout)) { result = ISC_R_TIMEDOUT; goto error; - } else if (isc__nm_closing(sock)) { + } else if (isc__nm_closing(worker)) { /* Network manager shutting down */ result = ISC_R_SHUTTINGDOWN; goto error; @@ -359,6 +344,7 @@ isc_nm_tlsdnsconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, isc__netievent_tlsdnsconnect_t *ievent = NULL; isc__nm_uvreq_t *req = NULL; sa_family_t sa_family; + isc__networker_t *worker = &mgr->workers[isc_tid()]; REQUIRE(VALID_NM(mgr)); REQUIRE(local != NULL); @@ -367,16 +353,15 @@ isc_nm_tlsdnsconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, sa_family = peer->type.sa.sa_family; - sock = isc_mem_get(mgr->mctx, sizeof(*sock)); - isc__nmsocket_init(sock, mgr, isc_nm_tlsdnssocket, local); + sock = isc_mem_get(worker->mctx, sizeof(*sock)); + isc__nmsocket_init(sock, worker, isc_nm_tlsdnssocket, local); sock->connect_timeout = timeout; - sock->result = ISC_R_UNSET; isc_tlsctx_attach(sslctx, &sock->tls.ctx); atomic_init(&sock->client, true); atomic_init(&sock->connecting, true); - req = isc__nm_uvreq_get(mgr, sock); + req = isc__nm_uvreq_get(sock->worker, sock); req->cb.connect = cb; req->cbarg = cbarg; req->peer = *peer; @@ -395,7 +380,7 @@ isc_nm_tlsdnsconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, goto failure; } - if (isc__nm_closing(sock)) { + if (isc__nm_closing(worker)) { result = ISC_R_SHUTTINGDOWN; goto failure; } @@ -407,34 +392,17 @@ isc_nm_tlsdnsconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, result = isc__nm_socket_connectiontimeout(sock->fd, 120 * 1000); RUNTIME_CHECK(result == ISC_R_SUCCESS); - ievent = 
isc__nm_get_netievent_tlsdnsconnect(mgr, sock, req); + ievent = isc__nm_get_netievent_tlsdnsconnect(sock->worker, sock, req); - if (isc__nm_in_netthread()) { - atomic_store(&sock->active, true); - sock->tid = isc_nm_tid(); - isc__nm_async_tlsdnsconnect(&mgr->workers[sock->tid], - (isc__netievent_t *)ievent); - isc__nm_put_netievent_tlsdnsconnect(mgr, ievent); - } else { - atomic_init(&sock->active, false); - sock->tid = isc_random_uniform(mgr->nworkers); - isc__nm_enqueue_ievent(&mgr->workers[sock->tid], - (isc__netievent_t *)ievent); - } - LOCK(&sock->lock); - while (sock->result == ISC_R_UNSET) { - WAIT(&sock->cond, &sock->lock); - } atomic_store(&sock->active, true); - BROADCAST(&sock->scond); - UNLOCK(&sock->lock); + isc__nm_async_tlsdnsconnect(sock->worker, (isc__netievent_t *)ievent); + isc__nm_put_netievent_tlsdnsconnect(sock->worker, ievent); + + atomic_store(&sock->active, true); + return; failure: - if (isc__nm_in_netthread()) { - sock->tid = isc_nm_tid(); - } - atomic_compare_exchange_enforced(&sock->connecting, &(bool){ true }, false); isc__nmsocket_clearcb(sock); @@ -472,15 +440,15 @@ start_tlsdns_child(isc_nm_t *mgr, isc_sockaddr_t *iface, isc_nmsocket_t *sock, uv_os_sock_t fd, int tid) { isc__netievent_tlsdnslisten_t *ievent = NULL; isc_nmsocket_t *csock = &sock->children[tid]; + isc__networker_t *worker = &mgr->workers[tid]; - isc__nmsocket_init(csock, mgr, isc_nm_tlsdnssocket, iface); + isc__nmsocket_init(csock, worker, isc_nm_tlsdnssocket, iface); csock->parent = sock; csock->accept_cb = sock->accept_cb; csock->accept_cbarg = sock->accept_cbarg; csock->recv_cb = sock->recv_cb; csock->recv_cbarg = sock->recv_cbarg; csock->backlog = sock->backlog; - csock->tid = tid; isc_tlsctx_attach(sock->tls.ctx, &csock->tls.ctx); /* @@ -499,17 +467,15 @@ start_tlsdns_child(isc_nm_t *mgr, isc_sockaddr_t *iface, isc_nmsocket_t *sock, } REQUIRE(csock->fd >= 0); - ievent = isc__nm_get_netievent_tlsdnslisten(mgr, csock); - 
isc__nm_maybe_enqueue_ievent(&mgr->workers[tid], - (isc__netievent_t *)ievent); -} + ievent = isc__nm_get_netievent_tlsdnslisten(csock->worker, csock); -static void -enqueue_stoplistening(isc_nmsocket_t *sock) { - isc__netievent_tlsdnsstop_t *ievent = - isc__nm_get_netievent_tlsdnsstop(sock->mgr, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + if (tid == 0) { + isc__nm_process_ievent(csock->worker, + (isc__netievent_t *)ievent); + } else { + isc__nm_enqueue_ievent(csock->worker, + (isc__netievent_t *)ievent); + } } isc_result_t @@ -518,25 +484,32 @@ isc_nm_listentlsdns(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface, isc_nm_accept_cb_t accept_cb, void *accept_cbarg, int backlog, isc_quota_t *quota, isc_tlsctx_t *sslctx, isc_nmsocket_t **sockp) { - isc_result_t result = ISC_R_SUCCESS; isc_nmsocket_t *sock = NULL; size_t children_size = 0; uv_os_sock_t fd = -1; + isc_result_t result = ISC_R_UNSET; + isc__networker_t *worker = &mgr->workers[isc_tid()]; REQUIRE(VALID_NM(mgr)); + REQUIRE(isc_tid() == 0); - sock = isc_mem_get(mgr->mctx, sizeof(*sock)); - isc__nmsocket_init(sock, mgr, isc_nm_tlsdnslistener, iface); + if (workers == 0) { + workers = mgr->nloops; + } + REQUIRE(workers <= mgr->nloops); + + sock = isc_mem_get(worker->mctx, sizeof(*sock)); + isc__nmsocket_init(sock, worker, isc_nm_tlsdnslistener, iface); atomic_init(&sock->rchildren, 0); - sock->nchildren = (workers == ISC_NM_LISTEN_ALL) - ? (uint32_t)mgr->nworkers - : workers; + sock->nchildren = (workers == ISC_NM_LISTEN_ALL) ? 
(uint32_t)mgr->nloops + : workers; children_size = sock->nchildren * sizeof(sock->children[0]); - sock->children = isc_mem_get(mgr->mctx, children_size); + sock->children = isc_mem_get(worker->mctx, children_size); memset(sock->children, 0, children_size); - sock->result = ISC_R_UNSET; + isc_barrier_init(&sock->barrier, sock->nchildren); + sock->accept_cb = accept_cb; sock->accept_cbarg = accept_cbarg; sock->recv_cb = recv_cb; @@ -546,50 +519,38 @@ isc_nm_listentlsdns(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface, isc_tlsctx_attach(sslctx, &sock->tls.ctx); - sock->tid = 0; - sock->fd = -1; - if (!mgr->load_balance_sockets) { fd = isc__nm_tlsdns_lb_socket(mgr, iface->type.sa.sa_family); } - isc_barrier_init(&sock->startlistening, sock->nchildren); - - for (size_t i = 0; i < sock->nchildren; i++) { - if ((int)i == isc_nm_tid()) { - continue; - } + for (size_t i = 1; i < sock->nchildren; i++) { start_tlsdns_child(mgr, iface, sock, fd, i); } - if (isc__nm_in_netthread()) { - start_tlsdns_child(mgr, iface, sock, fd, isc_nm_tid()); - } + start_tlsdns_child(mgr, iface, sock, fd, 0); if (!mgr->load_balance_sockets) { isc__nm_closesocket(fd); } LOCK(&sock->lock); - while (atomic_load(&sock->rchildren) != sock->nchildren) { - WAIT(&sock->cond, &sock->lock); - } result = sock->result; - atomic_store(&sock->active, true); UNLOCK(&sock->lock); - INSIST(result != ISC_R_UNSET); - if (result == ISC_R_SUCCESS) { - REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren); - *sockp = sock; - } else { + atomic_store(&sock->active, true); + + if (result != ISC_R_SUCCESS) { atomic_store(&sock->active, false); - enqueue_stoplistening(sock); + isc__nm_tlsdns_stoplistening(sock); isc_nmsocket_close(&sock); + + return (result); } - return (result); + REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren); + *sockp = sock; + return (ISC_R_SUCCESS); } void @@ -601,35 +562,31 @@ isc__nm_async_tlsdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) { int flags = 0; 
isc_nmsocket_t *sock = NULL; isc_result_t result = ISC_R_UNSET; - isc_nm_t *mgr; REQUIRE(VALID_NMSOCK(ievent->sock)); - REQUIRE(ievent->sock->tid == isc_nm_tid()); + REQUIRE(ievent->sock->tid == isc_tid()); REQUIRE(VALID_NMSOCK(ievent->sock->parent)); sock = ievent->sock; sa_family = sock->iface.type.sa.sa_family; - mgr = sock->mgr; REQUIRE(sock->type == isc_nm_tlsdnssocket); REQUIRE(sock->parent != NULL); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); (void)isc__nm_socket_min_mtu(sock->fd, sa_family); (void)isc__nm_socket_tcp_maxseg(sock->fd, NM_MAXSEG); - r = uv_tcp_init(&worker->loop, &sock->uv_handle.tcp); + r = uv_tcp_init(&worker->loop->loop, &sock->uv_handle.tcp); UV_RUNTIME_CHECK(uv_tcp_init, r); uv_handle_set_data(&sock->uv_handle.handle, sock); /* This keeps the socket alive after everything else is gone */ isc__nmsocket_attach(sock, &(isc_nmsocket_t *){ NULL }); - r = uv_timer_init(&worker->loop, &sock->read_timer); + r = uv_timer_init(&worker->loop->loop, &sock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock); - LOCK(&sock->parent->lock); - r = uv_tcp_open(&sock->uv_handle.tcp, sock->fd); if (r < 0) { isc__nm_closesocket(sock->fd); @@ -642,7 +599,7 @@ isc__nm_async_tlsdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) { flags = UV_TCP_IPV6ONLY; } - if (mgr->load_balance_sockets) { + if (sock->worker->netmgr->load_balance_sockets) { r = isc__nm_tcp_freebind(&sock->uv_handle.tcp, &sock->iface.type.sa, flags); if (r < 0) { @@ -650,11 +607,13 @@ isc__nm_async_tlsdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) { goto done; } } else { + LOCK(&sock->parent->lock); if (sock->parent->fd == -1) { r = isc__nm_tcp_freebind(&sock->uv_handle.tcp, &sock->iface.type.sa, flags); if (r < 0) { isc__nm_incstats(sock, STATID_BINDFAIL); + UNLOCK(&sock->parent->lock); goto done; } sock->parent->uv_handle.tcp.flags = @@ -665,9 +624,11 @@ 
isc__nm_async_tlsdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) { sock->uv_handle.tcp.flags = sock->parent->uv_handle.tcp.flags; } + UNLOCK(&sock->parent->lock); } - isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle); + isc__nm_set_network_buffers(sock->worker->netmgr, + &sock->uv_handle.handle); /* * The callback will run in the same thread uv_listen() was @@ -689,18 +650,22 @@ isc__nm_async_tlsdnslisten(isc__networker_t *worker, isc__netievent_t *ev0) { done: result = isc_uverr2result(r); + atomic_fetch_add(&sock->parent->rchildren, 1); + if (result != ISC_R_SUCCESS) { sock->pquota = NULL; } - atomic_fetch_add(&sock->parent->rchildren, 1); + LOCK(&sock->parent->lock); if (sock->parent->result == ISC_R_UNSET) { sock->parent->result = result; + } else { + REQUIRE(sock->parent->result == result); } - SIGNAL(&sock->parent->cond); UNLOCK(&sock->parent->lock); - isc_barrier_wait(&sock->parent->startlistening); + REQUIRE(!worker->loop->paused); + isc_barrier_wait(&sock->parent->barrier); } static void @@ -715,7 +680,7 @@ tlsdns_connection_cb(uv_stream_t *server, int status) { } REQUIRE(VALID_NMSOCK(ssock)); - REQUIRE(ssock->tid == isc_nm_tid()); + REQUIRE(ssock->tid == isc_tid()); if (isc__nmsocket_closing(ssock)) { result = ISC_R_CANCELED; @@ -736,21 +701,48 @@ done: isc__nm_accept_connection_log(result, can_log_tlsdns_quota()); } +static void +stop_tlsdns_child(isc_nmsocket_t *sock, uint32_t tid) { + isc_nmsocket_t *csock = NULL; + isc__netievent_tcpstop_t *ievent = NULL; + + csock = &sock->children[tid]; + REQUIRE(VALID_NMSOCK(csock)); + + atomic_store(&csock->active, false); + ievent = isc__nm_get_netievent_tlsdnsstop(csock->worker, csock); + + if (tid == 0) { + isc__nm_process_ievent(csock->worker, + (isc__netievent_t *)ievent); + } else { + isc__nm_enqueue_ievent(csock->worker, + (isc__netievent_t *)ievent); + } +} + +static void +stop_tlsdns_parent(isc_nmsocket_t *sock) { + /* Stop the parent */ + atomic_store(&sock->closed, true); + 
isc__nmsocket_prep_destroy(sock); +} + void isc__nm_tlsdns_stoplistening(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_tlsdnslistener); - if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false }, - true)) { - UNREACHABLE(); + RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing, + &(bool){ false }, true)); + + for (size_t i = 1; i < sock->nchildren; i++) { + stop_tlsdns_child(sock, i); } - if (!isc__nm_in_netthread()) { - enqueue_stoplistening(sock); - } else { - stop_tlsdns_parent(sock); - } + stop_tlsdns_child(sock, 0); + + stop_tlsdns_parent(sock); } static void @@ -758,9 +750,31 @@ tls_shutdown(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); isc__netievent_tlsdnsshutdown_t *ievent = - isc__nm_get_netievent_tlsdnsshutdown(sock->mgr, sock); - isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__nm_get_netievent_tlsdnsshutdown(sock->worker, sock); + isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); +} + +void +isc__nm_async_tlsdnsstop(isc__networker_t *worker, isc__netievent_t *ev0) { + isc__netievent_tlsdnsstop_t *ievent = + (isc__netievent_tlsdnsstop_t *)ev0; + isc_nmsocket_t *sock = ievent->sock; + + UNUSED(worker); + + REQUIRE(VALID_NMSOCK(sock)); + REQUIRE(sock->tid == isc_tid()); + REQUIRE(sock->parent != NULL); + + RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing, + &(bool){ false }, true)); + + tlsdns_close_direct(sock); + + (void)atomic_fetch_sub(&sock->parent->rchildren, 1); + + REQUIRE(!worker->loop->paused); + isc_barrier_wait(&sock->parent->barrier); } void @@ -827,25 +841,6 @@ isc__nm_async_tlsdnsshutdown(isc__networker_t *worker, isc__netievent_t *ev0) { return; } -void -isc__nm_async_tlsdnsstop(isc__networker_t *worker, isc__netievent_t *ev0) { - isc__netievent_tlsdnsstop_t *ievent = - (isc__netievent_tlsdnsstop_t *)ev0; - isc_nmsocket_t *sock = ievent->sock; - - UNUSED(worker); - - REQUIRE(VALID_NMSOCK(sock)); 
- REQUIRE(sock->tid == isc_nm_tid()); - - if (sock->parent != NULL) { - stop_tlsdns_child(sock); - return; - } - - stop_tlsdns_parent(sock); -} - void isc__nm_tlsdns_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result, bool async) { @@ -897,6 +892,7 @@ isc__nm_tlsdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { isc_nmsocket_t *sock = handle->sock; isc__netievent_tlsdnsread_t *ievent = NULL; + isc_nm_t *netmgr = sock->worker->netmgr; REQUIRE(sock->type == isc_nm_tlsdnssocket); REQUIRE(sock->statichandle == handle); @@ -905,13 +901,12 @@ isc__nm_tlsdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { sock->recv_cbarg = cbarg; sock->recv_read = true; if (sock->read_timeout == 0) { - sock->read_timeout = - (atomic_load(&sock->keepalive) - ? atomic_load(&sock->mgr->keepalive) - : atomic_load(&sock->mgr->idle)); + sock->read_timeout = (atomic_load(&sock->keepalive) + ? atomic_load(&netmgr->keepalive) + : atomic_load(&netmgr->idle)); } - ievent = isc__nm_get_netievent_tlsdnsread(sock->mgr, sock); + ievent = isc__nm_get_netievent_tlsdnsread(sock->worker, sock); /* * This MUST be done asynchronously, no matter which thread @@ -920,8 +915,7 @@ isc__nm_tlsdns_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { * we'd clash in processbuffer() and grow the stack * indefinitely. 
*/ - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); return; } @@ -936,7 +930,7 @@ isc__nm_async_tlsdnsread(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); if (isc__nmsocket_closing(sock)) { atomic_store(&sock->reading, true); @@ -966,7 +960,7 @@ isc__nm_tlsdns_processbuffer(isc_nmsocket_t *sock) { isc_nmhandle_t *handle = NULL; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); if (isc__nmsocket_closing(sock)) { return (ISC_R_CANCELED); @@ -1248,7 +1242,7 @@ free_senddata(isc_nmsocket_t *sock, const isc_result_t result) { REQUIRE(sock->tls.senddata.base != NULL); REQUIRE(sock->tls.senddata.length > 0); - isc_mem_put(sock->mgr->mctx, sock->tls.senddata.base, + isc_mem_put(sock->worker->mctx, sock->tls.senddata.base, sock->tls.senddata.length); sock->tls.senddata.base = NULL; sock->tls.senddata.length = 0; @@ -1271,7 +1265,7 @@ tls_write_cb(uv_write_t *req, int status) { isc__nm_uvreq_put(&uvreq, sock); if (status != 0) { - tls_error(sock, result); + tls_error(sock, isc_uverr2result(status)); return; } @@ -1302,7 +1296,8 @@ tls_cycle_output(isc_nmsocket_t *sock) { pending = (int)ISC_NETMGR_TCP_RECVBUF_SIZE; } - sock->tls.senddata.base = isc_mem_get(sock->mgr->mctx, pending); + sock->tls.senddata.base = isc_mem_get(sock->worker->mctx, + pending); sock->tls.senddata.length = pending; /* It's a bit misnomer here, but it does the right thing */ @@ -1423,9 +1418,8 @@ async_tlsdns_cycle(isc_nmsocket_t *sock) { return; } - ievent = isc__nm_get_netievent_tlsdnscycle(sock->mgr, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tlsdnscycle(sock->worker, sock); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); 
} void @@ -1438,7 +1432,7 @@ isc__nm_async_tlsdnscycle(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(ievent->sock)); - REQUIRE(ievent->sock->tid == isc_nm_tid()); + REQUIRE(ievent->sock->tid == isc_tid()); sock = ievent->sock; @@ -1457,7 +1451,7 @@ isc__nm_tlsdns_read_cb(uv_stream_t *stream, ssize_t nread, int rv; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->reading)); REQUIRE(buf != NULL); @@ -1477,7 +1471,7 @@ isc__nm_tlsdns_read_cb(uv_stream_t *stream, ssize_t nread, } if (!atomic_load(&sock->client)) { - sock->read_timeout = atomic_load(&sock->mgr->idle); + sock->read_timeout = atomic_load(&sock->worker->netmgr->idle); } /* @@ -1521,9 +1515,8 @@ quota_accept_cb(isc_quota_t *quota, void *sock0) { */ isc__netievent_tlsdnsaccept_t *ievent = - isc__nm_get_netievent_tlsdnsaccept(sock->mgr, sock, quota); - isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__nm_get_netievent_tlsdnsaccept(sock->worker, sock, quota); + isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } /* @@ -1538,7 +1531,7 @@ isc__nm_async_tlsdnsaccept(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(ievent->sock)); - REQUIRE(ievent->sock->tid == isc_nm_tid()); + REQUIRE(ievent->sock->tid == isc_tid()); result = accept_connection(ievent->sock, ievent->quota); isc__nm_accept_connection_log(result, can_log_tlsdns_quota()); @@ -1555,7 +1548,7 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { isc_sockaddr_t local; REQUIRE(VALID_NMSOCK(ssock)); - REQUIRE(ssock->tid == isc_nm_tid()); + REQUIRE(ssock->tid == isc_tid()); if (isc__nmsocket_closing(ssock)) { if (quota != NULL) { @@ -1566,10 +1559,9 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { REQUIRE(ssock->accept_cb != NULL); - csock = isc_mem_get(ssock->mgr->mctx, sizeof(isc_nmsocket_t)); - 
isc__nmsocket_init(csock, ssock->mgr, isc_nm_tlsdnssocket, + csock = isc_mem_get(ssock->worker->mctx, sizeof(isc_nmsocket_t)); + isc__nmsocket_init(csock, ssock->worker, isc_nm_tlsdnssocket, &ssock->iface); - csock->tid = ssock->tid; isc__nmsocket_attach(ssock, &csock->server); csock->accept_cb = ssock->accept_cb; csock->accept_cbarg = ssock->accept_cbarg; @@ -1578,13 +1570,13 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { csock->quota = quota; atomic_init(&csock->accepting, true); - worker = &csock->mgr->workers[csock->tid]; + worker = csock->worker; - r = uv_tcp_init(&worker->loop, &csock->uv_handle.tcp); + r = uv_tcp_init(&worker->loop->loop, &csock->uv_handle.tcp); UV_RUNTIME_CHECK(uv_tcp_init, r); uv_handle_set_data(&csock->uv_handle.handle, csock); - r = uv_timer_init(&worker->loop, &csock->read_timer); + r = uv_timer_init(&worker->loop->loop, &csock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t *)&csock->read_timer, csock); @@ -1657,7 +1649,7 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { isc__nm_incstats(csock, STATID_ACCEPT); - csock->read_timeout = atomic_load(&csock->mgr->init); + csock->read_timeout = atomic_load(&csock->worker->netmgr->init); csock->closehandle_cb = isc__nm_resume_processing; @@ -1675,9 +1667,10 @@ accept_connection(isc_nmsocket_t *ssock, isc_quota_t *quota) { * The initial timer has been set, update the read timeout for * the next reads. */ - csock->read_timeout = (atomic_load(&csock->keepalive) - ? atomic_load(&csock->mgr->keepalive) - : atomic_load(&csock->mgr->idle)); + csock->read_timeout = + (atomic_load(&csock->keepalive) + ? 
atomic_load(&csock->worker->netmgr->keepalive) + : atomic_load(&csock->worker->netmgr->idle)); result = isc__nm_process_sock_buffer(csock); if (result != ISC_R_SUCCESS) { @@ -1717,7 +1710,7 @@ isc__nm_tlsdns_send(isc_nmhandle_t *handle, isc_region_t *region, REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_tlsdnssocket); - uvreq = isc__nm_uvreq_get(sock->mgr, sock); + uvreq = isc__nm_uvreq_get(sock->worker, sock); *(uint16_t *)uvreq->tcplen = htons(region->length); uvreq->uvbuf.base = (char *)region->base; uvreq->uvbuf.len = region->length; @@ -1727,9 +1720,8 @@ isc__nm_tlsdns_send(isc_nmhandle_t *handle, isc_region_t *region, uvreq->cb.send = cb; uvreq->cbarg = cbarg; - ievent = isc__nm_get_netievent_tlsdnssend(sock->mgr, sock, uvreq); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tlsdnssend(sock->worker, sock, uvreq); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); return; } @@ -1747,13 +1739,13 @@ isc__nm_async_tlsdnssend(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(sock->type == isc_nm_tlsdnssocket); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); if (sock->write_timeout == 0) { sock->write_timeout = (atomic_load(&sock->keepalive) - ? atomic_load(&sock->mgr->keepalive) - : atomic_load(&sock->mgr->idle)); + ? 
atomic_load(&sock->worker->netmgr->keepalive) + : atomic_load(&sock->worker->netmgr->idle)); } result = tlsdns_send_direct(sock, uvreq); @@ -1766,9 +1758,8 @@ isc__nm_async_tlsdnssend(isc__networker_t *worker, isc__netievent_t *ev0) { static void tlsdns_send_enqueue(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { isc__netievent_tlsdnssend_t *ievent = - isc__nm_get_netievent_tlsdnssend(sock->mgr, sock, req); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__nm_get_netievent_tlsdnssend(sock->worker, sock, req); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } static isc_result_t @@ -1782,7 +1773,7 @@ tlsdns_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(VALID_UVREQ(req)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(sock->type == isc_nm_tlsdnssocket); result = tls_pop_error(sock); @@ -1803,7 +1794,7 @@ tlsdns_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { * There's no SSL_writev(), so we need to use a local buffer to * assemble the whole message */ - worker = &sock->mgr->workers[sock->tid]; + worker = sock->worker; sendlen = req->uvbuf.len + sizeof(uint16_t); memmove(worker->sendbuf, req->tcplen, sizeof(uint16_t)); memmove(worker->sendbuf + sizeof(uint16_t), req->uvbuf.base, @@ -1846,7 +1837,7 @@ tlsdns_stop_cb(uv_handle_t *handle) { isc_nmsocket_t *sock = uv_handle_get_data(handle); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->closing)); uv_handle_set_data(handle, NULL); @@ -1873,7 +1864,7 @@ tlsdns_stop_cb(uv_handle_t *handle) { static void tlsdns_close_sock(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->closing)); if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false }, @@ -1933,61 +1924,10 @@ 
read_timer_close_cb(uv_handle_t *handle) { } } -static void -stop_tlsdns_child(isc_nmsocket_t *sock) { - REQUIRE(sock->type == isc_nm_tlsdnssocket); - REQUIRE(sock->tid == isc_nm_tid()); - - if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false }, - true)) { - return; - } - - tlsdns_close_direct(sock); - - atomic_fetch_sub(&sock->parent->rchildren, 1); - - isc_barrier_wait(&sock->parent->stoplistening); -} - -static void -stop_tlsdns_parent(isc_nmsocket_t *sock) { - isc_nmsocket_t *csock = NULL; - - REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); - REQUIRE(sock->type == isc_nm_tlsdnslistener); - - isc_barrier_init(&sock->stoplistening, sock->nchildren); - - for (size_t i = 0; i < sock->nchildren; i++) { - csock = &sock->children[i]; - - REQUIRE(VALID_NMSOCK(csock)); - - if ((int)i == isc_nm_tid()) { - /* - * We need to schedule closing the other sockets first - */ - continue; - } - - atomic_store(&csock->active, false); - enqueue_stoplistening(csock); - } - - csock = &sock->children[isc_nm_tid()]; - atomic_store(&csock->active, false); - stop_tlsdns_child(csock); - - atomic_store(&sock->closed, true); - isc__nmsocket_prep_destroy(sock); -} - static void tlsdns_close_direct(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->closing)); REQUIRE(sock->tls.pending_req == NULL); @@ -2018,7 +1958,7 @@ isc__nm_tlsdns_close(isc_nmsocket_t *sock) { return; } - if (sock->tid == isc_nm_tid()) { + if (sock->tid == isc_tid()) { tlsdns_close_direct(sock); } else { /* @@ -2026,9 +1966,9 @@ isc__nm_tlsdns_close(isc_nmsocket_t *sock) { * channel */ isc__netievent_tlsdnsclose_t *ievent = - isc__nm_get_netievent_tlsdnsclose(sock->mgr, sock); + isc__nm_get_netievent_tlsdnsclose(sock->worker, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } } @@ -2042,7 +1982,7 @@ 
isc__nm_async_tlsdnsclose(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); tlsdns_close_direct(sock); } @@ -2053,8 +1993,7 @@ tlsdns_close_connect_cb(uv_handle_t *handle) { REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(isc__nm_in_netthread()); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); isc__nmsocket_prep_destroy(sock); isc__nmsocket_detach(&sock); @@ -2062,10 +2001,14 @@ tlsdns_close_connect_cb(uv_handle_t *handle) { void isc__nm_tlsdns_shutdown(isc_nmsocket_t *sock) { + isc__networker_t *worker = NULL; + REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(sock->type == isc_nm_tlsdnssocket); + worker = sock->worker; + /* * If the socket is active, mark it inactive and * continue. If it isn't active, stop now. @@ -2110,7 +2053,7 @@ isc__nm_tlsdns_shutdown(isc_nmsocket_t *sock) { } if (sock->statichandle != NULL) { - if (isc__nm_closing(sock)) { + if (isc__nm_closing(worker)) { isc__nm_failed_read_cb(sock, ISC_R_SHUTTINGDOWN, false); } else { isc__nm_failed_read_cb(sock, ISC_R_CANCELED, false); @@ -2138,9 +2081,8 @@ isc__nm_tlsdns_cancelread(isc_nmhandle_t *handle) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_tlsdnssocket); - ievent = isc__nm_get_netievent_tlsdnscancel(sock->mgr, sock, handle); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tlsdnscancel(sock->worker, sock, handle); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } void @@ -2152,7 +2094,7 @@ isc__nm_async_tlsdnscancel(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); isc__nm_failed_read_cb(sock, ISC_R_EOF, false); } @@ -2228,7 +2170,7 @@ 
tlsdns_keep_client_tls_session(isc_nmsocket_t *sock) { * Ensure that the isc_tls_t is being accessed from * within the worker thread the socket is bound to. */ - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); if (sock->tls.client_sess_cache != NULL && sock->tls.client_session_saved == false) { diff --git a/lib/isc/netmgr/tlsstream.c b/lib/isc/netmgr/tlsstream.c index 01a8f65aaa..8a61d327ec 100644 --- a/lib/isc/netmgr/tlsstream.c +++ b/lib/isc/netmgr/tlsstream.c @@ -104,7 +104,7 @@ inactive(isc_nmsocket_t *sock) { atomic_load(&sock->outerhandle->sock->closing) || (sock->listener != NULL && !isc__nmsocket_active(sock->listener)) || - isc__nm_closing(sock)); + isc__nm_closing(sock->worker)); } static void @@ -152,12 +152,12 @@ tls_senddone(isc_nmhandle_t *handle, isc_result_t eresult, void *cbarg) { * requests. See the mirroring code in the tls_send_outgoing() * function. */ if (send_req->data.length > sizeof(send_req->smallbuf)) { - isc_mem_put(handle->sock->mgr->mctx, send_req->data.base, + isc_mem_put(handle->sock->worker->mctx, send_req->data.base, send_req->data.length); } else { INSIST(&send_req->smallbuf[0] == send_req->data.base); } - isc_mem_put(handle->sock->mgr->mctx, send_req, sizeof(*send_req)); + isc_mem_put(handle->sock->worker->mctx, send_req, sizeof(*send_req)); tlssock->tlsstream.nsending--; if (finish && eresult == ISC_R_SUCCESS) { @@ -205,7 +205,7 @@ tls_failed_read_cb(isc_nmsocket_t *sock, const isc_result_t result) { } else if (sock->recv_cb != NULL && sock->statichandle != NULL) { isc__nm_uvreq_t *req = NULL; INSIST(VALID_NMHANDLE(sock->statichandle)); - req = isc__nm_uvreq_get(sock->mgr, sock); + req = isc__nm_uvreq_get(sock->worker, sock); req->cb.recv = sock->recv_cb; req->cbarg = sock->recv_cbarg; isc_nmhandle_attach(sock->statichandle, &req->handle); @@ -229,9 +229,8 @@ tls_failed_read_cb(isc_nmsocket_t *sock, const isc_result_t result) { static void async_tls_do_bio(isc_nmsocket_t *sock) { isc__netievent_tlsdobio_t 
*ievent = - isc__nm_get_netievent_tlsdobio(sock->mgr, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__nm_get_netievent_tlsdobio(sock->worker, sock); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } static int @@ -265,13 +264,13 @@ tls_send_outgoing(isc_nmsocket_t *sock, bool finish, isc_nmhandle_t *tlshandle, pending = TLS_BUF_SIZE; } - send_req = isc_mem_get(sock->mgr->mctx, sizeof(*send_req)); + send_req = isc_mem_get(sock->worker->mctx, sizeof(*send_req)); *send_req = (isc_nmsocket_tls_send_req_t){ .finish = finish, .data.length = pending }; /* Let's try to avoid a memory allocation for small write requests */ if ((size_t)pending > sizeof(send_req->smallbuf)) { - send_req->data.base = isc_mem_get(sock->mgr->mctx, pending); + send_req->data.base = isc_mem_get(sock->worker->mctx, pending); } else { send_req->data.base = &send_req->smallbuf[0]; } @@ -385,7 +384,7 @@ tls_do_bio(isc_nmsocket_t *sock, isc_region_t *received_data, int saved_errno = 0; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); /* We will resume read if TLS layer wants us to */ if (sock->tlsstream.reading && sock->outerhandle) { @@ -590,11 +589,14 @@ tls_readcb(isc_nmhandle_t *handle, isc_result_t result, isc_region_t *region, REQUIRE(VALID_NMSOCK(tlssock)); REQUIRE(VALID_NMHANDLE(handle)); - REQUIRE(tlssock->tid == isc_nm_tid()); + REQUIRE(tlssock->tid == isc_tid()); if (result != ISC_R_SUCCESS) { tls_failed_read_cb(tlssock, result); return; + } else if (isc__nmsocket_closing(handle->sock)) { + tls_failed_read_cb(tlssock, ISC_R_CANCELED); + return; } tls_do_bio(tlssock, region, NULL, false); @@ -602,7 +604,7 @@ tls_readcb(isc_nmhandle_t *handle, isc_result_t result, isc_region_t *region, static isc_result_t initialize_tls(isc_nmsocket_t *sock, bool server) { - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); sock->tlsstream.bio_in = 
BIO_new(BIO_s_mem()); if (sock->tlsstream.bio_in == NULL) { @@ -640,7 +642,6 @@ tlslisten_acceptcb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) { isc_nmsocket_t *tlslistensock = (isc_nmsocket_t *)cbarg; isc_nmsocket_t *tlssock = NULL; isc_tlsctx_t *tlsctx = NULL; - int tid; /* If accept() was unsuccessful we can't do anything */ if (result != ISC_R_SUCCESS) { @@ -655,13 +656,12 @@ tlslisten_acceptcb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) { /* * We need to create a 'wrapper' tlssocket for this connection. */ - tlssock = isc_mem_get(handle->sock->mgr->mctx, sizeof(*tlssock)); - isc__nmsocket_init(tlssock, handle->sock->mgr, isc_nm_tlssocket, + tlssock = isc_mem_get(handle->sock->worker->mctx, sizeof(*tlssock)); + isc__nmsocket_init(tlssock, handle->sock->worker, isc_nm_tlssocket, &handle->sock->iface); - tid = isc_nm_tid(); /* We need to initialize SSL now to reference SSL_CTX properly */ - tlsctx = tls_get_listener_tlsctx(tlslistensock, tid); + tlsctx = tls_get_listener_tlsctx(tlslistensock, isc_tid()); RUNTIME_CHECK(tlsctx != NULL); isc_tlsctx_attach(tlsctx, &tlssock->tlsstream.ctx); tlssock->tlsstream.tls = isc_tls_create(tlssock->tlsstream.ctx); @@ -675,8 +675,8 @@ tlslisten_acceptcb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) { isc__nmsocket_attach(tlslistensock, &tlssock->listener); isc_nmhandle_attach(handle, &tlssock->outerhandle); tlssock->peer = handle->sock->peer; - tlssock->read_timeout = atomic_load(&handle->sock->mgr->init); - tlssock->tid = tid; + tlssock->read_timeout = + atomic_load(&handle->sock->worker->netmgr->init); /* * Hold a reference to tlssock in the TCP socket: it will @@ -699,16 +699,23 @@ isc_nm_listentls(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface, isc_result_t result; isc_nmsocket_t *tlssock = NULL; isc_nmsocket_t *tsock = NULL; + isc__networker_t *worker = &mgr->workers[isc_tid()]; REQUIRE(VALID_NM(mgr)); - if (atomic_load(&mgr->closing)) { + REQUIRE(isc_tid() == 0); + + if 
(isc__nm_closing(worker)) { return (ISC_R_SHUTTINGDOWN); } - tlssock = isc_mem_get(mgr->mctx, sizeof(*tlssock)); + if (workers == 0) { + workers = mgr->nloops; + } + REQUIRE(workers <= mgr->nloops); - isc__nmsocket_init(tlssock, mgr, isc_nm_tlslistener, iface); - tlssock->result = ISC_R_UNSET; + tlssock = isc_mem_get(worker->mctx, sizeof(*tlssock)); + + isc__nmsocket_init(tlssock, worker, isc_nm_tlslistener, iface); tlssock->accept_cb = accept_cb; tlssock->accept_cbarg = accept_cbarg; tls_init_listener_tlsctx(tlssock, sslctx); @@ -750,7 +757,7 @@ isc__nm_async_tlssend(isc__networker_t *worker, isc__netievent_t *ev0) { isc__nm_uvreq_t *req = ievent->req; REQUIRE(VALID_UVREQ(req)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); UNUSED(worker); @@ -786,7 +793,7 @@ isc__nm_tls_send(isc_nmhandle_t *handle, const isc_region_t *region, return; } - uvreq = isc__nm_uvreq_get(sock->mgr, sock); + uvreq = isc__nm_uvreq_get(sock->worker, sock); isc_nmhandle_attach(handle, &uvreq->handle); uvreq->cb.send = cb; uvreq->cbarg = cbarg; @@ -796,9 +803,8 @@ isc__nm_tls_send(isc_nmhandle_t *handle, const isc_region_t *region, /* * We need to create an event and pass it using async channel */ - ievent = isc__nm_get_netievent_tlssend(sock->mgr, sock, uvreq); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tlssend(sock->worker, sock, uvreq); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } void @@ -807,7 +813,7 @@ isc__nm_async_tlsstartread(isc__networker_t *worker, isc__netievent_t *ev0) { (isc__netievent_tlsstartread_t *)ev0; isc_nmsocket_t *sock = ievent->sock; - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); UNUSED(worker); @@ -824,7 +830,7 @@ isc__nm_tls_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { sock = handle->sock; REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->statichandle == handle); - REQUIRE(sock->tid == isc_nm_tid()); 
+ REQUIRE(sock->tid == isc_tid()); REQUIRE(sock->recv_cb == NULL); if (inactive(sock)) { @@ -835,9 +841,8 @@ isc__nm_tls_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { sock->recv_cb = cb; sock->recv_cbarg = cbarg; - ievent = isc__nm_get_netievent_tlsstartread(sock->mgr, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tlsstartread(sock->worker, sock); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } void @@ -873,7 +878,7 @@ isc__nm_tls_resumeread(isc_nmhandle_t *handle) { static void tls_close_direct(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); /* * At this point we're certain that there are no * external references, we can close everything. @@ -906,9 +911,8 @@ isc__nm_tls_close(isc_nmsocket_t *sock) { return; } - ievent = isc__nm_get_netievent_tlsclose(sock->mgr, sock); - isc__nm_maybe_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + ievent = isc__nm_get_netievent_tlsclose(sock->worker, sock); + isc__nm_maybe_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } void @@ -916,7 +920,7 @@ isc__nm_async_tlsclose(isc__networker_t *worker, isc__netievent_t *ev0) { isc__netievent_tlsclose_t *ievent = (isc__netievent_tlsclose_t *)ev0; isc_nmsocket_t *sock = ievent->sock; - REQUIRE(ievent->sock->tid == isc_nm_tid()); + REQUIRE(ievent->sock->tid == isc_tid()); UNUSED(worker); @@ -956,21 +960,17 @@ isc_nm_tlsconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, isc_tlsctx_client_session_cache_t *client_sess_cache, unsigned int timeout) { isc_nmsocket_t *nsock = NULL; -#if defined(NETMGR_TRACE) && defined(NETMGR_TRACE_VERBOSE) - fprintf(stderr, "TLS: isc_nm_tlsconnect(): in net thread: %s\n", - isc__nm_in_netthread() ? 
"yes" : "no"); -#endif /* NETMGR_TRACE */ + isc__networker_t *worker = &mgr->workers[isc_tid()]; REQUIRE(VALID_NM(mgr)); - if (atomic_load(&mgr->closing)) { + if (isc__nm_closing(worker)) { cb(NULL, ISC_R_SHUTTINGDOWN, cbarg); return; } - nsock = isc_mem_get(mgr->mctx, sizeof(*nsock)); - isc__nmsocket_init(nsock, mgr, isc_nm_tlssocket, local); - nsock->result = ISC_R_UNSET; + nsock = isc_mem_get(worker->mctx, sizeof(*nsock)); + isc__nmsocket_init(nsock, worker, isc_nm_tlssocket, local); nsock->connect_cb = cb; nsock->connect_cbarg = cbarg; nsock->connect_timeout = timeout; @@ -991,10 +991,12 @@ static void tcp_connected(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) { isc_nmsocket_t *tlssock = (isc_nmsocket_t *)cbarg; isc_nmhandle_t *tlshandle = NULL; + isc__networker_t *worker = NULL; REQUIRE(VALID_NMSOCK(tlssock)); - tlssock->tid = isc_nm_tid(); + worker = tlssock->worker; + if (result != ISC_R_SUCCESS) { goto error; } @@ -1003,7 +1005,7 @@ tcp_connected(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) { tlssock->iface = handle->sock->iface; tlssock->peer = handle->sock->peer; - if (isc__nm_closing(tlssock)) { + if (isc__nm_closing(worker)) { result = ISC_R_SHUTTINGDOWN; goto error; } @@ -1068,12 +1070,12 @@ isc__nm_tls_cancelread(isc_nmhandle_t *handle) { REQUIRE(sock->type == isc_nm_tlssocket); - if (sock->tid == isc_nm_tid()) { + if (sock->tid == isc_tid()) { tls_cancelread(sock); } else { - ievent = isc__nm_get_netievent_tlscancel(sock->mgr, sock, + ievent = isc__nm_get_netievent_tlscancel(sock->worker, sock, handle); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } } @@ -1084,8 +1086,7 @@ isc__nm_async_tlscancel(isc__networker_t *worker, isc__netievent_t *ev0) { isc_nmsocket_t *sock = ievent->sock; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(worker->id == sock->tid); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); UNUSED(worker); 
tls_cancelread(sock); @@ -1221,14 +1222,15 @@ static void tls_init_listener_tlsctx(isc_nmsocket_t *listener, isc_tlsctx_t *ctx) { size_t nworkers; - REQUIRE(VALID_NM(listener->mgr)); + REQUIRE(VALID_NMSOCK(listener)); REQUIRE(ctx != NULL); - nworkers = (size_t)isc_nm_getnworkers(listener->mgr); + nworkers = + (size_t)isc_loopmgr_nloops(listener->worker->netmgr->loopmgr); INSIST(nworkers > 0); listener->tlsstream.listener_tls_ctx = isc_mem_get( - listener->mgr->mctx, sizeof(isc_tlsctx_t *) * nworkers); + listener->worker->mctx, sizeof(isc_tlsctx_t *) * nworkers); listener->tlsstream.n_listener_tls_ctx = nworkers; for (size_t i = 0; i < nworkers; i++) { listener->tlsstream.listener_tls_ctx[i] = NULL; @@ -1239,7 +1241,7 @@ tls_init_listener_tlsctx(isc_nmsocket_t *listener, isc_tlsctx_t *ctx) { static void tls_cleanup_listener_tlsctx(isc_nmsocket_t *listener) { - REQUIRE(VALID_NM(listener->mgr)); + REQUIRE(VALID_NMSOCK(listener)); if (listener->tlsstream.listener_tls_ctx == NULL) { return; @@ -1248,7 +1250,8 @@ tls_cleanup_listener_tlsctx(isc_nmsocket_t *listener) { for (size_t i = 0; i < listener->tlsstream.n_listener_tls_ctx; i++) { isc_tlsctx_free(&listener->tlsstream.listener_tls_ctx[i]); } - isc_mem_put(listener->mgr->mctx, listener->tlsstream.listener_tls_ctx, + isc_mem_put(listener->worker->mctx, + listener->tlsstream.listener_tls_ctx, sizeof(isc_tlsctx_t *) * listener->tlsstream.n_listener_tls_ctx); listener->tlsstream.n_listener_tls_ctx = 0; @@ -1256,7 +1259,7 @@ tls_cleanup_listener_tlsctx(isc_nmsocket_t *listener) { static isc_tlsctx_t * tls_get_listener_tlsctx(isc_nmsocket_t *listener, const int tid) { - REQUIRE(VALID_NM(listener->mgr)); + REQUIRE(VALID_NMSOCK(listener)); REQUIRE(tid >= 0); if (listener->tlsstream.listener_tls_ctx == NULL) { @@ -1281,7 +1284,7 @@ tls_keep_client_tls_session(isc_nmsocket_t *sock) { * Ensure that the isc_tls_t is being accessed from * within the worker thread the socket is bound to. 
*/ - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); if (sock->tlsstream.client_sess_cache != NULL && sock->tlsstream.client_session_saved == false) { diff --git a/lib/isc/netmgr/udp.c b/lib/isc/netmgr/udp.c index b4aa2ea763..ee8e4caf29 100644 --- a/lib/isc/netmgr/udp.c +++ b/lib/isc/netmgr/udp.c @@ -30,6 +30,7 @@ #include #include +#include "../loop_p.h" #include "netmgr-int.h" #ifdef HAVE_NET_ROUTE_H @@ -58,10 +59,6 @@ #endif /* if defined(HAVE_LINUX_NETLINK_H) && defined(HAVE_LINUX_RTNETLINK_H) \ */ -static isc_result_t -udp_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req, - isc_sockaddr_t *peer); - static void udp_recv_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf, const struct sockaddr *addr, unsigned flags); @@ -75,14 +72,6 @@ udp_close_cb(uv_handle_t *handle); static void read_timer_close_cb(uv_handle_t *handle); -static void -udp_close_direct(isc_nmsocket_t *sock); - -static void -stop_udp_parent(isc_nmsocket_t *sock); -static void -stop_udp_child(isc_nmsocket_t *sock); - static uv_os_sock_t isc__nm_udp_lb_socket(isc_nm_t *mgr, sa_family_t sa_family) { isc_result_t result; @@ -111,16 +100,16 @@ start_udp_child(isc_nm_t *mgr, isc_sockaddr_t *iface, isc_nmsocket_t *sock, uv_os_sock_t fd, int tid) { isc_nmsocket_t *csock; isc__netievent_udplisten_t *ievent = NULL; + isc__networker_t *worker = &mgr->workers[tid]; csock = &sock->children[tid]; - isc__nmsocket_init(csock, mgr, isc_nm_udpsocket, iface); + isc__nmsocket_init(csock, worker, isc_nm_udpsocket, iface); csock->parent = sock; - csock->iface = sock->iface; - atomic_init(&csock->reading, true); csock->recv_cb = sock->recv_cb; csock->recv_cbarg = sock->recv_cbarg; - csock->tid = tid; + + atomic_init(&csock->reading, true); if (mgr->load_balance_sockets) { UNUSED(fd); @@ -131,91 +120,84 @@ start_udp_child(isc_nm_t *mgr, isc_sockaddr_t *iface, isc_nmsocket_t *sock, } REQUIRE(csock->fd >= 0); - ievent = isc__nm_get_netievent_udplisten(mgr, csock); - 
isc__nm_maybe_enqueue_ievent(&mgr->workers[tid], - (isc__netievent_t *)ievent); -} + ievent = isc__nm_get_netievent_udplisten(worker, csock); -static void -enqueue_stoplistening(isc_nmsocket_t *sock) { - isc__netievent_udpstop_t *ievent = - isc__nm_get_netievent_udpstop(sock->mgr, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + if (tid == 0) { + isc__nm_process_ievent(&mgr->workers[tid], + (isc__netievent_t *)ievent); + } else { + isc__nm_enqueue_ievent(&mgr->workers[tid], + (isc__netievent_t *)ievent); + } } isc_result_t isc_nm_listenudp(isc_nm_t *mgr, uint32_t workers, isc_sockaddr_t *iface, isc_nm_recv_cb_t cb, void *cbarg, isc_nmsocket_t **sockp) { - isc_result_t result = ISC_R_SUCCESS; + isc_result_t result = ISC_R_UNSET; isc_nmsocket_t *sock = NULL; size_t children_size = 0; - REQUIRE(VALID_NM(mgr)); uv_os_sock_t fd = -1; + isc__networker_t *worker = &mgr->workers[0]; - /* - * We are creating mgr->nworkers duplicated sockets, one - * socket for each worker thread. - */ - sock = isc_mem_get(mgr->mctx, sizeof(isc_nmsocket_t)); - isc__nmsocket_init(sock, mgr, isc_nm_udplistener, iface); + REQUIRE(VALID_NM(mgr)); + REQUIRE(isc_tid() == 0); + + if (isc__nm_closing(worker)) { + return (ISC_R_SHUTTINGDOWN); + } + + if (workers == 0) { + workers = mgr->nloops; + } + REQUIRE(workers <= mgr->nloops); + + sock = isc_mem_get(worker->mctx, sizeof(isc_nmsocket_t)); + isc__nmsocket_init(sock, worker, isc_nm_udplistener, iface); atomic_init(&sock->rchildren, 0); - sock->nchildren = (workers == ISC_NM_LISTEN_ALL) - ? (uint32_t)mgr->nworkers - : workers; + sock->nchildren = (workers == ISC_NM_LISTEN_ALL) ? 
(uint32_t)mgr->nloops + : workers; children_size = sock->nchildren * sizeof(sock->children[0]); - sock->children = isc_mem_get(mgr->mctx, children_size); + sock->children = isc_mem_get(worker->mctx, children_size); memset(sock->children, 0, children_size); + isc_barrier_init(&sock->barrier, sock->nchildren); + sock->recv_cb = cb; sock->recv_cbarg = cbarg; - sock->result = ISC_R_UNSET; - - sock->tid = 0; - sock->fd = -1; if (!mgr->load_balance_sockets) { fd = isc__nm_udp_lb_socket(mgr, iface->type.sa.sa_family); } - isc_barrier_init(&sock->startlistening, sock->nchildren); - - for (size_t i = 0; i < sock->nchildren; i++) { - if ((int)i == isc_nm_tid()) { - continue; - } + for (size_t i = 1; i < sock->nchildren; i++) { start_udp_child(mgr, iface, sock, fd, i); } - if (isc__nm_in_netthread()) { - start_udp_child(mgr, iface, sock, fd, isc_nm_tid()); - } + start_udp_child(mgr, iface, sock, fd, 0); if (!mgr->load_balance_sockets) { isc__nm_closesocket(fd); } LOCK(&sock->lock); - while (atomic_load(&sock->rchildren) != sock->nchildren) { - WAIT(&sock->cond, &sock->lock); - } result = sock->result; - atomic_store(&sock->active, true); UNLOCK(&sock->lock); - INSIST(result != ISC_R_UNSET); - if (result == ISC_R_SUCCESS) { - REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren); - *sockp = sock; - } else { - atomic_store(&sock->active, false); - enqueue_stoplistening(sock); - isc_nmsocket_close(&sock); - } + atomic_store(&sock->active, true); - return (result); + if (result != ISC_R_SUCCESS) { + atomic_store(&sock->active, false); + isc__nm_udp_stoplistening(sock); + isc_nmsocket_close(&sock); + + return (result); + } + REQUIRE(atomic_load(&sock->rchildren) == sock->nchildren); + *sockp = sock; + return (ISC_R_SUCCESS); } #ifdef USE_ROUTE_SOCKET @@ -251,92 +233,40 @@ route_socket(uv_os_sock_t *fdp) { static isc_result_t route_connect_direct(isc_nmsocket_t *sock) { isc__networker_t *worker = NULL; - isc_result_t result = ISC_R_UNSET; int r; - 
REQUIRE(isc__nm_in_netthread()); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); - worker = &sock->mgr->workers[isc_nm_tid()]; + worker = sock->worker; atomic_store(&sock->connecting, true); - r = uv_udp_init(&worker->loop, &sock->uv_handle.udp); + r = uv_udp_init(&worker->loop->loop, &sock->uv_handle.udp); UV_RUNTIME_CHECK(uv_udp_init, r); uv_handle_set_data(&sock->uv_handle.handle, sock); - r = uv_timer_init(&worker->loop, &sock->read_timer); + r = uv_timer_init(&worker->loop->loop, &sock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock); - if (isc__nm_closing(sock)) { - result = ISC_R_SHUTTINGDOWN; - goto error; + if (isc__nm_closing(worker)) { + return (ISC_R_SHUTTINGDOWN); } r = uv_udp_open(&sock->uv_handle.udp, sock->fd); if (r != 0) { - goto done; + return (isc_uverr2result(r)); } - isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle); + isc__nm_set_network_buffers(sock->worker->netmgr, + &sock->uv_handle.handle); atomic_store(&sock->connecting, false); atomic_store(&sock->connected, true); -done: - result = isc_uverr2result(r); -error: - - LOCK(&sock->lock); - sock->result = result; - SIGNAL(&sock->cond); - if (!atomic_load(&sock->active)) { - WAIT(&sock->scond, &sock->lock); - } - INSIST(atomic_load(&sock->active)); - UNLOCK(&sock->lock); - - return (result); + return (ISC_R_SUCCESS); } -/* - * Asynchronous 'udpconnect' call handler: open a new UDP socket and - * call the 'open' callback with a handle. 
- */ -void -isc__nm_async_routeconnect(isc__networker_t *worker, isc__netievent_t *ev0) { - isc__netievent_routeconnect_t *ievent = - (isc__netievent_routeconnect_t *)ev0; - isc_nmsocket_t *sock = ievent->sock; - isc__nm_uvreq_t *req = ievent->req; - isc_result_t result; - - UNUSED(worker); - - REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->type == isc_nm_udpsocket); - REQUIRE(sock->parent == NULL); - REQUIRE(sock->tid == isc_nm_tid()); - - result = route_connect_direct(sock); - if (result != ISC_R_SUCCESS) { - atomic_store(&sock->active, false); - isc__nm_udp_close(sock); - isc__nm_connectcb(sock, req, result, true); - } else { - /* - * The callback has to be called after the socket has been - * initialized - */ - isc__nm_connectcb(sock, req, ISC_R_SUCCESS, true); - } - - /* - * The sock is now attached to the handle. - */ - isc__nmsocket_detach(&sock); -} #endif /* USE_ROUTE_SOCKET */ isc_result_t @@ -344,60 +274,49 @@ isc_nm_routeconnect(isc_nm_t *mgr, isc_nm_cb_t cb, void *cbarg) { #ifdef USE_ROUTE_SOCKET isc_result_t result = ISC_R_SUCCESS; isc_nmsocket_t *sock = NULL; - isc__netievent_udpconnect_t *event = NULL; isc__nm_uvreq_t *req = NULL; + isc__networker_t *worker = &mgr->workers[isc_tid()]; + uv_os_sock_t fd = -1; REQUIRE(VALID_NM(mgr)); + REQUIRE(isc_tid() == 0); - sock = isc_mem_get(mgr->mctx, sizeof(*sock)); - isc__nmsocket_init(sock, mgr, isc_nm_udpsocket, NULL); + if (isc__nm_closing(worker)) { + return (ISC_R_SHUTTINGDOWN); + } + + result = route_socket(&fd); + if (result != ISC_R_SUCCESS) { + return (result); + } + + sock = isc_mem_get(worker->mctx, sizeof(*sock)); + isc__nmsocket_init(sock, worker, isc_nm_udpsocket, NULL); sock->connect_cb = cb; sock->connect_cbarg = cbarg; - sock->result = ISC_R_UNSET; atomic_init(&sock->client, true); sock->route_sock = true; + sock->fd = fd; - req = isc__nm_uvreq_get(mgr, sock); + req = isc__nm_uvreq_get(worker, sock); req->cb.connect = cb; req->cbarg = cbarg; req->handle = isc__nmhandle_get(sock, NULL, NULL); - 
result = route_socket(&sock->fd); - if (result != ISC_R_SUCCESS) { - if (isc__nm_in_netthread()) { - sock->tid = isc_nm_tid(); - } - isc__nmsocket_clearcb(sock); - isc__nm_connectcb(sock, req, result, true); - atomic_store(&sock->closed, true); - isc__nmsocket_detach(&sock); - return (result); - } - - event = isc__nm_get_netievent_routeconnect(mgr, sock, req); - - if (isc__nm_in_netthread()) { - atomic_store(&sock->active, true); - sock->tid = isc_nm_tid(); - isc__nm_async_routeconnect(&mgr->workers[sock->tid], - (isc__netievent_t *)event); - isc__nm_put_netievent_routeconnect(mgr, event); - } else { - atomic_init(&sock->active, false); - sock->tid = 0; - isc__nm_enqueue_ievent(&mgr->workers[sock->tid], - (isc__netievent_t *)event); - } - LOCK(&sock->lock); - while (sock->result == ISC_R_UNSET) { - WAIT(&sock->cond, &sock->lock); - } atomic_store(&sock->active, true); - BROADCAST(&sock->scond); - UNLOCK(&sock->lock); - return (sock->result); + result = route_connect_direct(sock); + if (result != ISC_R_SUCCESS) { + atomic_store(&sock->active, false); + isc__nm_udp_close(sock); + } + + isc__nm_connectcb(sock, req, result, true); + + isc__nmsocket_detach(&sock); + + return (ISC_R_SUCCESS); #else /* USE_ROUTE_SOCKET */ UNUSED(mgr); UNUSED(cb); @@ -421,34 +340,33 @@ isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) { isc_nm_t *mgr = NULL; REQUIRE(VALID_NMSOCK(ievent->sock)); - REQUIRE(ievent->sock->tid == isc_nm_tid()); + REQUIRE(ievent->sock->tid == isc_tid()); REQUIRE(VALID_NMSOCK(ievent->sock->parent)); sock = ievent->sock; sa_family = sock->iface.type.sa.sa_family; - mgr = sock->mgr; + mgr = sock->worker->netmgr; REQUIRE(sock->type == isc_nm_udpsocket); REQUIRE(sock->parent != NULL); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); (void)isc__nm_socket_min_mtu(sock->fd, sa_family); #if HAVE_DECL_UV_UDP_RECVMMSG uv_init_flags |= UV_UDP_RECVMMSG; #endif - r = uv_udp_init_ex(&worker->loop, &sock->uv_handle.udp, 
uv_init_flags); + r = uv_udp_init_ex(&worker->loop->loop, &sock->uv_handle.udp, + uv_init_flags); UV_RUNTIME_CHECK(uv_udp_init_ex, r); uv_handle_set_data(&sock->uv_handle.handle, sock); /* This keeps the socket alive after everything else is gone */ isc__nmsocket_attach(sock, &(isc_nmsocket_t *){ NULL }); - r = uv_timer_init(&worker->loop, &sock->read_timer); + r = uv_timer_init(&worker->loop->loop, &sock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock); - LOCK(&sock->parent->lock); - r = uv_udp_open(&sock->uv_handle.udp, sock->fd); if (r < 0) { isc__nm_closesocket(sock->fd); @@ -470,6 +388,7 @@ isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) { goto done; } } else { + LOCK(&sock->parent->lock); if (sock->parent->fd == -1) { /* This thread is first, bind the socket */ r = isc__nm_udp_freebind(&sock->uv_handle.udp, @@ -477,6 +396,7 @@ isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) { uv_bind_flags); if (r < 0) { isc__nm_incstats(sock, STATID_BINDFAIL); + UNLOCK(&sock->parent->lock); goto done; } sock->parent->uv_handle.udp.flags = @@ -487,9 +407,10 @@ isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) { sock->uv_handle.udp.flags = sock->parent->uv_handle.udp.flags; } + UNLOCK(&sock->parent->lock); } - isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle); + isc__nm_set_network_buffers(mgr, &sock->uv_handle.handle); r = uv_udp_recv_start(&sock->uv_handle.udp, isc__nm_alloc_cb, udp_recv_cb); @@ -503,30 +424,64 @@ isc__nm_async_udplisten(isc__networker_t *worker, isc__netievent_t *ev0) { done: result = isc_uverr2result(r); atomic_fetch_add(&sock->parent->rchildren, 1); + + LOCK(&sock->parent->lock); if (sock->parent->result == ISC_R_UNSET) { sock->parent->result = result; + } else { + REQUIRE(sock->parent->result == result); } - SIGNAL(&sock->parent->cond); UNLOCK(&sock->parent->lock); - 
isc_barrier_wait(&sock->parent->startlistening); + REQUIRE(!worker->loop->paused); + isc_barrier_wait(&sock->parent->barrier); +} + +static void +stop_udp_child(isc_nmsocket_t *sock, uint32_t tid) { + isc_nmsocket_t *csock = NULL; + isc__netievent_udpstop_t *ievent = NULL; + + csock = &sock->children[tid]; + REQUIRE(VALID_NMSOCK(csock)); + + atomic_store(&csock->active, false); + ievent = isc__nm_get_netievent_udpstop(csock->worker, csock); + + if (tid == 0) { + isc__nm_process_ievent(csock->worker, + (isc__netievent_t *)ievent); + } else { + isc__nm_enqueue_ievent(csock->worker, + (isc__netievent_t *)ievent); + } +} + +static void +stop_udp_parent(isc_nmsocket_t *sock) { + /* Stop the parent */ + atomic_store(&sock->closed, true); + isc__nmsocket_prep_destroy(sock); } void isc__nm_udp_stoplistening(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_udplistener); + REQUIRE(sock->tid == isc_tid()); + REQUIRE(sock->tid == 0); - if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false }, - true)) { - UNREACHABLE(); + RUNTIME_CHECK(atomic_compare_exchange_strong(&sock->closing, + &(bool){ false }, true)); + + /* Stop all the children */ + for (size_t i = 1; i < sock->nchildren; i++) { + stop_udp_child(sock, i); } - if (!isc__nm_in_netthread()) { - enqueue_stoplistening(sock); - } else { - stop_udp_parent(sock); - } + stop_udp_child(sock, 0); + + stop_udp_parent(sock); } /* @@ -540,14 +495,15 @@ isc__nm_async_udpstop(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); + REQUIRE(sock->parent != NULL); - if (sock->parent != NULL) { - stop_udp_child(sock); - return; - } + isc__nm_udp_close(sock); - stop_udp_parent(sock); + (void)atomic_fetch_sub(&sock->parent->rchildren, 1); + + REQUIRE(!worker->loop->paused); + isc_barrier_wait(&sock->parent->barrier); } /* @@ -565,8 +521,7 @@ udp_recv_cb(uv_udp_t *handle, ssize_t 
nrecv, const uv_buf_t *buf, isc_sockaddr_t sockaddr, *sa = NULL; REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); - REQUIRE(atomic_load(&sock->reading)); + REQUIRE(sock->tid == isc_tid()); /* * When using recvmmsg(2), if no errors occur, there will be a final @@ -583,12 +538,13 @@ udp_recv_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf, #else UNUSED(flags); #endif - /* + * Possible reasons to return now without processing: + * * - If we're simulating a firewall blocking UDP packets * bigger than 'maxudp' bytes for testing purposes. */ - maxudp = atomic_load(&sock->mgr->maxudp); + maxudp = atomic_load(&sock->worker->netmgr->maxudp); if ((maxudp != 0 && (uint32_t)nrecv > maxudp)) { /* * We need to keep the read_cb intact in case, so the @@ -615,6 +571,14 @@ udp_recv_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf, goto free; } + /* + * - If the network manager is shutting down + */ + if (isc__nm_closing(sock->worker)) { + isc__nm_failed_read_cb(sock, ISC_R_SHUTTINGDOWN, false); + goto free; + } + /* * - If the socket is no longer active. */ @@ -673,6 +637,28 @@ free: isc__nm_free_uvbuf(sock, buf); } +static void +udp_send_cb(uv_udp_send_t *req, int status) { + isc_result_t result = ISC_R_SUCCESS; + isc__nm_uvreq_t *uvreq = uv_handle_get_data((uv_handle_t *)req); + isc_nmsocket_t *sock = NULL; + + REQUIRE(VALID_UVREQ(uvreq)); + REQUIRE(VALID_NMHANDLE(uvreq->handle)); + + sock = uvreq->sock; + + REQUIRE(VALID_NMSOCK(sock)); + REQUIRE(sock->tid == isc_tid()); + + if (status < 0) { + result = isc_uverr2result(status); + isc__nm_incstats(sock, STATID_SENDFAIL); + } + + isc__nm_sendcb(sock, uvreq, result, false); +} + /* * Send the data in 'region' to a peer via a UDP socket. 
We try to find * a proper sibling/child socket so that we won't have to jump to @@ -682,13 +668,19 @@ void isc__nm_udp_send(isc_nmhandle_t *handle, const isc_region_t *region, isc_nm_cb_t cb, void *cbarg) { isc_nmsocket_t *sock = handle->sock; - isc_nmsocket_t *rsock = NULL; - isc_sockaddr_t *peer = &handle->peer; + const isc_sockaddr_t *peer = &handle->peer; + const struct sockaddr *sa = &peer->type.sa; isc__nm_uvreq_t *uvreq = NULL; - uint32_t maxudp = atomic_load(&sock->mgr->maxudp); - int ntid; + isc__networker_t *worker = NULL; + uint32_t maxudp; + int r; - INSIST(sock->type == isc_nm_udpsocket); + REQUIRE(VALID_NMSOCK(sock)); + REQUIRE(sock->type == isc_nm_udpsocket); + REQUIRE(sock->tid == isc_tid()); + + worker = sock->worker; + maxudp = atomic_load(&worker->netmgr->maxudp); /* * We're simulating a firewall blocking UDP packets bigger than @@ -703,31 +695,17 @@ isc__nm_udp_send(isc_nmhandle_t *handle, const isc_region_t *region, return; } - if (atomic_load(&sock->client)) { - /* - * When we are sending from the client socket, we directly use - * the socket provided. - */ - rsock = sock; - goto send; - } else { - /* - * When we are sending from the server socket, we either use the - * socket associated with the network thread we are in, or we - * use the thread from the socket associated with the handle. 
- */ - INSIST(sock->parent != NULL); - - if (isc__nm_in_netthread()) { - ntid = isc_nm_tid(); - } else { - ntid = sock->tid; - } - rsock = &sock->parent->children[ntid]; + if (isc__nm_closing(worker)) { + cb(handle, ISC_R_SHUTTINGDOWN, cbarg); + return; } -send: - uvreq = isc__nm_uvreq_get(rsock->mgr, rsock); + if (isc__nmsocket_closing(sock)) { + cb(handle, ISC_R_CANCELED, cbarg); + return; + } + + uvreq = isc__nm_uvreq_get(sock->worker, sock); uvreq->uvbuf.base = (char *)region->base; uvreq->uvbuf.len = region->length; @@ -736,90 +714,6 @@ send: uvreq->cb.send = cb; uvreq->cbarg = cbarg; - if (isc_nm_tid() == rsock->tid) { - REQUIRE(rsock->tid == isc_nm_tid()); - isc__netievent_udpsend_t ievent = { .sock = rsock, - .req = uvreq, - .peer = *peer }; - - isc__nm_async_udpsend(NULL, (isc__netievent_t *)&ievent); - } else { - isc__netievent_udpsend_t *ievent = - isc__nm_get_netievent_udpsend(sock->mgr, rsock); - ievent->peer = *peer; - ievent->req = uvreq; - - isc__nm_enqueue_ievent(&sock->mgr->workers[rsock->tid], - (isc__netievent_t *)ievent); - } -} - -/* - * Asynchronous 'udpsend' event handler: send a packet on a UDP socket. 
- */ -void -isc__nm_async_udpsend(isc__networker_t *worker, isc__netievent_t *ev0) { - isc_result_t result; - isc__netievent_udpsend_t *ievent = (isc__netievent_udpsend_t *)ev0; - isc_nmsocket_t *sock = ievent->sock; - isc__nm_uvreq_t *uvreq = ievent->req; - - REQUIRE(sock->type == isc_nm_udpsocket); - REQUIRE(sock->tid == isc_nm_tid()); - UNUSED(worker); - - if (isc__nmsocket_closing(sock)) { - isc__nm_failed_send_cb(sock, uvreq, ISC_R_CANCELED); - return; - } - - result = udp_send_direct(sock, uvreq, &ievent->peer); - if (result != ISC_R_SUCCESS) { - isc__nm_incstats(sock, STATID_SENDFAIL); - isc__nm_failed_send_cb(sock, uvreq, result); - } -} - -static void -udp_send_cb(uv_udp_send_t *req, int status) { - isc_result_t result = ISC_R_SUCCESS; - isc__nm_uvreq_t *uvreq = uv_handle_get_data((uv_handle_t *)req); - isc_nmsocket_t *sock = NULL; - - REQUIRE(VALID_UVREQ(uvreq)); - REQUIRE(VALID_NMHANDLE(uvreq->handle)); - - sock = uvreq->sock; - - REQUIRE(sock->tid == isc_nm_tid()); - - if (status < 0) { - result = isc_uverr2result(status); - isc__nm_incstats(sock, STATID_SENDFAIL); - } - - isc__nm_sendcb(sock, uvreq, result, false); -} - -/* - * udp_send_direct sends buf to a peer on a socket. Sock has to be in - * the same thread as the callee. 
- */ -static isc_result_t -udp_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req, - isc_sockaddr_t *peer) { - const struct sockaddr *sa = &peer->type.sa; - int r; - - REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(VALID_UVREQ(req)); - REQUIRE(sock->tid == isc_nm_tid()); - REQUIRE(sock->type == isc_nm_udpsocket); - - if (isc__nmsocket_closing(sock)) { - return (ISC_R_CANCELED); - } - #if UV_VERSION_HEX >= UV_VERSION(1, 27, 0) /* * If we used uv_udp_connect() (and not the shim version for @@ -832,46 +726,32 @@ udp_send_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req, } #endif - r = uv_udp_send(&req->uv_req.udp_send, &sock->uv_handle.udp, - &req->uvbuf, 1, sa, udp_send_cb); + r = uv_udp_send(&uvreq->uv_req.udp_send, &sock->uv_handle.udp, + &uvreq->uvbuf, 1, sa, udp_send_cb); if (r < 0) { - return (isc_uverr2result(r)); + isc__nm_incstats(sock, STATID_SENDFAIL); + isc__nm_failed_send_cb(sock, uvreq, isc_uverr2result(r)); } - - return (ISC_R_SUCCESS); } static isc_result_t udp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { - isc__networker_t *worker = NULL; int uv_bind_flags = UV_UDP_REUSEADDR; - isc_result_t result = ISC_R_UNSET; int r; + isc__networker_t *worker = sock->worker; - REQUIRE(isc__nm_in_netthread()); - REQUIRE(sock->tid == isc_nm_tid()); - - worker = &sock->mgr->workers[isc_nm_tid()]; - - atomic_store(&sock->connecting, true); - - r = uv_udp_init(&worker->loop, &sock->uv_handle.udp); + r = uv_udp_init(&worker->loop->loop, &sock->uv_handle.udp); UV_RUNTIME_CHECK(uv_udp_init, r); uv_handle_set_data(&sock->uv_handle.handle, sock); - r = uv_timer_init(&worker->loop, &sock->read_timer); + r = uv_timer_init(&worker->loop->loop, &sock->read_timer); UV_RUNTIME_CHECK(uv_timer_init, r); uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock); - if (isc__nm_closing(sock)) { - result = ISC_R_SHUTTINGDOWN; - goto error; - } - r = uv_udp_open(&sock->uv_handle.udp, sock->fd); if (r != 0) { isc__nm_incstats(sock, STATID_OPENFAIL); - goto done; + return 
(isc_uverr2result(r)); } isc__nm_incstats(sock, STATID_OPEN); @@ -887,10 +767,11 @@ udp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { uv_bind_flags); if (r != 0) { isc__nm_incstats(sock, STATID_BINDFAIL); - goto done; + return (isc_uverr2result(r)); } - isc__nm_set_network_buffers(sock->mgr, &sock->uv_handle.handle); + isc__nm_set_network_buffers(sock->worker->netmgr, + &sock->uv_handle.handle); /* * On FreeBSD the UDP connect() call sometimes results in a @@ -903,65 +784,11 @@ udp_connect_direct(isc_nmsocket_t *sock, isc__nm_uvreq_t *req) { } while (r == UV_EADDRINUSE && --req->connect_tries > 0); if (r != 0) { isc__nm_incstats(sock, STATID_CONNECTFAIL); - goto done; + return (isc_uverr2result(r)); } isc__nm_incstats(sock, STATID_CONNECT); - atomic_store(&sock->connecting, false); - atomic_store(&sock->connected, true); - -done: - result = isc_uverr2result(r); -error: - - LOCK(&sock->lock); - sock->result = result; - SIGNAL(&sock->cond); - if (!atomic_load(&sock->active)) { - WAIT(&sock->scond, &sock->lock); - } - INSIST(atomic_load(&sock->active)); - UNLOCK(&sock->lock); - - return (result); -} - -/* - * Asynchronous 'udpconnect' call handler: open a new UDP socket and - * call the 'open' callback with a handle. 
- */ -void -isc__nm_async_udpconnect(isc__networker_t *worker, isc__netievent_t *ev0) { - isc__netievent_udpconnect_t *ievent = - (isc__netievent_udpconnect_t *)ev0; - isc_nmsocket_t *sock = ievent->sock; - isc__nm_uvreq_t *req = ievent->req; - isc_result_t result; - - UNUSED(worker); - - REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->type == isc_nm_udpsocket); - REQUIRE(sock->parent == NULL); - REQUIRE(sock->tid == isc_nm_tid()); - - result = udp_connect_direct(sock, req); - if (result != ISC_R_SUCCESS) { - atomic_store(&sock->active, false); - isc__nm_udp_close(sock); - isc__nm_connectcb(sock, req, result, true); - } else { - /* - * The callback has to be called after the socket has been - * initialized - */ - isc__nm_connectcb(sock, req, ISC_R_SUCCESS, true); - } - - /* - * The sock is now attached to the handle. - */ - isc__nmsocket_detach(&sock); + return (ISC_R_SUCCESS); } void @@ -969,45 +796,40 @@ isc_nm_udpconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, isc_nm_cb_t cb, void *cbarg, unsigned int timeout) { isc_result_t result = ISC_R_SUCCESS; isc_nmsocket_t *sock = NULL; - isc__netievent_udpconnect_t *event = NULL; isc__nm_uvreq_t *req = NULL; sa_family_t sa_family; + isc__networker_t *worker = &mgr->workers[isc_tid()]; + uv_os_sock_t fd = -1; REQUIRE(VALID_NM(mgr)); REQUIRE(local != NULL); REQUIRE(peer != NULL); + if (isc__nm_closing(worker)) { + cb(NULL, ISC_R_SHUTTINGDOWN, cbarg); + return; + } + sa_family = peer->type.sa.sa_family; - sock = isc_mem_get(mgr->mctx, sizeof(isc_nmsocket_t)); - isc__nmsocket_init(sock, mgr, isc_nm_udpsocket, local); + result = isc__nm_socket(sa_family, SOCK_DGRAM, 0, &fd); + if (result != ISC_R_SUCCESS) { + cb(NULL, result, cbarg); + return; + } + + /* Initialize the new socket */ + /* FIXME: Use per-worker mempool for new sockets */ + sock = isc_mem_get(worker->mctx, sizeof(isc_nmsocket_t)); + isc__nmsocket_init(sock, worker, isc_nm_udpsocket, local); sock->connect_cb = cb; sock->connect_cbarg = cbarg; 
sock->read_timeout = timeout; sock->peer = *peer; - sock->result = ISC_R_UNSET; atomic_init(&sock->client, true); - req = isc__nm_uvreq_get(mgr, sock); - req->cb.connect = cb; - req->cbarg = cbarg; - req->peer = *peer; - req->local = *local; - req->handle = isc__nmhandle_get(sock, &req->peer, &sock->iface); - - result = isc__nm_socket(sa_family, SOCK_DGRAM, 0, &sock->fd); - if (result != ISC_R_SUCCESS) { - if (isc__nm_in_netthread()) { - sock->tid = isc_nm_tid(); - } - isc__nmsocket_clearcb(sock); - isc__nm_connectcb(sock, req, result, true); - atomic_store(&sock->closed, true); - isc__nmsocket_detach(&sock); - return; - } - + sock->fd = fd; result = isc__nm_socket_reuse(sock->fd); RUNTIME_CHECK(result == ISC_R_SUCCESS || result == ISC_R_NOTIMPLEMENTED); @@ -1022,27 +844,30 @@ isc_nm_udpconnect(isc_nm_t *mgr, isc_sockaddr_t *local, isc_sockaddr_t *peer, (void)isc__nm_socket_min_mtu(sock->fd, sa_family); - event = isc__nm_get_netievent_udpconnect(mgr, sock, req); + /* Initialize the request */ + req = isc__nm_uvreq_get(worker, sock); + req->cb.connect = cb; + req->cbarg = cbarg; + req->peer = *peer; + req->local = *local; + req->handle = isc__nmhandle_get(sock, &req->peer, &sock->iface); - if (isc__nm_in_netthread()) { - atomic_store(&sock->active, true); - sock->tid = isc_nm_tid(); - isc__nm_async_udpconnect(&mgr->workers[sock->tid], - (isc__netievent_t *)event); - isc__nm_put_netievent_udpconnect(mgr, event); - } else { - atomic_init(&sock->active, false); - sock->tid = isc_random_uniform(mgr->nworkers); - isc__nm_enqueue_ievent(&mgr->workers[sock->tid], - (isc__netievent_t *)event); - } - LOCK(&sock->lock); - while (sock->result == ISC_R_UNSET) { - WAIT(&sock->cond, &sock->lock); - } atomic_store(&sock->active, true); - BROADCAST(&sock->scond); - UNLOCK(&sock->lock); + atomic_store(&sock->connecting, true); + + result = udp_connect_direct(sock, req); + if (result != ISC_R_SUCCESS) { + atomic_store(&sock->active, false); + isc__nm_failed_connect_cb(sock, req, 
result, true); + isc__nmsocket_detach(&sock); + return; + } + + atomic_store(&sock->connecting, false); + atomic_store(&sock->connected, true); + + isc__nm_connectcb(sock, req, ISC_R_SUCCESS, true); + isc__nmsocket_detach(&sock); } void @@ -1050,30 +875,37 @@ isc__nm_udp_read_cb(uv_udp_t *handle, ssize_t nrecv, const uv_buf_t *buf, const struct sockaddr *addr, unsigned flags) { isc_nmsocket_t *sock = uv_handle_get_data((uv_handle_t *)handle); REQUIRE(VALID_NMSOCK(sock)); + REQUIRE(atomic_load(&sock->client)); + REQUIRE(sock->parent == NULL); - udp_recv_cb(handle, nrecv, buf, addr, flags); /* - * If a caller calls isc_nm_read() on a listening socket, we can - * get here, but we MUST NOT stop reading from the listener - * socket. The only difference between listener and connected - * sockets is that the former has sock->parent set and later - * does not. + * This function can only be reached when calling isc_nm_read() on + * a UDP client socket. There's no point calling isc_nm_read() on + * a UDP listener socket; those are always reading. + * + * The reason why we stop the timer and the reading after calling the + * callback is because there's a time window where a second UDP packet + * might be received between isc__nm_stop_reading() call and + * isc_nm_read() call from the callback and such UDP datagram would be + * lost like tears in the rain. 
*/ - if (!sock->parent) { - isc__nmsocket_timer_stop(sock); - isc__nm_stop_reading(sock); - } + udp_recv_cb(handle, nrecv, buf, addr, flags); + + isc__nmsocket_timer_stop(sock); + isc__nm_stop_reading(sock); } void isc__nm_udp_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(result != ISC_R_SUCCESS); + REQUIRE(sock->tid == isc_tid()); if (atomic_load(&sock->client)) { isc__nmsocket_timer_stop(sock); isc__nm_stop_reading(sock); + /* Nobody expects the callback if isc_nm_read() wasn't called */ if (!sock->recv_read) { goto destroy; } @@ -1122,32 +954,40 @@ isc__nm_async_udpread(isc__networker_t *worker, isc__netievent_t *ev0) { UNUSED(worker); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); - if (isc__nm_closing(sock)) { + if (isc__nm_closing(worker)) { result = ISC_R_SHUTTINGDOWN; - } else if (isc__nmsocket_closing(sock)) { - result = ISC_R_CANCELED; - } else { - result = isc__nm_start_reading(sock); + goto fail; } + if (isc__nmsocket_closing(sock)) { + result = ISC_R_CANCELED; + goto fail; + } + + result = isc__nm_start_reading(sock); if (result != ISC_R_SUCCESS) { - atomic_store(&sock->reading, true); - isc__nm_failed_read_cb(sock, result, false); - return; + goto fail; } isc__nmsocket_timer_start(sock); + return; + +fail: + atomic_store(&sock->reading, true); /* required by the next call */ + isc__nm_failed_read_cb(sock, result, false); } void isc__nm_udp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { + isc_nmsocket_t *sock = NULL; + REQUIRE(VALID_NMHANDLE(handle)); - REQUIRE(VALID_NMSOCK(handle->sock)); - isc_nmsocket_t *sock = handle->sock; + sock = handle->sock; + REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_udpsocket); REQUIRE(sock->statichandle == handle); REQUIRE(!sock->recv_read); @@ -1156,45 +996,25 @@ isc__nm_udp_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { sock->recv_cbarg = cbarg; sock->recv_read = 
true; - if (!atomic_load(&sock->reading) && sock->tid == isc_nm_tid()) { + if (!atomic_load(&sock->reading) && sock->tid == isc_tid()) { isc__netievent_udpread_t ievent = { .sock = sock }; - isc__nm_async_udpread(NULL, (isc__netievent_t *)&ievent); + isc__nm_async_udpread(sock->worker, + (isc__netievent_t *)&ievent); } else { isc__netievent_udpread_t *ievent = - isc__nm_get_netievent_udpread(sock->mgr, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], + isc__nm_get_netievent_udpread(sock->worker, sock); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } } -static void -udp_stop_cb(uv_handle_t *handle) { - isc_nmsocket_t *sock = uv_handle_get_data(handle); - uv_handle_set_data(handle, NULL); - - REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); - REQUIRE(atomic_load(&sock->closing)); - - if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false }, - true)) { - UNREACHABLE(); - } - - isc__nm_incstats(sock, STATID_CLOSE); - - atomic_store(&sock->listening, false); - - isc__nmsocket_detach(&sock); -} - static void udp_close_cb(uv_handle_t *handle) { isc_nmsocket_t *sock = uv_handle_get_data(handle); uv_handle_set_data(handle, NULL); REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->closing)); if (!atomic_compare_exchange_strong(&sock->closed, &(bool){ false }, @@ -1205,13 +1025,20 @@ udp_close_cb(uv_handle_t *handle) { isc__nm_incstats(sock, STATID_CLOSE); if (sock->server != NULL) { + /* server socket (accept) */ isc__nmsocket_detach(&sock->server); } - atomic_store(&sock->connected, false); - atomic_store(&sock->listening, false); - - isc__nmsocket_prep_destroy(sock); + if (sock->parent != NULL) { + /* listening socket (listen) */ + atomic_store(&sock->listening, false); + isc__nmsocket_detach(&sock); + } else { + /* client and server sockets */ + atomic_store(&sock->connected, false); + atomic_store(&sock->listening, false); + 
isc__nmsocket_prep_destroy(sock); + } } static void @@ -1219,111 +1046,33 @@ read_timer_close_cb(uv_handle_t *handle) { isc_nmsocket_t *sock = uv_handle_get_data(handle); uv_handle_set_data(handle, NULL); - if (sock->parent) { - uv_close(&sock->uv_handle.handle, udp_stop_cb); - } else { - uv_close(&sock->uv_handle.handle, udp_close_cb); - } -} - -static void -stop_udp_child(isc_nmsocket_t *sock) { - REQUIRE(sock->type == isc_nm_udpsocket); - REQUIRE(sock->tid == isc_nm_tid()); - - if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false }, - true)) { - return; - } - - udp_close_direct(sock); - - atomic_fetch_sub(&sock->parent->rchildren, 1); - - isc_barrier_wait(&sock->parent->stoplistening); -} - -static void -stop_udp_parent(isc_nmsocket_t *sock) { - isc_nmsocket_t *csock = NULL; - - REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); - REQUIRE(sock->type == isc_nm_udplistener); - - isc_barrier_init(&sock->stoplistening, sock->nchildren); - - for (size_t i = 0; i < sock->nchildren; i++) { - csock = &sock->children[i]; - REQUIRE(VALID_NMSOCK(csock)); - - if ((int)i == isc_nm_tid()) { - /* - * We need to schedule closing the other sockets first - */ - continue; - } - - atomic_store(&csock->active, false); - enqueue_stoplistening(csock); - } - - csock = &sock->children[isc_nm_tid()]; - atomic_store(&csock->active, false); - stop_udp_child(csock); - - atomic_store(&sock->closed, true); - isc__nmsocket_prep_destroy(sock); -} - -static void -udp_close_direct(isc_nmsocket_t *sock) { - REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); - - uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock); - uv_close((uv_handle_t *)&sock->read_timer, read_timer_close_cb); -} - -void -isc__nm_async_udpclose(isc__networker_t *worker, isc__netievent_t *ev0) { - isc__netievent_udpclose_t *ievent = (isc__netievent_udpclose_t *)ev0; - isc_nmsocket_t *sock = ievent->sock; - - REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); - 
UNUSED(worker); - - udp_close_direct(sock); + uv_close(&sock->uv_handle.handle, udp_close_cb); } void isc__nm_udp_close(isc_nmsocket_t *sock) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_udpsocket); - REQUIRE(!isc__nmsocket_active(sock)); + REQUIRE(sock->tid == isc_tid()); if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false }, true)) { return; } - if (sock->tid == isc_nm_tid()) { - udp_close_direct(sock); - } else { - isc__netievent_udpclose_t *ievent = - isc__nm_get_netievent_udpclose(sock->mgr, sock); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); - } + uv_close((uv_handle_t *)&sock->read_timer, read_timer_close_cb); } void isc__nm_udp_shutdown(isc_nmsocket_t *sock) { + isc__networker_t *worker = NULL; + REQUIRE(VALID_NMSOCK(sock)); - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(sock->type == isc_nm_udpsocket); + worker = sock->worker; + /* * If the socket is active, mark it inactive and * continue. If it isn't active, stop now. @@ -1346,7 +1095,7 @@ isc__nm_udp_shutdown(isc_nmsocket_t *sock) { * interested in the callback. */ if (sock->statichandle != NULL) { - if (isc__nm_closing(sock)) { + if (isc__nm_closing(worker)) { isc__nm_failed_read_cb(sock, ISC_R_SHUTTINGDOWN, false); } else { isc__nm_failed_read_cb(sock, ISC_R_CANCELED, false); @@ -1354,12 +1103,17 @@ isc__nm_udp_shutdown(isc_nmsocket_t *sock) { return; } + /* + * Ignore the listening sockets + */ + if (sock->parent != NULL) { + return; + } + /* * Otherwise, we just send the socket to abyss... 
*/ - if (sock->parent == NULL) { - isc__nmsocket_prep_destroy(sock); - } + isc__nmsocket_prep_destroy(sock); } void @@ -1374,10 +1128,9 @@ isc__nm_udp_cancelread(isc_nmhandle_t *handle) { REQUIRE(VALID_NMSOCK(sock)); REQUIRE(sock->type == isc_nm_udpsocket); - ievent = isc__nm_get_netievent_udpcancel(sock->mgr, sock, handle); + ievent = isc__nm_get_netievent_udpcancel(sock->worker, sock, handle); - isc__nm_enqueue_ievent(&sock->mgr->workers[sock->tid], - (isc__netievent_t *)ievent); + isc__nm_enqueue_ievent(sock->worker, (isc__netievent_t *)ievent); } void @@ -1391,7 +1144,7 @@ isc__nm_async_udpcancel(isc__networker_t *worker, isc__netievent_t *ev0) { sock = ievent->sock; - REQUIRE(sock->tid == isc_nm_tid()); + REQUIRE(sock->tid == isc_tid()); REQUIRE(atomic_load(&sock->client)); isc__nm_failed_read_cb(sock, ISC_R_EOF, false); diff --git a/lib/isc/netmgr_p.h b/lib/isc/netmgr_p.h deleted file mode 100644 index 73171a98c3..0000000000 --- a/lib/isc/netmgr_p.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) Internet Systems Consortium, Inc. ("ISC") - * - * SPDX-License-Identifier: MPL-2.0 - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at https://mozilla.org/MPL/2.0/. - * - * See the COPYRIGHT file distributed with this work for additional - * information regarding copyright ownership. - */ - -#pragma once - -#include -#include - -void -isc__netmgr_create(isc_mem_t *mctx, uint32_t workers, isc_nm_t **netgmrp); -/*%< - * Creates a new network manager with 'workers' worker threads, - * and starts it running. - */ - -void -isc__netmgr_destroy(isc_nm_t **netmgrp); -/*%< - * Similar to isc_nm_detach(), but actively waits for all other references - * to be gone before returning. 
- */ - -void -isc__netmgr_shutdown(isc_nm_t *mgr); -/*%< - * Shut down all active connections, freeing associated resources; - * prevent new connections from being established. - */ diff --git a/lib/isc/task.c b/lib/isc/task.c index 729e7712a9..314fa56ebb 100644 --- a/lib/isc/task.c +++ b/lib/isc/task.c @@ -13,20 +13,17 @@ /*! \file */ -/* - * XXXRTH Need to document the states a task can be in, and the rules - * for changing states. - */ - #include #include -#include +#include #include #include #include #include +#include #include +#include #include #include #include @@ -36,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -49,7 +47,7 @@ #include #endif /* HAVE_JSON_C */ -#include "task_p.h" +#include "loop_p.h" /* * Task manager is built around 'as little locking as possible' concept. @@ -63,11 +61,13 @@ */ #ifdef ISC_TASK_TRACE -#define XTRACE(m) \ - fprintf(stderr, "task %p thread %zu: %s\n", task, isc_tid_v, (m)) +#define XTRACE(m) \ + fprintf(stderr, "task %p.tid %zu thread %zu: %s\n", task, \ + (size_t)task->tid, (size_t)task->tid, (m)) #define XTTRACE(t, m) \ - fprintf(stderr, "task %p thread %zu: %s\n", (t), isc_tid_v, (m)) -#define XTHREADTRACE(m) fprintf(stderr, "thread %zu: %s\n", isc_tid_v, (m)) + fprintf(stderr, "task %p thread %zu: %s\n", (t), (size_t)isc_tid(), (m)) +#define XTHREADTRACE(m) \ + fprintf(stderr, "thread %zu: %s\n", (size_t)isc_tid(), (m)) #else /* ifdef ISC_TASK_TRACE */ #define XTRACE(m) #define XTTRACE(t, m) @@ -108,18 +108,16 @@ struct isc_task { isc_taskmgr_t *manager; isc_mutex_t lock; /* Locked by task lock. */ - int tid; + isc_loop_t *loop; + uint32_t tid; task_state_t state; isc_refcount_t references; isc_eventlist_t events; unsigned int nevents; - unsigned int quantum; isc_stdtime_t now; isc_time_t tnow; char name[16]; void *tag; - /* Protected by atomics */ - atomic_bool shuttingdown; /* Locked by task manager lock. 
*/ #if TASKMGR_TRACE char func[PATH_MAX]; @@ -128,11 +126,16 @@ struct isc_task { void *backtrace[ISC__TASKTRACE_SIZE]; int backtrace_size; #endif + LINK(isc_task_t) qlink; LINK(isc_task_t) link; }; -#define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M') -#define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC) +#define TASK_SHUTTINGDOWN(t) (atomic_load_acquire(&(t)->manager->shuttingdown)) + +#define TASK_TASKMGR_MAGIC ISC_MAGIC('T', 'S', 'K', 'M') +#define VALID_TASKMGR(m) ISC_MAGIC_VALID(m, TASK_TASKMGR_MAGIC) + +typedef ISC_LIST(isc_task_t) isc_tasklist_t; struct isc_taskmgr { /* Not locked. */ @@ -140,80 +143,84 @@ struct isc_taskmgr { isc_refcount_t references; isc_mem_t *mctx; isc_mutex_t lock; - atomic_uint_fast32_t tasks_count; - isc_nm_t *netmgr; - uint32_t nworkers; + isc_loopmgr_t *loopmgr; + uint32_t nloops; /* Locked by task manager lock. */ - unsigned int default_quantum; - LIST(isc_task_t) tasks; + isc_mutex_t *locks; + isc_tasklist_t *tasks; atomic_uint_fast32_t mode; - atomic_bool exclusive_req; - bool exiting; + uint32_t exclusive_req; + atomic_bool shuttingdown; isc_task_t *excl; }; -#define DEFAULT_DEFAULT_QUANTUM 25 - -/*% - * The following are intended for internal use (indicated by "isc__" - * prefix) but are not declared as static, allowing direct access from - * unit tests etc. - */ - -bool -isc_task_purgeevent(isc_task_t *task, isc_event_t *event); -void -isc_taskmgr_setexcltask(isc_taskmgr_t *mgr, isc_task_t *task); -isc_result_t -isc_taskmgr_excltask(isc_taskmgr_t *mgr, isc_task_t **taskp); +static void +task_setstate(isc_task_t *task, task_state_t state); /*** *** Tasks. 
***/ static void -task_destroy(isc_task_t *task) { - isc_taskmgr_t *manager = task->manager; - isc_mem_t *mctx = manager->mctx; +task_destroy(void *arg) { + isc_task_t *task = arg; + isc_loop_t *loop = task->loop; + isc_taskmgr_t *taskmgr = task->manager; REQUIRE(EMPTY(task->events)); - REQUIRE(task->nevents == 0); - REQUIRE(task->state == task_state_done); XTRACE("task_finished"); + task_setstate(task, task_state_done); + isc_refcount_destroy(&task->references); - LOCK(&manager->lock); - UNLINK(manager->tasks, task, link); - atomic_fetch_sub(&manager->tasks_count, 1); - UNLOCK(&manager->lock); + LOCK(&taskmgr->locks[task->tid]); + UNLINK(taskmgr->tasks[task->tid], task, link); + UNLOCK(&taskmgr->locks[task->tid]); isc_mutex_destroy(&task->lock); task->magic = 0; - isc_mem_put(mctx, task, sizeof(*task)); - isc_taskmgr_detach(&manager); + isc_mem_put(loop->mctx, task, sizeof(*task)); + + isc_taskmgr_detach(&taskmgr); + + isc_loop_detach(&loop); } +ISC_REFCOUNT_IMPL(isc_task, task_destroy); + +static isc_result_t +task_run(isc_task_t *task); +static void +task_ready(isc_task_t *task); +static void +task__run(void *arg); isc_result_t -isc__task_create(isc_taskmgr_t *manager, unsigned int quantum, - isc_task_t **taskp, int tid ISC__TASKFLARG) { +isc__task_create(isc_taskmgr_t *taskmgr, isc_task_t **taskp, + int tid ISC__TASKFLARG) { isc_task_t *task = NULL; - bool exiting; + isc_loop_t *loop = NULL; - REQUIRE(VALID_MANAGER(manager)); + REQUIRE(VALID_TASKMGR(taskmgr)); REQUIRE(taskp != NULL && *taskp == NULL); - REQUIRE(tid >= 0 && tid < (int)manager->nworkers); + REQUIRE(tid >= 0 && tid < (int)taskmgr->nloops); - XTRACE("isc_task_create"); + if (atomic_load(&taskmgr->shuttingdown)) { + return (ISC_R_SHUTTINGDOWN); + } - task = isc_mem_get(manager->mctx, sizeof(*task)); + loop = isc_loop_get(taskmgr->loopmgr, tid); + + task = isc_mem_get(loop->mctx, sizeof(*task)); *task = (isc_task_t){ - .state = task_state_idle, .tid = tid, + .state = task_state_idle, }; + 
isc_loop_attach(loop, &task->loop); + #if TASKMGR_TRACE strlcpy(task->func, func, sizeof(task->func)); strlcpy(task->file, file, sizeof(task->file)); @@ -222,55 +229,73 @@ isc__task_create(isc_taskmgr_t *manager, unsigned int quantum, ISC__TASKTRACE_SIZE); #endif - isc_taskmgr_attach(manager, &task->manager); + isc_taskmgr_attach(taskmgr, &task->manager); isc_mutex_init(&task->lock); isc_refcount_init(&task->references, 1); + INIT_LIST(task->events); - task->quantum = (quantum > 0) ? quantum : manager->default_quantum; - atomic_init(&task->shuttingdown, false); + isc_time_settoepoch(&task->tnow); - memset(task->name, 0, sizeof(task->name)); + INIT_LINK(task, link); + INIT_LINK(task, qlink); + task->magic = TASK_MAGIC; - LOCK(&manager->lock); - exiting = manager->exiting; - if (!exiting) { - APPEND(manager->tasks, task, link); - atomic_fetch_add(&manager->tasks_count, 1); - } - UNLOCK(&manager->lock); - - if (exiting) { - isc_refcount_decrement(&task->references); - isc_refcount_destroy(&task->references); - isc_mutex_destroy(&task->lock); - isc_taskmgr_detach(&task->manager); - isc_mem_put(manager->mctx, task, sizeof(*task)); - return (ISC_R_SHUTTINGDOWN); - } + LOCK(&taskmgr->locks[task->tid]); + APPEND(taskmgr->tasks[task->tid], task, link); + UNLOCK(&taskmgr->locks[task->tid]); *taskp = task; return (ISC_R_SUCCESS); } -void -isc_task_attach(isc_task_t *source, isc_task_t **targetp) { - /* - * Attach *targetp to source. 
- */ +static void +task_setstate(isc_task_t *task, task_state_t state) { + switch (state) { + case task_state_idle: + INSIST(task->state == task_state_running); + break; + case task_state_ready: + if (task->state == task_state_idle) { + INSIST(EMPTY(task->events)); + } else { + INSIST(task->state == task_state_running); + } + break; + case task_state_running: + INSIST(task->state == task_state_ready); + break; + case task_state_done: + INSIST(task->state == task_state_ready || + task->state == task_state_running || + task->state == task_state_idle); + break; + default: + UNREACHABLE(); + } - REQUIRE(VALID_TASK(source)); - REQUIRE(targetp != NULL && *targetp == NULL); + task->state = state; +} - XTTRACE(source, "isc_task_attach"); +static void +task__run(void *arg) { + isc_task_t *task = arg; + isc_result_t result = task_run(task); - isc_refcount_increment(&source->references); - - *targetp = source; + switch (result) { + case ISC_R_QUOTA: + task_ready(task); + break; + case ISC_R_SUCCESS: + case ISC_R_NOMORE: + break; + default: + UNREACHABLE(); + } } /* @@ -280,41 +305,7 @@ isc_task_attach(isc_task_t *source, isc_task_t **targetp) { */ static void task_ready(isc_task_t *task) { - isc_taskmgr_t *manager = task->manager; - REQUIRE(VALID_MANAGER(manager)); - - XTRACE("task_ready"); - - isc_task_attach(task, &(isc_task_t *){ NULL }); - LOCK(&task->lock); - isc_nm_task_enqueue(manager->netmgr, task, task->tid); - UNLOCK(&task->lock); -} - -void -isc_task_ready(isc_task_t *task) { - task_ready(task); -} - -void -isc_task_detach(isc_task_t **taskp) { - isc_task_t *task; - - REQUIRE(taskp != NULL); - REQUIRE(VALID_TASK(*taskp)); - - task = *taskp; - *taskp = NULL; - - XTRACE("isc_task_detach"); - - if (isc_refcount_decrement(&task->references) == 1) { - LOCK(&task->lock); - task->state = task_state_done; - UNLOCK(&task->lock); - - task_destroy(task); - } + isc_async_run(task->loop, task__run, task); } static bool @@ -338,13 +329,12 @@ task_send(isc_task_t *task, 
isc_event_t **eventp) { if (task->state == task_state_idle) { was_idle = true; - INSIST(EMPTY(task->events)); - task->state = task_state_ready; + task_setstate(task, task_state_ready); + isc_task_attach(task, &(isc_task_t *){ NULL }); } INSIST(task->state == task_state_ready || task->state == task_state_running); ENQUEUE(task->events, event, ev_link); - task->nevents++; return (was_idle); } @@ -391,11 +381,13 @@ isc_task_send(isc_task_t *task, isc_event_t **eventp) { void isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) { - isc_task_t *task; + isc_task_t *task = NULL; REQUIRE(taskp != NULL); + task = *taskp; *taskp = NULL; + REQUIRE(VALID_TASK(task)); XTRACE("isc_task_sendanddetach"); @@ -403,42 +395,6 @@ isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) { isc_task_detach(&task); } -bool -isc_task_purgeevent(isc_task_t *task, isc_event_t *event) { - bool found = false; - - /* - * Purge 'event' from a task's event queue. - */ - - REQUIRE(VALID_TASK(task)); - - /* - * If 'event' is on the task's event queue, it will be purged, 'event' - * does not have to be on the task's event queue; in fact, it can even - * be an invalid pointer. Purging only occurs if the event is actually - * on the task's event queue. - * - * Purging never changes the state of the task. 
- */ - - LOCK(&task->lock); - if (ISC_LINK_LINKED(event, ev_link)) { - DEQUEUE(task->events, event, ev_link); - task->nevents--; - found = true; - } - UNLOCK(&task->lock); - - if (!found) { - return (false); - } - - isc_event_free(&event); - - return (true); -} - void isc_task_setname(isc_task_t *task, const char *name, void *tag) { /* @@ -453,6 +409,13 @@ isc_task_setname(isc_task_t *task, const char *name, void *tag) { UNLOCK(&task->lock); } +isc_loopmgr_t * +isc_task_getloopmgr(isc_task_t *task) { + REQUIRE(VALID_TASK(task)); + + return (task->manager->loopmgr); +} + const char * isc_task_getname(isc_task_t *task) { REQUIRE(VALID_TASK(task)); @@ -467,126 +430,99 @@ isc_task_gettag(isc_task_t *task) { return (task->tag); } -isc_nm_t * -isc_task_getnetmgr(isc_task_t *task) { - REQUIRE(VALID_TASK(task)); - - return (task->manager->netmgr); -} - -void -isc_task_setquantum(isc_task_t *task, unsigned int quantum) { - REQUIRE(VALID_TASK(task)); - - LOCK(&task->lock); - task->quantum = (quantum > 0) ? quantum - : task->manager->default_quantum; - UNLOCK(&task->lock); -} - /*** *** Task Manager. 
***/ static isc_result_t task_run(isc_task_t *task) { - unsigned int dispatch_count = 0; isc_event_t *event = NULL; - isc_result_t result = ISC_R_SUCCESS; - uint32_t quantum; + isc_result_t result = ISC_R_UNSET; + isc_eventlist_t events; REQUIRE(VALID_TASK(task)); LOCK(&task->lock); - quantum = task->quantum; - if (task->state != task_state_ready) { - goto done; - } + ISC_LIST_INIT(events); + ISC_LIST_MOVE(events, task->events); - INSIST(task->state == task_state_ready); - task->state = task_state_running; + REQUIRE(task->state == task_state_ready); + + task_setstate(task, task_state_running); XTRACE("running"); XTRACE(task->name); TIME_NOW(&task->tnow); task->now = isc_time_seconds(&task->tnow); + UNLOCK(&task->lock); - while (true) { - if (!EMPTY(task->events)) { - event = HEAD(task->events); - DEQUEUE(task->events, event, ev_link); - task->nevents--; + event = ISC_LIST_HEAD(events); + while (event != NULL) { + isc_event_t *next = ISC_LIST_NEXT(event, ev_link); + ISC_LIST_UNLINK(events, event, ev_link); - /* - * Execute the event action. - */ - XTRACE("execute action"); - XTRACE(task->name); - if (event->ev_action != NULL) { - UNLOCK(&task->lock); - (event->ev_action)(task, event); - LOCK(&task->lock); - } - XTRACE("execution complete"); - dispatch_count++; + /* + * Execute the event action. + */ + XTRACE("execute action"); + XTRACE(task->name); + if (event->ev_action != NULL) { + (event->ev_action)(task, event); } + XTRACE("execution complete"); - if (EMPTY(task->events)) { - /* - * Nothing else to do for this task right now. - */ - XTRACE("empty"); - if (isc_refcount_current(&task->references) == 0) { - /* - * The task is done. - */ - XTRACE("done"); - task->state = task_state_done; - } else if (task->state == task_state_running) { - XTRACE("idling"); - task->state = task_state_idle; - } - break; - } else if (dispatch_count >= quantum) { - /* - * Our quantum has expired, but there is more work to be - * done. We'll requeue it to the ready queue later. 
- * - * We don't check quantum until dispatching at least one - * event, so the minimum quantum is one. - */ - XTRACE("quantum"); - task->state = task_state_ready; - result = ISC_R_QUOTA; - break; - } + event = next; } -done: + LOCK(&task->lock); + if (EMPTY(task->events)) { + /* + * Nothing else to do for this task right now. + */ + XTRACE("empty"); + XTRACE("idling"); + task_setstate(task, task_state_idle); + + result = ISC_R_SUCCESS; + } else { + /* + * More tasks were scheduled. + */ + XTRACE("quantum"); + task_setstate(task, task_state_ready); + result = ISC_R_QUOTA; + } UNLOCK(&task->lock); - isc_task_detach(&task); + + if (result == ISC_R_SUCCESS) { + isc_task_detach(&task); + } return (result); } -isc_result_t -isc_task_run(isc_task_t *task) { - return (task_run(task)); -} - static void -manager_free(isc_taskmgr_t *manager) { - isc_refcount_destroy(&manager->references); - isc_nm_detach(&manager->netmgr); +taskmgr_destroy(isc_taskmgr_t *taskmgr) { + taskmgr->magic = 0; - isc_mutex_destroy(&manager->lock); - manager->magic = 0; - isc_mem_putanddetach(&manager->mctx, manager, sizeof(*manager)); + for (size_t tid = 0; tid < taskmgr->nloops; tid++) { + INSIST(EMPTY(taskmgr->tasks[tid])); + isc_mutex_destroy(&taskmgr->locks[tid]); + } + + isc_mem_put(taskmgr->mctx, taskmgr->tasks, + taskmgr->nloops * sizeof(taskmgr->tasks[0])); + isc_mem_put(taskmgr->mctx, taskmgr->locks, + taskmgr->nloops * sizeof(taskmgr->locks[0])); + + isc_refcount_destroy(&taskmgr->references); + isc_mutex_destroy(&taskmgr->lock); + isc_mem_putanddetach(&taskmgr->mctx, taskmgr, sizeof(*taskmgr)); } void isc_taskmgr_attach(isc_taskmgr_t *source, isc_taskmgr_t **targetp) { - REQUIRE(VALID_MANAGER(source)); + REQUIRE(VALID_TASKMGR(source)); REQUIRE(targetp != NULL && *targetp == NULL); isc_refcount_increment(&source->references); @@ -597,115 +533,107 @@ isc_taskmgr_attach(isc_taskmgr_t *source, isc_taskmgr_t **targetp) { void isc_taskmgr_detach(isc_taskmgr_t **managerp) { REQUIRE(managerp != 
NULL); - REQUIRE(VALID_MANAGER(*managerp)); + REQUIRE(VALID_TASKMGR(*managerp)); isc_taskmgr_t *manager = *managerp; *managerp = NULL; if (isc_refcount_decrement(&manager->references) == 1) { - manager_free(manager); + taskmgr_destroy(manager); } } -isc_result_t -isc__taskmgr_create(isc_mem_t *mctx, unsigned int default_quantum, isc_nm_t *nm, - isc_taskmgr_t **managerp) { - isc_taskmgr_t *manager; +static void +taskmgr_teardown(void *arg) { + isc_taskmgr_t *taskmgr = (void *)arg; + uint32_t tid = isc_tid(); + isc_task_t *excl = NULL; + + REQUIRE(VALID_TASKMGR(taskmgr)); + + atomic_store(&taskmgr->shuttingdown, true); + + isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_NETMGR, + ISC_LOG_DEBUG(1), "Shutting down task manager"); + + LOCK(&taskmgr->lock); + if (taskmgr->excl != NULL && taskmgr->excl->tid == tid) { + XTTRACE(taskmgr->excl, "taskmgr_teardown: excl"); + excl = taskmgr->excl; + taskmgr->excl = NULL; + } + UNLOCK(&taskmgr->lock); + if (excl != NULL) { + isc_task_detach(&excl); + } +} + +void +isc_taskmgr_create(isc_mem_t *mctx, isc_loopmgr_t *loopmgr, + isc_taskmgr_t **taskmgrp) { + isc_taskmgr_t *taskmgr = NULL; /* * Create a new task manager. 
*/ - REQUIRE(managerp != NULL && *managerp == NULL); - REQUIRE(nm != NULL); + REQUIRE(taskmgrp != NULL && *taskmgrp == NULL); - manager = isc_mem_get(mctx, sizeof(*manager)); - *manager = (isc_taskmgr_t){ .magic = TASK_MANAGER_MAGIC }; + taskmgr = isc_mem_get(mctx, sizeof(*taskmgr)); + *taskmgr = (isc_taskmgr_t){ + .loopmgr = loopmgr, + .magic = TASK_TASKMGR_MAGIC, + .nloops = isc_loopmgr_nloops(loopmgr), + }; - isc_mutex_init(&manager->lock); + isc_mem_attach(mctx, &taskmgr->mctx); - if (default_quantum == 0) { - default_quantum = DEFAULT_DEFAULT_QUANTUM; + isc_mutex_init(&taskmgr->lock); + + taskmgr->tasks = isc_mem_get( + taskmgr->mctx, taskmgr->nloops * sizeof(taskmgr->tasks[0])); + taskmgr->locks = isc_mem_get( + taskmgr->mctx, taskmgr->nloops * sizeof(taskmgr->locks[0])); + + for (size_t tid = 0; tid < taskmgr->nloops; tid++) { + isc_mutex_init(&taskmgr->locks[tid]); + ISC_LIST_INIT(taskmgr->tasks[tid]); } - manager->default_quantum = default_quantum; - isc_nm_attach(nm, &manager->netmgr); - manager->nworkers = isc_nm_getnworkers(nm); + isc_loopmgr_teardown(loopmgr, taskmgr_teardown, taskmgr); - INIT_LIST(manager->tasks); - atomic_init(&manager->exclusive_req, false); - atomic_init(&manager->tasks_count, 0); + isc_refcount_init(&taskmgr->references, 1); - isc_mem_attach(mctx, &manager->mctx); - - isc_refcount_init(&manager->references, 1); - - *managerp = manager; - - return (ISC_R_SUCCESS); + *taskmgrp = taskmgr; } void -isc__taskmgr_shutdown(isc_taskmgr_t *manager) { - isc_task_t *task = NULL; +isc_taskmgr_destroy(isc_taskmgr_t **managerp) { + isc_taskmgr_t *manager = NULL; + uint_fast32_t refs; - REQUIRE(VALID_MANAGER(manager)); - - XTHREADTRACE("isc_taskmgr_shutdown"); - /* - * Only one non-worker thread may ever call this routine. - * If a worker thread wants to initiate shutdown of the - * task manager, it should ask some non-worker thread to call - * isc_taskmgr_destroy(), e.g. 
by signalling a condition variable - * that the startup thread is sleeping on. - */ - LOCK(&manager->lock); - if (manager->excl != NULL) { - task = manager->excl; - manager->excl = NULL; - } - - /* - * Make sure we only get called once. - */ - INSIST(manager->exiting == false); - manager->exiting = true; - - UNLOCK(&manager->lock); - - if (task != NULL) { - isc_task_detach(&task); - } -} - -void -isc__taskmgr_destroy(isc_taskmgr_t **managerp) { - REQUIRE(managerp != NULL && VALID_MANAGER(*managerp)); + REQUIRE(managerp != NULL && VALID_TASKMGR(*managerp)); XTHREADTRACE("isc_taskmgr_destroy"); - int counter = 0; - while (isc_refcount_current(&(*managerp)->references) > 1 && - counter++ < 1000) { - uv_sleep(10); - } + manager = *managerp; + *managerp = NULL; + /* + * The isc_loopmgr is not running, there's nothing that can finish now + */ + refs = isc_refcount_decrement(&manager->references); #if TASKMGR_TRACE - if (isc_refcount_current(&(*managerp)->references) > 1) { + if (refs > 1) { isc__taskmgr_dump_active(*managerp); } - INSIST(isc_refcount_current(&(*managerp)->references) == 1); #endif - - while (isc_refcount_current(&(*managerp)->references) > 1) { - uv_sleep(10); - } - - isc_taskmgr_detach(managerp); + INSIST(refs == 1); + taskmgr_destroy(manager); } void isc_taskmgr_setexcltask(isc_taskmgr_t *mgr, isc_task_t *task) { - REQUIRE(VALID_MANAGER(mgr)); + REQUIRE(VALID_TASKMGR(mgr)); REQUIRE(VALID_TASK(task)); LOCK(&task->lock); @@ -724,15 +652,17 @@ isc_result_t isc_taskmgr_excltask(isc_taskmgr_t *mgr, isc_task_t **taskp) { isc_result_t result; - REQUIRE(VALID_MANAGER(mgr)); + REQUIRE(VALID_TASKMGR(mgr)); REQUIRE(taskp != NULL && *taskp == NULL); + if (atomic_load(&mgr->shuttingdown)) { + return (ISC_R_SHUTTINGDOWN); + } + LOCK(&mgr->lock); if (mgr->excl != NULL) { isc_task_attach(mgr->excl, taskp); result = ISC_R_SUCCESS; - } else if (mgr->exiting) { - result = ISC_R_SHUTTINGDOWN; } else { result = ISC_R_NOTFOUND; } @@ -741,9 +671,10 @@ 
isc_taskmgr_excltask(isc_taskmgr_t *mgr, isc_task_t **taskp) { return (result); } -isc_result_t +void isc_task_beginexclusive(isc_task_t *task) { isc_taskmgr_t *manager; + bool first; REQUIRE(VALID_TASK(task)); @@ -753,13 +684,12 @@ isc_task_beginexclusive(isc_task_t *task) { LOCK(&manager->lock); REQUIRE(task == manager->excl || - (manager->exiting && manager->excl == NULL)); + (atomic_load(&manager->shuttingdown) && manager->excl == NULL)); + first = (manager->exclusive_req++ == 0); UNLOCK(&manager->lock); - if (!atomic_compare_exchange_strong(&manager->exclusive_req, - &(bool){ false }, true)) - { - return (ISC_R_LOCKBUSY); + if (!first) { + return; } if (isc_log_wouldlog(isc_lctx, ISC_LOG_DEBUG(1))) { @@ -768,42 +698,47 @@ isc_task_beginexclusive(isc_task_t *task) { "exclusive task mode: %s", "starting"); } - isc_nm_pause(manager->netmgr); + isc_loopmgr_pause(manager->loopmgr); if (isc_log_wouldlog(isc_lctx, ISC_LOG_DEBUG(1))) { isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_OTHER, ISC_LOG_DEBUG(1), "exclusive task mode: %s", "started"); } - - return (ISC_R_SUCCESS); } void isc_task_endexclusive(isc_task_t *task) { isc_taskmgr_t *manager = NULL; + bool last; REQUIRE(VALID_TASK(task)); REQUIRE(task->state == task_state_running); manager = task->manager; + LOCK(&manager->lock); + INSIST(manager->exclusive_req > 0); + last = (--manager->exclusive_req == 0); + UNLOCK(&manager->lock); + + if (!last) { + return; + } + if (isc_log_wouldlog(isc_lctx, ISC_LOG_DEBUG(1))) { isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_OTHER, ISC_LOG_DEBUG(1), "exclusive task mode: %s", "ending"); } - isc_nm_resume(manager->netmgr); + isc_loopmgr_resume(manager->loopmgr); if (isc_log_wouldlog(isc_lctx, ISC_LOG_DEBUG(1))) { isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL, ISC_LOGMODULE_OTHER, ISC_LOG_DEBUG(1), "exclusive task mode: %s", "ended"); } - - atomic_compare_exchange_enforced(&manager->exclusive_req, - &(bool){ true }, false); } #ifdef HAVE_LIBXML2 
@@ -821,66 +756,48 @@ isc_taskmgr_renderxml(isc_taskmgr_t *mgr, void *writer0) { LOCK(&mgr->lock); - /* - * Write out the thread-model, and some details about each depending - * on which type is enabled. - */ - TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model")); - TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type")); - TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded")); - TRY0(xmlTextWriterEndElement(writer)); /* type */ - - TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum")); - TRY0(xmlTextWriterWriteFormatString(writer, "%d", - mgr->default_quantum)); - TRY0(xmlTextWriterEndElement(writer)); /* default-quantum */ - - TRY0(xmlTextWriterEndElement(writer)); /* thread-model */ - TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks")); - task = ISC_LIST_HEAD(mgr->tasks); - while (task != NULL) { - LOCK(&task->lock); - TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "task")); - - if (task->name[0] != 0) { + for (size_t tid = 0; tid < mgr->nloops; tid++) { + for (task = ISC_LIST_HEAD(mgr->tasks[tid]); task != NULL; + task = ISC_LIST_NEXT(task, link)) + { + LOCK(&task->lock); TRY0(xmlTextWriterStartElement(writer, - ISC_XMLCHAR "name")); - TRY0(xmlTextWriterWriteFormatString(writer, "%s", - task->name)); - TRY0(xmlTextWriterEndElement(writer)); /* name */ - } + ISC_XMLCHAR "task")); - TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "reference" + if (task->name[0] != 0) { + TRY0(xmlTextWriterStartElement( + writer, ISC_XMLCHAR "name")); + TRY0(xmlTextWriterWriteFormatString( + writer, "%s", task->name)); + TRY0(xmlTextWriterEndElement(writer)); /* name + */ + } + + TRY0(xmlTextWriterStartElement(writer, + ISC_XMLCHAR "reference" "s")); - TRY0(xmlTextWriterWriteFormatString( - writer, "%" PRIuFAST32, - isc_refcount_current(&task->references))); - TRY0(xmlTextWriterEndElement(writer)); /* references */ + TRY0(xmlTextWriterWriteFormatString( + writer, "%" PRIuFAST32, + 
isc_refcount_current(&task->references))); + TRY0(xmlTextWriterEndElement(writer)); /* references */ - TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "id")); - TRY0(xmlTextWriterWriteFormatString(writer, "%p", task)); - TRY0(xmlTextWriterEndElement(writer)); /* id */ + TRY0(xmlTextWriterStartElement(writer, + ISC_XMLCHAR "id")); + TRY0(xmlTextWriterWriteFormatString(writer, "%p", + task)); + TRY0(xmlTextWriterEndElement(writer)); /* id */ - TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "state")); - TRY0(xmlTextWriterWriteFormatString(writer, "%s", - statenames[task->state])); - TRY0(xmlTextWriterEndElement(writer)); /* state */ + TRY0(xmlTextWriterStartElement(writer, + ISC_XMLCHAR "state")); + TRY0(xmlTextWriterWriteFormatString( + writer, "%s", statenames[task->state])); + TRY0(xmlTextWriterEndElement(writer)); /* state */ - TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum")); - TRY0(xmlTextWriterWriteFormatString(writer, "%d", - task->quantum)); - TRY0(xmlTextWriterEndElement(writer)); /* quantum */ + TRY0(xmlTextWriterEndElement(writer)); - TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "events")); - TRY0(xmlTextWriterWriteFormatString(writer, "%d", - task->nevents)); - TRY0(xmlTextWriterEndElement(writer)); /* events */ - - TRY0(xmlTextWriterEndElement(writer)); - - UNLOCK(&task->lock); - task = ISC_LIST_NEXT(task, link); + UNLOCK(&task->lock); + } } TRY0(xmlTextWriterEndElement(writer)); /* tasks */ @@ -912,61 +829,43 @@ isc_taskmgr_renderjson(isc_taskmgr_t *mgr, void *tasks0) { LOCK(&mgr->lock); - /* - * Write out the thread-model, and some details about each depending - * on which type is enabled. 
- */ - obj = json_object_new_string("threaded"); - CHECKMEM(obj); - json_object_object_add(tasks, "thread-model", obj); - - obj = json_object_new_int(mgr->default_quantum); - CHECKMEM(obj); - json_object_object_add(tasks, "default-quantum", obj); - array = json_object_new_array(); CHECKMEM(array); - for (task = ISC_LIST_HEAD(mgr->tasks); task != NULL; - task = ISC_LIST_NEXT(task, link)) - { - char buf[255]; + for (size_t tid = 0; tid < mgr->nloops; tid++) { + for (task = ISC_LIST_HEAD(mgr->tasks[tid]); task != NULL; + task = ISC_LIST_NEXT(task, link)) + { + char buf[255]; - LOCK(&task->lock); + LOCK(&task->lock); - taskobj = json_object_new_object(); - CHECKMEM(taskobj); - json_object_array_add(array, taskobj); + taskobj = json_object_new_object(); + CHECKMEM(taskobj); + json_object_array_add(array, taskobj); - snprintf(buf, sizeof(buf), "%p", task); - obj = json_object_new_string(buf); - CHECKMEM(obj); - json_object_object_add(taskobj, "id", obj); - - if (task->name[0] != 0) { - obj = json_object_new_string(task->name); + snprintf(buf, sizeof(buf), "%p", task); + obj = json_object_new_string(buf); CHECKMEM(obj); - json_object_object_add(taskobj, "name", obj); + json_object_object_add(taskobj, "id", obj); + + if (task->name[0] != 0) { + obj = json_object_new_string(task->name); + CHECKMEM(obj); + json_object_object_add(taskobj, "name", obj); + } + + obj = json_object_new_int( + isc_refcount_current(&task->references)); + CHECKMEM(obj); + json_object_object_add(taskobj, "references", obj); + + obj = json_object_new_string(statenames[task->state]); + CHECKMEM(obj); + json_object_object_add(taskobj, "state", obj); + + UNLOCK(&task->lock); } - - obj = json_object_new_int( - isc_refcount_current(&task->references)); - CHECKMEM(obj); - json_object_object_add(taskobj, "references", obj); - - obj = json_object_new_string(statenames[task->state]); - CHECKMEM(obj); - json_object_object_add(taskobj, "state", obj); - - obj = json_object_new_int(task->quantum); - CHECKMEM(obj); 
- json_object_object_add(taskobj, "quantum", obj); - - obj = json_object_new_int(task->nevents); - CHECKMEM(obj); - json_object_object_add(taskobj, "events", obj); - - UNLOCK(&task->lock); } json_object_object_add(tasks, "tasks", array); @@ -1028,10 +927,12 @@ isc__taskmgr_dump_active(isc_taskmgr_t *taskmgr) { LOCK(&taskmgr->lock); fprintf(stderr, "- taskmgr: %p\n", taskmgr); - for (isc_task_t *task = ISC_LIST_HEAD(taskmgr->tasks); task != NULL; - task = ISC_LIST_NEXT(task, link)) - { - task_dump(task); + for (size_t tid = 0; tid < taskmgr->nloops; tid++) { + for (isc_task_t *task = ISC_LIST_HEAD(taskmgr->tasks[tid]); + task != NULL; task = ISC_LIST_NEXT(task, link)) + { + task_dump(task); + } } UNLOCK(&taskmgr->lock); diff --git a/lib/isc/task_p.h b/lib/isc/task_p.h deleted file mode 100644 index 5fc50b0250..0000000000 --- a/lib/isc/task_p.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright (C) Internet Systems Consortium, Inc. ("ISC") - * - * SPDX-License-Identifier: MPL-2.0 - * - * This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, you can obtain one at https://mozilla.org/MPL/2.0/. - * - * See the COPYRIGHT file distributed with this work for additional - * information regarding copyright ownership. - */ - -#pragma once - -#include -#include -#include - -isc_result_t -isc__taskmgr_create(isc_mem_t *mctx, unsigned int default_quantum, isc_nm_t *nm, - isc_taskmgr_t **managerp); -/*%< - * Create a new task manager. - * - * Notes: - * - *\li If 'default_quantum' is non-zero, then it will be used as the default - * quantum value when tasks are created. If zero, then an implementation - * defined default quantum will be used. - * - *\li If 'nm' is set then netmgr is paused when an exclusive task mode - * is requested. - * - * Requires: - * - *\li 'mctx' is a valid memory context. 
- * - *\li managerp != NULL && *managerp == NULL - * - * Ensures: - * - *\li On success, '*managerp' will be attached to the newly created task - * manager. - * - * Returns: - * - *\li #ISC_R_SUCCESS - *\li #ISC_R_NOMEMORY - *\li #ISC_R_NOTHREADS No threads could be created. - *\li #ISC_R_UNEXPECTED An unexpected error occurred. - *\li #ISC_R_SHUTTINGDOWN The non-threaded, shared, task - * manager shutting down. - */ - -void -isc__taskmgr_destroy(isc_taskmgr_t **managerp); -/*%< - * Destroy '*managerp'. - * - * Notes: - * - *\li Calling isc_taskmgr_destroy() will shutdown all tasks managed by - * *managerp that haven't already been shutdown. The call will block - * until all tasks have entered the done state. - * - *\li isc_taskmgr_destroy() must not be called by a task event action, - * because it would block forever waiting for the event action to - * complete. An event action that wants to cause task manager shutdown - * should request some non-event action thread of execution to do the - * shutdown, e.g. by signaling a condition variable or using - * isc_app_shutdown(). - * - *\li Task manager references are not reference counted, so the caller - * must ensure that no attempt will be made to use the manager after - * isc_taskmgr_destroy() returns. - * - * Requires: - * - *\li '*managerp' is a valid task manager. - * - *\li 'isc__taskmgr_shutdown()' and isc__netmgr_shutdown() have been - * called. - */ - -void -isc__taskmgr_shutdown(isc_taskmgr_t *manager); -/*%> - * Shutdown 'manager'. - * - * Notes: - * - *\li Calling isc__taskmgr_shutdown() will shut down all tasks managed by - * *managerp that haven't already been shut down. - * - * Requires: - * - *\li 'manager' is a valid task manager. - * - *\li isc_taskmgr_destroy() has not be called previously on '*managerp'. - * - * Ensures: - * - *\li All resources used by the task manager, and any tasks it managed, - * have been freed. 
- */ diff --git a/lib/isc/timer.c b/lib/isc/timer.c index 54df9684d2..46c2d4b99c 100644 --- a/lib/isc/timer.c +++ b/lib/isc/timer.c @@ -61,7 +61,7 @@ void isc_timer_create(isc_loop_t *loop, isc_job_cb cb, void *cbarg, isc_timer_t **timerp) { int r; - isc_timer_t *timer = NULL; + isc_timer_t *timer; isc_loopmgr_t *loopmgr = NULL; REQUIRE(cb != NULL); diff --git a/lib/ns/client.c b/lib/ns/client.c index 5e82e63936..6476edd59d 100644 --- a/lib/ns/client.c +++ b/lib/ns/client.c @@ -16,6 +16,7 @@ #include #include +#include #include #include #include @@ -34,6 +35,8 @@ #include #include #include +#include +#include #include #include @@ -122,7 +125,7 @@ clientmgr_attach(ns_clientmgr_t *source, ns_clientmgr_t **targetp); static void clientmgr_detach(ns_clientmgr_t **mp); static void -clientmgr_destroy(ns_clientmgr_t *manager); +clientmgr_destroy_cb(void *arg); static void ns_client_dumpmessage(ns_client_t *client, const char *reason); static void @@ -1721,7 +1724,7 @@ ns__client_request(isc_nmhandle_t *handle, isc_result_t eresult, ns_interfacemgr_getclientmgr(ifp->mgr); INSIST(VALID_MANAGER(clientmgr)); - INSIST(clientmgr->tid == isc_nm_tid()); + INSIST(clientmgr->tid == isc_tid()); client = isc_mem_get(clientmgr->mctx, sizeof(*client)); @@ -2281,7 +2284,7 @@ ns__client_setup(ns_client_t *client, ns_clientmgr_t *mgr, bool new) { if (new) { REQUIRE(VALID_MANAGER(mgr)); REQUIRE(client != NULL); - REQUIRE(mgr->tid == isc_nm_tid()); + REQUIRE(mgr->tid == isc_tid()); *client = (ns_client_t){ .magic = 0 }; @@ -2303,7 +2306,7 @@ ns__client_setup(ns_client_t *client, ns_clientmgr_t *mgr, bool new) { } } else { REQUIRE(NS_CLIENT_VALID(client)); - REQUIRE(client->manager->tid == isc_nm_tid()); + REQUIRE(client->manager->tid == isc_tid()); /* * Retain these values from the existing client, but @@ -2373,22 +2376,8 @@ clientmgr_attach(ns_clientmgr_t *source, ns_clientmgr_t **targetp) { } static void -clientmgr_detach(ns_clientmgr_t **mp) { - int32_t oldrefs; - ns_clientmgr_t *mgr = 
*mp; - *mp = NULL; - - oldrefs = isc_refcount_decrement(&mgr->references); - isc_log_write(ns_lctx, NS_LOGCATEGORY_CLIENT, NS_LOGMODULE_CLIENT, - ISC_LOG_DEBUG(3), "clientmgr @%p detach: %d", mgr, - oldrefs - 1); - if (oldrefs == 1) { - clientmgr_destroy(mgr); - } -} - -static void -clientmgr_destroy(ns_clientmgr_t *manager) { +clientmgr_destroy_cb(void *arg) { + ns_clientmgr_t *manager = (ns_clientmgr_t *)arg; MTRACE("clientmgr_destroy"); isc_refcount_destroy(&manager->references); @@ -2404,6 +2393,24 @@ clientmgr_destroy(ns_clientmgr_t *manager) { isc_mem_putanddetach(&manager->mctx, manager, sizeof(*manager)); } +static void +clientmgr_detach(ns_clientmgr_t **mp) { + int32_t oldrefs; + ns_clientmgr_t *mgr = *mp; + *mp = NULL; + + oldrefs = isc_refcount_decrement(&mgr->references); + isc_log_write(ns_lctx, NS_LOGCATEGORY_CLIENT, NS_LOGMODULE_CLIENT, + ISC_LOG_DEBUG(3), "clientmgr @%p detach: %d", mgr, + oldrefs - 1); + if (oldrefs == 1) { + isc_loop_t *loop = isc_loop_get(mgr->loopmgr, mgr->tid); + + /* FIXME: Use isc_loopmgr_teardown() function instead? 
*/ + isc_async_run(loop, clientmgr_destroy_cb, mgr); + } +} + isc_result_t ns_clientmgr_create(ns_server_t *sctx, isc_taskmgr_t *taskmgr, isc_loopmgr_t *loopmgr, dns_aclenv_t *aclenv, int tid, @@ -2426,7 +2433,7 @@ ns_clientmgr_create(ns_server_t *sctx, isc_taskmgr_t *taskmgr, dns_aclenv_attach(aclenv, &manager->aclenv); - result = isc_task_create(manager->taskmgr, 20, &manager->task, + result = isc_task_create(manager->taskmgr, &manager->task, manager->tid); RUNTIME_CHECK(result == ISC_R_SUCCESS); isc_task_setname(manager->task, "clientmgr", NULL); @@ -2448,19 +2455,12 @@ ns_clientmgr_create(ns_server_t *sctx, isc_taskmgr_t *taskmgr, void ns_clientmgr_destroy(ns_clientmgr_t **managerp) { - ns_clientmgr_t *manager; - REQUIRE(managerp != NULL); REQUIRE(VALID_MANAGER(*managerp)); - manager = *managerp; - *managerp = NULL; - MTRACE("destroy"); - if (isc_refcount_decrement(&manager->references) == 1) { - clientmgr_destroy(manager); - } + clientmgr_detach(managerp); } isc_sockaddr_t * diff --git a/lib/ns/include/ns/client.h b/lib/ns/include/ns/client.h index 3dfb6c81c9..3298322e59 100644 --- a/lib/ns/include/ns/client.h +++ b/lib/ns/include/ns/client.h @@ -148,7 +148,7 @@ struct ns_clientmgr { isc_taskmgr_t *taskmgr; isc_loopmgr_t *loopmgr; isc_refcount_t references; - int tid; + uint32_t tid; /* Attached by clients, needed for e.g. 
recursion */ isc_task_t *task; diff --git a/lib/ns/include/ns/interfacemgr.h b/lib/ns/include/ns/interfacemgr.h index cbbc6794e4..f926cce096 100644 --- a/lib/ns/include/ns/interfacemgr.h +++ b/lib/ns/include/ns/interfacemgr.h @@ -43,11 +43,13 @@ #include +#include #include #include #include #include #include +#include #include @@ -99,7 +101,7 @@ struct ns_interface { isc_result_t ns_interfacemgr_create(isc_mem_t *mctx, ns_server_t *sctx, - isc_taskmgr_t *taskmgr, isc_loopmgr_t *loopmgr, + isc_loopmgr_t *loopmgr, isc_taskmgr_t *taskmgr, isc_nm_t *nm, dns_dispatchmgr_t *dispatchmgr, isc_task_t *task, dns_geoip_databases_t *geoip, bool scan, ns_interfacemgr_t **mgrp); diff --git a/lib/ns/interfacemgr.c b/lib/ns/interfacemgr.c index ce21aa39a8..9bff77d120 100644 --- a/lib/ns/interfacemgr.c +++ b/lib/ns/interfacemgr.c @@ -16,11 +16,13 @@ #include #include +#include #include #include #include #include #include +#include #include #include @@ -73,9 +75,9 @@ struct ns_interfacemgr { isc_mutex_t lock; isc_mem_t *mctx; /*%< Memory context */ ns_server_t *sctx; /*%< Server context */ + isc_loopmgr_t *loopmgr; /*%< Loop manager */ isc_taskmgr_t *taskmgr; /*%< Task manager */ isc_task_t *task; /*%< Task */ - isc_loopmgr_t *loopmgr; /*%< Loop manager */ isc_nm_t *nm; /*%< Net manager */ uint32_t ncpus; /*%< Number of workers */ dns_dispatchmgr_t *dispatchmgr; @@ -274,7 +276,7 @@ route_connected(isc_nmhandle_t *handle, isc_result_t eresult, void *arg) { isc_result_t ns_interfacemgr_create(isc_mem_t *mctx, ns_server_t *sctx, - isc_taskmgr_t *taskmgr, isc_loopmgr_t *loopmgr, + isc_loopmgr_t *loopmgr, isc_taskmgr_t *taskmgr, isc_nm_t *nm, dns_dispatchmgr_t *dispatchmgr, isc_task_t *task, dns_geoip_databases_t *geoip, bool scan, ns_interfacemgr_t **mgrp) { @@ -289,12 +291,12 @@ ns_interfacemgr_create(isc_mem_t *mctx, ns_server_t *sctx, mgr = isc_mem_get(mctx, sizeof(*mgr)); *mgr = (ns_interfacemgr_t){ - .taskmgr = taskmgr, .loopmgr = loopmgr, + .taskmgr = taskmgr, .nm = nm, 
.dispatchmgr = dispatchmgr, .generation = 1, - .ncpus = isc_nm_getnworkers(nm), + .ncpus = isc_loopmgr_nloops(loopmgr), }; isc_mem_attach(mctx, &mgr->mctx); @@ -302,7 +304,7 @@ ns_interfacemgr_create(isc_mem_t *mctx, ns_server_t *sctx, isc_mutex_init(&mgr->lock); - result = isc_task_create(taskmgr, 0, &mgr->task, 0); + result = isc_task_create(taskmgr, &mgr->task, 0); if (result != ISC_R_SUCCESS) { goto cleanup_lock; } @@ -350,13 +352,11 @@ ns_interfacemgr_create(isc_mem_t *mctx, ns_server_t *sctx, ns_interfacemgr_attach(mgr, &imgr); result = isc_nm_routeconnect(nm, route_connected, imgr); - if (result == ISC_R_NOTIMPLEMENTED) { - ns_interfacemgr_detach(&imgr); - } if (result != ISC_R_SUCCESS) { isc_log_write(IFMGR_COMMON_LOGARGS, ISC_LOG_INFO, "unable to open route socket: %s", isc_result_totext(result)); + ns_interfacemgr_detach(&imgr); } } @@ -1347,7 +1347,7 @@ ns_interfacemgr_scan(ns_interfacemgr_t *mgr, bool verbose, bool config) { bool purge = true; REQUIRE(NS_INTERFACEMGR_VALID(mgr)); - REQUIRE(isc_nm_tid() == 0); + REQUIRE(isc_tid() == 0); mgr->generation++; /* Increment the generation count. 
*/ @@ -1452,7 +1452,7 @@ ns_interfacemgr_getserver(ns_interfacemgr_t *mgr) { ns_clientmgr_t * ns_interfacemgr_getclientmgr(ns_interfacemgr_t *mgr) { - int tid = isc_nm_tid(); + int tid = isc_tid(); REQUIRE(NS_INTERFACEMGR_VALID(mgr)); REQUIRE(tid >= 0); diff --git a/tests/dns/.gitignore b/tests/dns/.gitignore index 382ef6adb0..ed143a8bc6 100644 --- a/tests/dns/.gitignore +++ b/tests/dns/.gitignore @@ -1,2 +1,3 @@ /zone.data /testdata/dnstap/dnstap.file +/testdata/master/master18.data diff --git a/tests/dns/dispatch_test.c b/tests/dns/dispatch_test.c index a297330864..0b910e4438 100644 --- a/tests/dns/dispatch_test.c +++ b/tests/dns/dispatch_test.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -37,8 +36,6 @@ #include -uv_sem_t sem; - /* Timeouts in miliseconds */ #define T_SERVER_INIT 5000 #define T_SERVER_IDLE 5000 @@ -52,21 +49,31 @@ uv_sem_t sem; #define T_CLIENT_CONNECT 1000 -dns_dispatchmgr_t *dispatchmgr = NULL; -dns_dispatchset_t *dset = NULL; -isc_nm_t *connect_nm = NULL; +/* dns_dispatchset_t *dset = NULL; */ static isc_sockaddr_t udp_server_addr; static isc_sockaddr_t udp_connect_addr; static isc_sockaddr_t tcp_server_addr; static isc_sockaddr_t tcp_connect_addr; +static dns_dispatchmgr_t *dispatchmgr = NULL; +static dns_dispatch_t *dispatch = NULL; +static isc_nmsocket_t *sock = NULL; + +static isc_nm_t *connect_nm = NULL; + const struct in6_addr in6addr_blackhole = { { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 } } }; +struct { + uint8_t rbuf[12]; + isc_region_t region; + uint8_t message[12]; +} testdata; + static int setup_ephemeral_port(isc_sockaddr_t *addr, sa_family_t family) { socklen_t addrlen = sizeof(*addr); - uv_os_sock_t fd = -1; + uv_os_sock_t fd; int r; isc_sockaddr_fromin6(addr, &in6addr_loopback, 0); @@ -107,13 +114,14 @@ setup_ephemeral_port(isc_sockaddr_t *addr, sa_family_t family) { return (fd); } -static void -reset_testdata(void); - static int -_setup(void **state) { - uv_os_sock_t sock = -1; - int 
r; +setup_test(void **state) { + uv_os_sock_t socket = -1; + + setup_loopmgr(state); + setup_netmgr(state); + + isc_netmgr_create(mctx, loopmgr, &connect_nm); udp_connect_addr = (isc_sockaddr_t){ .length = 0 }; isc_sockaddr_fromin6(&udp_connect_addr, &in6addr_loopback, 0); @@ -122,23 +130,18 @@ _setup(void **state) { isc_sockaddr_fromin6(&tcp_connect_addr, &in6addr_loopback, 0); udp_server_addr = (isc_sockaddr_t){ .length = 0 }; - sock = setup_ephemeral_port(&udp_server_addr, SOCK_DGRAM); - if (sock < 0) { + socket = setup_ephemeral_port(&udp_server_addr, SOCK_DGRAM); + if (socket < 0) { return (-1); } - close(sock); + close(socket); tcp_server_addr = (isc_sockaddr_t){ .length = 0 }; - sock = setup_ephemeral_port(&tcp_server_addr, SOCK_STREAM); - if (sock < 0) { + socket = setup_ephemeral_port(&tcp_server_addr, SOCK_STREAM); + if (socket < 0) { return (-1); } - close(sock); - - setup_managers(state); - - /* Create a secondary network manager */ - isc_managers_create(mctx, workers, 0, &connect_nm, NULL, NULL); + close(socket); isc_nm_settimeouts(netmgr, T_SERVER_INIT, T_SERVER_IDLE, T_SERVER_KEEPALIVE, T_SERVER_ADVERTISED); @@ -150,82 +153,77 @@ _setup(void **state) { isc_nm_settimeouts(connect_nm, T_CLIENT_INIT, T_CLIENT_IDLE, T_CLIENT_KEEPALIVE, T_CLIENT_ADVERTISED); - r = uv_sem_init(&sem, 0); - assert_int_equal(r, 0); - - reset_testdata(); + memset(testdata.rbuf, 0, sizeof(testdata.rbuf)); + testdata.region.base = testdata.rbuf; + testdata.region.length = sizeof(testdata.rbuf); + memset(testdata.message, 0, sizeof(testdata.message)); return (0); } static int -_teardown(void **state) { - uv_sem_destroy(&sem); +teardown_test(void **state) { + isc_netmgr_destroy(&connect_nm); - isc_managers_destroy(&connect_nm, NULL, NULL); - assert_null(connect_nm); - - teardown_managers(state); + teardown_netmgr(state); + teardown_loopmgr(state); return (0); } static isc_result_t -make_dispatchset(unsigned int ndisps) { +make_dispatchset(unsigned int ndisps, dns_dispatchset_t 
**dsetp) { isc_result_t result; isc_sockaddr_t any; dns_dispatch_t *disp = NULL; - result = dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr); - if (result != ISC_R_SUCCESS) { - return (result); - } - isc_sockaddr_any(&any); result = dns_dispatch_createudp(dispatchmgr, &any, &disp); if (result != ISC_R_SUCCESS) { return (result); } - result = dns_dispatchset_create(mctx, disp, &dset, ndisps); + result = dns_dispatchset_create(mctx, disp, dsetp, ndisps); dns_dispatch_detach(&disp); return (result); } -static void -reset(void) { - if (dset != NULL) { - dns_dispatchset_destroy(&dset); - } - if (dispatchmgr != NULL) { - dns_dispatchmgr_detach(&dispatchmgr); - } -} - /* create dispatch set */ -ISC_RUN_TEST_IMPL(dispatchset_create) { +ISC_LOOP_TEST_IMPL(dispatchset_create) { + dns_dispatchset_t *dset = NULL; isc_result_t result; - UNUSED(state); + UNUSED(arg); - result = make_dispatchset(1); + result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr); assert_int_equal(result, ISC_R_SUCCESS); - reset(); - result = make_dispatchset(10); + result = make_dispatchset(1, &dset); assert_int_equal(result, ISC_R_SUCCESS); - reset(); + dns_dispatchset_destroy(&dset); + + result = make_dispatchset(10, &dset); + assert_int_equal(result, ISC_R_SUCCESS); + dns_dispatchset_destroy(&dset); + + dns_dispatchmgr_detach(&dispatchmgr); + + isc_loopmgr_shutdown(loopmgr); } /* test dispatch set round-robin */ -ISC_RUN_TEST_IMPL(dispatchset_get) { +ISC_LOOP_TEST_IMPL(dispatchset_get) { isc_result_t result; + dns_dispatchset_t *dset = NULL; dns_dispatch_t *d1, *d2, *d3, *d4, *d5; - UNUSED(state); + UNUSED(arg); - result = make_dispatchset(1); + result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr); + assert_int_equal(result, ISC_R_SUCCESS); + + result = make_dispatchset(1, &dset); assert_int_equal(result, ISC_R_SUCCESS); d1 = dns_dispatchset_get(dset); @@ -239,9 +237,9 @@ ISC_RUN_TEST_IMPL(dispatchset_get) { assert_ptr_equal(d3, d4); assert_ptr_equal(d4, d5); - reset(); + 
dns_dispatchset_destroy(&dset); - result = make_dispatchset(4); + result = make_dispatchset(4, &dset); assert_int_equal(result, ISC_R_SUCCESS); d1 = dns_dispatchset_get(dset); @@ -256,24 +254,14 @@ ISC_RUN_TEST_IMPL(dispatchset_get) { assert_ptr_not_equal(d3, d4); assert_ptr_not_equal(d4, d5); - reset(); + dns_dispatchset_destroy(&dset); + dns_dispatchmgr_detach(&dispatchmgr); + isc_loopmgr_shutdown(loopmgr); } -struct { - atomic_uint_fast32_t responses; - atomic_uint_fast32_t result; -} testdata; - -static dns_dispatch_t *dispatch = NULL; static dns_dispentry_t *dispentry = NULL; static atomic_bool first = true; -static void -reset_testdata(void) { - atomic_init(&testdata.responses, 0); - atomic_init(&testdata.result, ISC_R_UNSET); -} - static void server_senddone(isc_nmhandle_t *handle, isc_result_t eresult, void *cbarg) { UNUSED(handle); @@ -286,7 +274,7 @@ server_senddone(isc_nmhandle_t *handle, isc_result_t eresult, void *cbarg) { static void nameserver(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region, void *cbarg) { - isc_region_t response; + isc_region_t response1, response2; static unsigned char buf1[16]; static unsigned char buf2[16]; @@ -304,16 +292,16 @@ nameserver(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region, /* * send message to be discarded. */ - response.base = buf1; - response.length = sizeof(buf1); - isc_nm_send(handle, &response, server_senddone, NULL); + response1.base = buf1; + response1.length = sizeof(buf1); + isc_nm_send(handle, &response1, server_senddone, NULL); /* * send nextitem message. 
*/ - response.base = buf2; - response.length = sizeof(buf2); - isc_nm_send(handle, &response, server_senddone, NULL); + response2.base = buf2; + response2.length = sizeof(buf2); + isc_nm_send(handle, &response2, server_senddone, NULL); } static isc_result_t @@ -338,13 +326,12 @@ response_getnext(isc_result_t result, isc_region_t *region, void *arg) { UNUSED(region); UNUSED(arg); - atomic_fetch_add_relaxed(&testdata.responses, 1); - if (atomic_compare_exchange_strong(&first, &(bool){ true }, false)) { result = dns_dispatch_getnext(dispentry); assert_int_equal(result, ISC_R_SUCCESS); } else { - uv_sem_post(&sem); + dns_dispatch_done(&dispentry); + isc_loopmgr_shutdown(loopmgr); } } @@ -353,17 +340,10 @@ response(isc_result_t eresult, isc_region_t *region, void *arg) { UNUSED(region); UNUSED(arg); - switch (eresult) { - case ISC_R_EOF: - case ISC_R_CANCELED: - case ISC_R_SHUTTINGDOWN: - break; - default: - atomic_fetch_add_relaxed(&testdata.responses, 1); - atomic_store_relaxed(&testdata.result, eresult); - } + assert_int_equal(eresult, ISC_R_SUCCESS); - uv_sem_post(&sem); + dns_dispatch_done(&dispentry); + isc_loopmgr_shutdown(loopmgr); } static void @@ -371,9 +351,10 @@ response_timeout(isc_result_t eresult, isc_region_t *region, void *arg) { UNUSED(region); UNUSED(arg); - atomic_store_relaxed(&testdata.result, eresult); + assert_int_equal(eresult, ISC_R_TIMEDOUT); - uv_sem_post(&sem); + dns_dispatch_done(&dispentry); + isc_loopmgr_shutdown(loopmgr); } static void @@ -391,8 +372,6 @@ client_senddone(isc_result_t eresult, isc_region_t *region, void *cbarg) { UNUSED(eresult); UNUSED(region); UNUSED(cbarg); - - return; } static void @@ -400,78 +379,27 @@ timeout_connected(isc_result_t eresult, isc_region_t *region, void *cbarg) { UNUSED(region); UNUSED(cbarg); - atomic_store_relaxed(&testdata.result, eresult); - - uv_sem_post(&sem); -} - -ISC_RUN_TEST_IMPL(dispatch_timeout_tcp_connect) { - isc_result_t result; - isc_region_t region; - unsigned char rbuf[12] = { 0 }; - 
unsigned char message[12] = { 0 }; - uint16_t id; - - UNUSED(state); - - tcp_connect_addr = (isc_sockaddr_t){ .length = 0 }; - isc_sockaddr_fromin6(&tcp_connect_addr, &in6addr_blackhole, 0); - - result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr); - assert_int_equal(result, ISC_R_SUCCESS); - - result = dns_dispatch_createtcp(dispatchmgr, &tcp_connect_addr, - &tcp_server_addr, -1, &dispatch); - assert_int_equal(result, ISC_R_SUCCESS); - - region.base = rbuf; - region.length = sizeof(rbuf); - - result = dns_dispatch_add(dispatch, 0, T_CLIENT_CONNECT, - &tcp_server_addr, timeout_connected, - client_senddone, response, ®ion, &id, - &dispentry); - assert_int_equal(result, ISC_R_SUCCESS); - - memset(message, 0, sizeof(message)); - message[0] = (id >> 8) & 0xff; - message[1] = id & 0xff; - - region.base = message; - region.length = sizeof(message); - - dns_dispatch_connect(dispentry); - - uv_sem_wait(&sem); + if (eresult == ISC_R_ADDRNOTAVAIL || eresult == ISC_R_CONNREFUSED) { + /* FIXME: Skip */ + } else { + assert_int_equal(eresult, ISC_R_TIMEDOUT); + } dns_dispatch_done(&dispentry); - dns_dispatch_detach(&dispatch); - dns_dispatchmgr_detach(&dispatchmgr); - - /* Skip if the IPv6 is not available or not blackholed */ - - result = atomic_load_acquire(&testdata.result); - if (result == ISC_R_ADDRNOTAVAIL || result == ISC_R_CONNREFUSED) { - skip(); - return; - } - - assert_int_equal(result, ISC_R_TIMEDOUT); + isc_loopmgr_shutdown(loopmgr); } -ISC_RUN_TEST_IMPL(dispatch_timeout_tcp_response) { +ISC_LOOP_TEST_IMPL(dispatch_timeout_tcp_connect) { isc_result_t result; - isc_region_t region; - unsigned char rbuf[12] = { 0 }; - unsigned char message[12] = { 0 }; uint16_t id; - isc_nmsocket_t *sock = NULL; - - UNUSED(state); + /* Client */ tcp_connect_addr = (isc_sockaddr_t){ .length = 0 }; - isc_sockaddr_fromin6(&tcp_connect_addr, &in6addr_loopback, 0); + isc_sockaddr_fromin6(&tcp_connect_addr, &in6addr_blackhole, 0); + + testdata.region.base = testdata.message; + 
testdata.region.length = sizeof(testdata.message); result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr); assert_int_equal(result, ISC_R_SUCCESS); @@ -479,55 +407,77 @@ ISC_RUN_TEST_IMPL(dispatch_timeout_tcp_response) { result = dns_dispatch_createtcp(dispatchmgr, &tcp_connect_addr, &tcp_server_addr, -1, &dispatch); assert_int_equal(result, ISC_R_SUCCESS); + dns_dispatchmgr_detach(&dispatchmgr); + result = dns_dispatch_add(dispatch, 0, T_CLIENT_CONNECT, + &tcp_server_addr, timeout_connected, + client_senddone, response_timeout, + &testdata.region, &id, &dispentry); + assert_int_equal(result, ISC_R_SUCCESS); + dns_dispatch_detach(&dispatch); + + testdata.message[0] = (id >> 8) & 0xff; + testdata.message[1] = id & 0xff; + + dns_dispatch_connect(dispentry); +} + +static void +stop_listening(void *arg) { + UNUSED(arg); + + isc_nm_stoplistening(sock); + isc_nmsocket_close(&sock); + assert_null(sock); +} + +ISC_LOOP_TEST_IMPL(dispatch_timeout_tcp_response) { + isc_result_t result; + uint16_t id; + + /* Server */ result = isc_nm_listentcpdns(netmgr, ISC_NM_LISTEN_ONE, &tcp_server_addr, noop_nameserver, NULL, accept_cb, NULL, 0, NULL, &sock); assert_int_equal(result, ISC_R_SUCCESS); - region.base = rbuf; - region.length = sizeof(rbuf); + /* ensure we stop listening after the test is done */ + isc_loop_teardown(isc_loop_main(loopmgr), stop_listening, sock); + + /* Client */ + result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr); + assert_int_equal(result, ISC_R_SUCCESS); + + result = dns_dispatch_createtcp(dispatchmgr, &tcp_connect_addr, + &tcp_server_addr, -1, &dispatch); + assert_int_equal(result, ISC_R_SUCCESS); + dns_dispatchmgr_detach(&dispatchmgr); result = dns_dispatch_add(dispatch, 0, T_CLIENT_CONNECT, &tcp_server_addr, connected, client_senddone, - response_timeout, ®ion, &id, &dispentry); + response_timeout, &testdata.region, &id, + &dispentry); assert_int_equal(result, ISC_R_SUCCESS); - - memset(message, 0, sizeof(message)); - message[0] = 
(id >> 8) & 0xff; - message[1] = id & 0xff; - - region.base = message; - region.length = sizeof(message); + dns_dispatch_detach(&dispatch); dns_dispatch_connect(dispentry); - - uv_sem_wait(&sem); - - assert_int_equal(atomic_load_acquire(&testdata.result), ISC_R_TIMEDOUT); - - isc_nm_stoplistening(sock); - isc_nmsocket_close(&sock); - assert_null(sock); - - dns_dispatch_done(&dispentry); - - dns_dispatch_detach(&dispatch); - dns_dispatchmgr_detach(&dispatchmgr); } -ISC_RUN_TEST_IMPL(dispatch_tcp_response) { +ISC_LOOP_TEST_IMPL(dispatch_tcp_response) { isc_result_t result; - isc_region_t region; - unsigned char rbuf[12] = { 0 }; - unsigned char message[12] = { 0 }; uint16_t id; - isc_nmsocket_t *sock = NULL; - UNUSED(state); + /* Server */ + result = isc_nm_listentcpdns(netmgr, ISC_NM_LISTEN_ONE, + &tcp_server_addr, nameserver, NULL, + accept_cb, NULL, 0, NULL, &sock); + assert_int_equal(result, ISC_R_SUCCESS); - tcp_connect_addr = (isc_sockaddr_t){ .length = 0 }; - isc_sockaddr_fromin6(&tcp_connect_addr, &in6addr_loopback, 0); + isc_loop_teardown(isc_loop_main(loopmgr), stop_listening, sock); + + /* Client */ + testdata.region.base = testdata.message; + testdata.region.length = sizeof(testdata.message); result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr); assert_int_equal(result, ISC_R_SUCCESS); @@ -535,111 +485,66 @@ ISC_RUN_TEST_IMPL(dispatch_tcp_response) { result = dns_dispatch_createtcp(dispatchmgr, &tcp_connect_addr, &tcp_server_addr, -1, &dispatch); assert_int_equal(result, ISC_R_SUCCESS); - - result = isc_nm_listentcpdns(netmgr, ISC_NM_LISTEN_ONE, - &tcp_server_addr, nameserver, NULL, - accept_cb, NULL, 0, NULL, &sock); - assert_int_equal(result, ISC_R_SUCCESS); - - region.base = rbuf; - region.length = sizeof(rbuf); + dns_dispatchmgr_detach(&dispatchmgr); result = dns_dispatch_add(dispatch, 0, T_CLIENT_CONNECT, &tcp_server_addr, connected, client_senddone, - response, ®ion, &id, &dispentry); + response, &testdata.region, &id, &dispentry); 
assert_int_equal(result, ISC_R_SUCCESS); + dns_dispatch_detach(&dispatch); - memset(message, 0, sizeof(message)); - message[0] = (id >> 8) & 0xff; - message[1] = id & 0xff; - - region.base = message; - region.length = sizeof(message); + testdata.message[0] = (id >> 8) & 0xff; + testdata.message[1] = id & 0xff; dns_dispatch_connect(dispentry); - - uv_sem_wait(&sem); - - assert_in_range(atomic_load_acquire(&testdata.responses), 1, 2); - assert_int_equal(atomic_load_acquire(&testdata.result), ISC_R_SUCCESS); - - /* Cleanup */ - - isc_nm_stoplistening(sock); - isc_nmsocket_close(&sock); - assert_null(sock); - - dns_dispatch_done(&dispentry); - - dns_dispatch_detach(&dispatch); - dns_dispatchmgr_detach(&dispatchmgr); } -ISC_RUN_TEST_IMPL(dispatch_timeout_udp_response) { +ISC_LOOP_TEST_IMPL(dispatch_timeout_udp_response) { isc_result_t result; - isc_region_t region; - unsigned char rbuf[12] = { 0 }; - unsigned char message[12] = { 0 }; uint16_t id; - isc_nmsocket_t *sock = NULL; - - UNUSED(state); - - udp_connect_addr = (isc_sockaddr_t){ .length = 0 }; - isc_sockaddr_fromin6(&udp_connect_addr, &in6addr_loopback, 0); + /* Server */ result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr); assert_int_equal(result, ISC_R_SUCCESS); - result = dns_dispatch_createudp(dispatchmgr, &tcp_connect_addr, - &dispatch); - assert_int_equal(result, ISC_R_SUCCESS); - result = isc_nm_listenudp(netmgr, ISC_NM_LISTEN_ONE, &udp_server_addr, noop_nameserver, NULL, &sock); assert_int_equal(result, ISC_R_SUCCESS); - region.base = rbuf; - region.length = sizeof(rbuf); + /* ensure we stop listening after the test is done */ + isc_loop_teardown(isc_loop_main(loopmgr), stop_listening, sock); + + /* Client */ + result = dns_dispatch_createudp(dispatchmgr, &udp_connect_addr, + &dispatch); + assert_int_equal(result, ISC_R_SUCCESS); + dns_dispatchmgr_detach(&dispatchmgr); result = dns_dispatch_add(dispatch, 0, T_CLIENT_CONNECT, &udp_server_addr, connected, client_senddone, - response_timeout, 
®ion, &id, &dispentry); + response_timeout, &testdata.region, &id, + &dispentry); assert_int_equal(result, ISC_R_SUCCESS); - - memset(message, 0, sizeof(message)); - message[0] = (id >> 8) & 0xff; - message[1] = id & 0xff; - - region.base = message; - region.length = sizeof(message); + dns_dispatch_detach(&dispatch); dns_dispatch_connect(dispentry); - - uv_sem_wait(&sem); - - assert_int_equal(atomic_load_acquire(&testdata.result), ISC_R_TIMEDOUT); - - isc_nm_stoplistening(sock); - isc_nmsocket_close(&sock); - assert_null(sock); - - dns_dispatch_done(&dispentry); - - dns_dispatch_detach(&dispatch); - dns_dispatchmgr_detach(&dispatchmgr); } /* test dispatch getnext */ -ISC_RUN_TEST_IMPL(dispatch_getnext) { +ISC_LOOP_TEST_IMPL(dispatch_getnext) { isc_result_t result; - isc_region_t region; - isc_nmsocket_t *sock = NULL; - unsigned char message[12] = { 0 }; - unsigned char rbuf[12] = { 0 }; uint16_t id; - UNUSED(state); + /* Server */ + result = isc_nm_listenudp(netmgr, ISC_NM_LISTEN_ONE, &udp_server_addr, + nameserver, NULL, &sock); + assert_int_equal(result, ISC_R_SUCCESS); + + isc_loop_teardown(isc_loop_main(loopmgr), stop_listening, sock); + + /* Client */ + testdata.region.base = testdata.message; + testdata.region.length = sizeof(testdata.message); result = dns_dispatchmgr_create(mctx, connect_nm, &dispatchmgr); assert_int_equal(result, ISC_R_SUCCESS); @@ -647,54 +552,29 @@ ISC_RUN_TEST_IMPL(dispatch_getnext) { result = dns_dispatch_createudp(dispatchmgr, &udp_connect_addr, &dispatch); assert_int_equal(result, ISC_R_SUCCESS); + dns_dispatchmgr_detach(&dispatchmgr); - /* - * Create a local udp nameserver on the loopback. 
- */ - result = isc_nm_listenudp(netmgr, ISC_NM_LISTEN_ONE, &udp_server_addr, - nameserver, NULL, &sock); - assert_int_equal(result, ISC_R_SUCCESS); - - region.base = rbuf; - region.length = sizeof(rbuf); result = dns_dispatch_add(dispatch, 0, T_CLIENT_CONNECT, &udp_server_addr, connected, client_senddone, - response_getnext, ®ion, &id, &dispentry); + response_getnext, &testdata.region, &id, + &dispentry); assert_int_equal(result, ISC_R_SUCCESS); + dns_dispatch_detach(&dispatch); - memset(message, 0, sizeof(message)); - message[0] = (id >> 8) & 0xff; - message[1] = id & 0xff; - - region.base = message; - region.length = sizeof(message); + testdata.message[0] = (id >> 8) & 0xff; + testdata.message[1] = id & 0xff; dns_dispatch_connect(dispentry); - - uv_sem_wait(&sem); - - assert_int_equal(atomic_load_acquire(&testdata.responses), 2); - - /* Cleanup */ - isc_nm_stoplistening(sock); - isc_nmsocket_close(&sock); - assert_null(sock); - - dns_dispatch_done(&dispentry); - dns_dispatch_detach(&dispatch); - dns_dispatchmgr_detach(&dispatchmgr); } ISC_TEST_LIST_START - -ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_tcp_connect, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_tcp_response, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dispatch_tcp_response, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_udp_response, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dispatchset_create, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dispatchset_get, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dispatch_getnext, _setup, _teardown) - +ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_udp_response, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(dispatchset_create, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(dispatchset_get, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_tcp_response, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(dispatch_timeout_tcp_connect, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(dispatch_tcp_response, setup_test, teardown_test) 
+ISC_TEST_ENTRY_CUSTOM(dispatch_getnext, setup_test, teardown_test) ISC_TEST_LIST_END ISC_TEST_MAIN diff --git a/tests/dns/keytable_test.c b/tests/dns/keytable_test.c index f96f199390..1ac1eb042e 100644 --- a/tests/dns/keytable_test.c +++ b/tests/dns/keytable_test.c @@ -42,6 +42,22 @@ #include +static int +setup_test(void **state) { + setup_loopmgr(state); + setup_taskmgr(state); + + return (0); +} + +static int +teardown_test(void **state) { + teardown_taskmgr(state); + teardown_loopmgr(state); + + return (0); +} + dns_keytable_t *keytable = NULL; dns_ntatable_t *ntatable = NULL; @@ -162,9 +178,8 @@ create_tables(void) { ISC_R_SUCCESS); assert_int_equal(dns_keytable_create(mctx, &keytable), ISC_R_SUCCESS); - assert_int_equal( - dns_ntatable_create(view, taskmgr, timermgr, &ntatable), - ISC_R_SUCCESS); + assert_int_equal(dns_ntatable_create(view, taskmgr, loopmgr, &ntatable), + ISC_R_SUCCESS); /* Add a normal key */ dns_test_namefromstring("example.com", &fn); @@ -206,7 +221,7 @@ destroy_tables(void) { } /* add keys to the keytable */ -ISC_RUN_TEST_IMPL(dns_keytable_add) { +ISC_LOOP_TEST_IMPL(add) { dns_keynode_t *keynode = NULL; dns_keynode_t *null_keynode = NULL; unsigned char digest[ISC_MAX_MD_SIZE]; @@ -214,7 +229,7 @@ ISC_RUN_TEST_IMPL(dns_keytable_add) { dns_fixedname_t fn; dns_name_t *keyname = dns_fixedname_name(&fn); - UNUSED(state); + UNUSED(arg); create_tables(); @@ -360,10 +375,14 @@ ISC_RUN_TEST_IMPL(dns_keytable_add) { dns_keytable_detachkeynode(keytable, &keynode); destroy_tables(); + + isc_loopmgr_shutdown(loopmgr); } /* delete keys from the keytable */ -ISC_RUN_TEST_IMPL(dns_keytable_delete) { +ISC_LOOP_TEST_IMPL(delete) { + UNUSED(arg); + create_tables(); /* dns_keytable_delete requires exact match */ @@ -389,15 +408,17 @@ ISC_RUN_TEST_IMPL(dns_keytable_delete) { ISC_R_SUCCESS); destroy_tables(); + + isc_loopmgr_shutdown(loopmgr); } /* delete key nodes from the keytable */ -ISC_RUN_TEST_IMPL(dns_keytable_deletekey) { +ISC_LOOP_TEST_IMPL(deletekey) 
{ dns_rdata_dnskey_t dnskey; dns_fixedname_t fn; dns_name_t *keyname = dns_fixedname_name(&fn); - UNUSED(state); + UNUSED(arg); create_tables(); @@ -456,15 +477,17 @@ ISC_RUN_TEST_IMPL(dns_keytable_deletekey) { dns_rdata_freestruct(&dnskey); destroy_tables(); + + isc_loopmgr_shutdown(loopmgr); } /* check find-variant operations */ -ISC_RUN_TEST_IMPL(dns_keytable_find) { +ISC_LOOP_TEST_IMPL(find) { dns_keynode_t *keynode = NULL; dns_fixedname_t fname; dns_name_t *name; - UNUSED(state); + UNUSED(arg); create_tables(); @@ -510,16 +533,18 @@ ISC_RUN_TEST_IMPL(dns_keytable_find) { assert_true(dns_name_equal(name, str2name("null.example"))); destroy_tables(); + + isc_loopmgr_shutdown(loopmgr); } /* check issecuredomain() */ -ISC_RUN_TEST_IMPL(dns_keytable_issecuredomain) { +ISC_LOOP_TEST_IMPL(issecuredomain) { bool issecure; const char **n; const char *names[] = { "example.com", "sub.example.com", "null.example", "sub.null.example", NULL }; - UNUSED(state); + UNUSED(arg); create_tables(); /* @@ -547,13 +572,15 @@ ISC_RUN_TEST_IMPL(dns_keytable_issecuredomain) { assert_false(issecure); destroy_tables(); + + isc_loopmgr_shutdown(loopmgr); } /* check dns_keytable_dump() */ -ISC_RUN_TEST_IMPL(dns_keytable_dump) { +ISC_LOOP_TEST_IMPL(dump) { FILE *f = fopen("/dev/null", "w"); - UNUSED(state); + UNUSED(arg); create_tables(); @@ -565,10 +592,12 @@ ISC_RUN_TEST_IMPL(dns_keytable_dump) { fclose(f); destroy_tables(); + + isc_loopmgr_shutdown(loopmgr); } /* check negative trust anchors */ -ISC_RUN_TEST_IMPL(dns_keytable_nta) { +ISC_LOOP_TEST_IMPL(nta) { isc_result_t result; bool issecure, covered; dns_fixedname_t fn; @@ -578,12 +607,10 @@ ISC_RUN_TEST_IMPL(dns_keytable_nta) { dns_view_t *myview = NULL; isc_stdtime_t now; - UNUSED(state); - result = dns_test_makeview("view", false, &myview); assert_int_equal(result, ISC_R_SUCCESS); - result = isc_task_create(taskmgr, 0, &myview->task, 0); + result = isc_task_create(taskmgr, &myview->task, 0); assert_int_equal(result, ISC_R_SUCCESS); 
result = dns_view_initsecroots(myview, mctx); @@ -591,7 +618,7 @@ ISC_RUN_TEST_IMPL(dns_keytable_nta) { result = dns_view_getsecroots(myview, &keytable); assert_int_equal(result, ISC_R_SUCCESS); - result = dns_view_initntatable(myview, taskmgr, timermgr); + result = dns_view_initntatable(myview, taskmgr, loopmgr); assert_int_equal(result, ISC_R_SUCCESS); result = dns_view_getntatable(myview, &ntatable); assert_int_equal(result, ISC_R_SUCCESS); @@ -671,19 +698,18 @@ ISC_RUN_TEST_IMPL(dns_keytable_nta) { dns_ntatable_detach(&ntatable); dns_keytable_detach(&keytable); dns_view_detach(&myview); + + isc_loopmgr_shutdown(loopmgr); } ISC_TEST_LIST_START - -ISC_TEST_ENTRY_CUSTOM(dns_keytable_add, setup_managers, teardown_managers) -ISC_TEST_ENTRY_CUSTOM(dns_keytable_delete, setup_managers, teardown_managers) -ISC_TEST_ENTRY_CUSTOM(dns_keytable_deletekey, setup_managers, teardown_managers) -ISC_TEST_ENTRY_CUSTOM(dns_keytable_find, setup_managers, teardown_managers) -ISC_TEST_ENTRY_CUSTOM(dns_keytable_issecuredomain, setup_managers, - teardown_managers) -ISC_TEST_ENTRY_CUSTOM(dns_keytable_dump, setup_managers, teardown_managers) -ISC_TEST_ENTRY_CUSTOM(dns_keytable_nta, setup_managers, teardown_managers) - +ISC_TEST_ENTRY_CUSTOM(add, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(delete, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(deletekey, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(find, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(issecuredomain, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(dump, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(nta, setup_test, teardown_test) ISC_TEST_LIST_END ISC_TEST_MAIN diff --git a/tests/dns/resolver_test.c b/tests/dns/resolver_test.c index 634dbc4a28..33914b4d70 100644 --- a/tests/dns/resolver_test.c +++ b/tests/dns/resolver_test.c @@ -21,7 +21,6 @@ #define UNIT_TESTING #include -#include #include #include #include @@ -41,7 +40,7 @@ static dns_dispatch_t *dispatch = NULL; static dns_view_t *view = NULL; 
static int -_setup(void **state) { +setup_test(void **state) { isc_result_t result; isc_sockaddr_t local; @@ -50,7 +49,7 @@ _setup(void **state) { result = dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr); assert_int_equal(result, ISC_R_SUCCESS); - result = dns_test_makeview("view", true, &view); + result = dns_test_makeview("view", false, &view); assert_int_equal(result, ISC_R_SUCCESS); isc_sockaddr_any(&local); @@ -61,11 +60,10 @@ _setup(void **state) { } static int -_teardown(void **state) { +teardown_test(void **state) { dns_dispatch_detach(&dispatch); dns_view_detach(&view); dns_dispatchmgr_detach(&dispatchmgr); - teardown_managers(state); return (0); @@ -75,7 +73,7 @@ static void mkres(dns_resolver_t **resolverp) { isc_result_t result; - result = dns_resolver_create(view, taskmgr, 1, netmgr, timermgr, 0, + result = dns_resolver_create(view, loopmgr, taskmgr, 1, netmgr, 0, dispatchmgr, dispatch, NULL, resolverp); assert_int_equal(result, ISC_R_SUCCESS); } @@ -87,37 +85,33 @@ destroy_resolver(dns_resolver_t **resolverp) { } /* dns_resolver_create */ -ISC_RUN_TEST_IMPL(dns_resolver_create) { +ISC_LOOP_TEST_IMPL(create) { dns_resolver_t *resolver = NULL; - UNUSED(state); - mkres(&resolver); destroy_resolver(&resolver); + isc_loopmgr_shutdown(loopmgr); } /* dns_resolver_gettimeout */ -ISC_RUN_TEST_IMPL(dns_resolver_gettimeout) { +ISC_LOOP_TEST_IMPL(gettimeout) { dns_resolver_t *resolver = NULL; unsigned int timeout; - UNUSED(state); - mkres(&resolver); timeout = dns_resolver_gettimeout(resolver); assert_true(timeout > 0); destroy_resolver(&resolver); + isc_loopmgr_shutdown(loopmgr); } /* dns_resolver_settimeout */ -ISC_RUN_TEST_IMPL(dns_resolver_settimeout) { +ISC_LOOP_TEST_IMPL(settimeout) { dns_resolver_t *resolver = NULL; unsigned int default_timeout, timeout; - UNUSED(state); - mkres(&resolver); default_timeout = dns_resolver_gettimeout(resolver); @@ -126,15 +120,14 @@ ISC_RUN_TEST_IMPL(dns_resolver_settimeout) { assert_true(timeout == default_timeout + 1); 
destroy_resolver(&resolver); + isc_loopmgr_shutdown(loopmgr); } /* dns_resolver_settimeout */ -ISC_RUN_TEST_IMPL(dns_resolver_settimeout_default) { +ISC_LOOP_TEST_IMPL(settimeout_default) { dns_resolver_t *resolver = NULL; unsigned int default_timeout, timeout; - UNUSED(state); - mkres(&resolver); default_timeout = dns_resolver_gettimeout(resolver); @@ -148,15 +141,14 @@ ISC_RUN_TEST_IMPL(dns_resolver_settimeout_default) { assert_int_equal(timeout, default_timeout); destroy_resolver(&resolver); + isc_loopmgr_shutdown(loopmgr); } /* dns_resolver_settimeout below minimum */ -ISC_RUN_TEST_IMPL(dns_resolver_settimeout_belowmin) { +ISC_LOOP_TEST_IMPL(settimeout_belowmin) { dns_resolver_t *resolver = NULL; unsigned int default_timeout, timeout; - UNUSED(state); - mkres(&resolver); default_timeout = dns_resolver_gettimeout(resolver); @@ -166,32 +158,30 @@ ISC_RUN_TEST_IMPL(dns_resolver_settimeout_belowmin) { assert_int_equal(timeout, default_timeout); destroy_resolver(&resolver); + isc_loopmgr_shutdown(loopmgr); } /* dns_resolver_settimeout over maximum */ -ISC_RUN_TEST_IMPL(dns_resolver_settimeout_overmax) { +ISC_LOOP_TEST_IMPL(settimeout_overmax) { dns_resolver_t *resolver = NULL; unsigned int timeout; - UNUSED(state); - mkres(&resolver); dns_resolver_settimeout(resolver, 4000000); timeout = dns_resolver_gettimeout(resolver); assert_in_range(timeout, 0, 3999999); destroy_resolver(&resolver); + isc_loopmgr_shutdown(loopmgr); } ISC_TEST_LIST_START - -ISC_TEST_ENTRY_CUSTOM(dns_resolver_create, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dns_resolver_gettimeout, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dns_resolver_settimeout, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dns_resolver_settimeout_default, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dns_resolver_settimeout_belowmin, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dns_resolver_settimeout_overmax, _setup, _teardown) - +ISC_TEST_ENTRY_CUSTOM(create, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(gettimeout, setup_test, 
teardown_test) +ISC_TEST_ENTRY_CUSTOM(settimeout, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(settimeout_default, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(settimeout_belowmin, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(settimeout_overmax, setup_test, teardown_test) ISC_TEST_LIST_END ISC_TEST_MAIN diff --git a/tests/dns/tsig_test.c b/tests/dns/tsig_test.c index d12805ff56..15d33ca46c 100644 --- a/tests/dns/tsig_test.c +++ b/tests/dns/tsig_test.c @@ -36,6 +36,14 @@ #define TEST_ORIGIN "test" +#define CHECK(r) \ + { \ + result = (r); \ + if (result != ISC_R_SUCCESS) { \ + goto cleanup; \ + } \ + } + static int debug = 0; static int diff --git a/tests/dns/zonemgr_test.c b/tests/dns/zonemgr_test.c index cf1c718ba2..623a5eee2f 100644 --- a/tests/dns/zonemgr_test.c +++ b/tests/dns/zonemgr_test.c @@ -33,32 +33,50 @@ #include +static int +setup_test(void **state) { + setup_loopmgr(state); + setup_taskmgr(state); + setup_netmgr(state); + + return (0); +} + +static int +teardown_test(void **state) { + teardown_netmgr(state); + teardown_taskmgr(state); + teardown_loopmgr(state); + + return (0); +} + /* create zone manager */ -ISC_RUN_TEST_IMPL(dns_zonemgr_create) { +ISC_LOOP_TEST_IMPL(zonemgr_create) { dns_zonemgr_t *myzonemgr = NULL; isc_result_t result; - UNUSED(state); + UNUSED(arg); - result = dns_zonemgr_create(mctx, taskmgr, timermgr, netmgr, - &myzonemgr); + result = dns_zonemgr_create(mctx, loopmgr, taskmgr, netmgr, &myzonemgr); assert_int_equal(result, ISC_R_SUCCESS); dns_zonemgr_shutdown(myzonemgr); dns_zonemgr_detach(&myzonemgr); assert_null(myzonemgr); + + isc_loopmgr_shutdown(loopmgr); } /* manage and release a zone */ -ISC_RUN_TEST_IMPL(dns_zonemgr_managezone) { +ISC_LOOP_TEST_IMPL(zonemgr_managezone) { dns_zonemgr_t *myzonemgr = NULL; dns_zone_t *zone = NULL; isc_result_t result; - UNUSED(state); + UNUSED(arg); - result = dns_zonemgr_create(mctx, taskmgr, timermgr, netmgr, - &myzonemgr); + result = dns_zonemgr_create(mctx, loopmgr, taskmgr, 
netmgr, &myzonemgr); assert_int_equal(result, ISC_R_SUCCESS); result = dns_test_makezone("foo", &zone, NULL, false); @@ -80,18 +98,19 @@ ISC_RUN_TEST_IMPL(dns_zonemgr_managezone) { dns_zonemgr_shutdown(myzonemgr); dns_zonemgr_detach(&myzonemgr); assert_null(myzonemgr); + + isc_loopmgr_shutdown(loopmgr); } /* create and release a zone */ -ISC_RUN_TEST_IMPL(dns_zonemgr_createzone) { +ISC_LOOP_TEST_IMPL(zonemgr_createzone) { dns_zonemgr_t *myzonemgr = NULL; dns_zone_t *zone = NULL; isc_result_t result; - UNUSED(state); + UNUSED(arg); - result = dns_zonemgr_create(mctx, taskmgr, timermgr, netmgr, - &myzonemgr); + result = dns_zonemgr_create(mctx, loopmgr, taskmgr, netmgr, &myzonemgr); assert_int_equal(result, ISC_R_SUCCESS); result = dns_zonemgr_createzone(myzonemgr, &zone); @@ -105,10 +124,12 @@ ISC_RUN_TEST_IMPL(dns_zonemgr_createzone) { dns_zonemgr_shutdown(myzonemgr); dns_zonemgr_detach(&myzonemgr); assert_null(myzonemgr); + + isc_loopmgr_shutdown(loopmgr); } /* manage and release a zone */ -ISC_RUN_TEST_IMPL(dns_zonemgr_unreachable) { +ISC_LOOP_TEST_IMPL(zonemgr_unreachable) { dns_zonemgr_t *myzonemgr = NULL; dns_zone_t *zone = NULL; isc_sockaddr_t addr1, addr2; @@ -116,12 +137,11 @@ ISC_RUN_TEST_IMPL(dns_zonemgr_unreachable) { isc_result_t result; isc_time_t now; - UNUSED(state); + UNUSED(arg); TIME_NOW(&now); - result = dns_zonemgr_create(mctx, taskmgr, timermgr, netmgr, - &myzonemgr); + result = dns_zonemgr_create(mctx, loopmgr, taskmgr, netmgr, &myzonemgr); assert_int_equal(result, ISC_R_SUCCESS); result = dns_test_makezone("foo", &zone, NULL, false); @@ -169,35 +189,15 @@ ISC_RUN_TEST_IMPL(dns_zonemgr_unreachable) { dns_zonemgr_shutdown(myzonemgr); dns_zonemgr_detach(&myzonemgr); assert_null(myzonemgr); + + isc_loopmgr_shutdown(loopmgr); } -/* - * XXX: - * dns_zonemgr API calls that are not yet part of this unit test: - * - * - dns_zonemgr_attach - * - dns_zonemgr_forcemaint - * - dns_zonemgr_resumexfrs - * - dns_zonemgr_shutdown - * - 
dns_zonemgr_settransfersin - * - dns_zonemgr_getttransfersin - * - dns_zonemgr_settransfersperns - * - dns_zonemgr_getttransfersperns - * - dns_zonemgr_setiolimit - * - dns_zonemgr_getiolimit - * - dns_zonemgr_dbdestroyed - * - dns_zonemgr_setserialqueryrate - * - dns_zonemgr_getserialqueryrate - */ - ISC_TEST_LIST_START - -ISC_TEST_ENTRY_CUSTOM(dns_zonemgr_create, setup_managers, teardown_managers) -ISC_TEST_ENTRY_CUSTOM(dns_zonemgr_managezone, setup_managers, teardown_managers) -ISC_TEST_ENTRY_CUSTOM(dns_zonemgr_createzone, setup_managers, teardown_managers) -ISC_TEST_ENTRY_CUSTOM(dns_zonemgr_unreachable, setup_managers, - teardown_managers) - +ISC_TEST_ENTRY_CUSTOM(zonemgr_create, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(zonemgr_managezone, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(zonemgr_createzone, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(zonemgr_unreachable, setup_test, teardown_test) ISC_TEST_LIST_END ISC_TEST_MAIN diff --git a/tests/dns/zt_test.c b/tests/dns/zt_test.c index 5b25089982..b164b74e17 100644 --- a/tests/dns/zt_test.c +++ b/tests/dns/zt_test.c @@ -23,9 +23,9 @@ #define UNIT_TESTING #include -#include #include #include +#include #include #include #include @@ -39,27 +39,9 @@ #include -static int -_setup(void **state) { - isc_app_start(); - setup_managers(state); - - return (0); -} - -static int -_teardown(void **state) { - teardown_managers(state); - isc_app_finish(); - - return (0); -} - -struct args { - void *arg1; - void *arg2; - bool arg3; -}; +static dns_db_t *db = NULL; +static FILE *zonefile, *origfile; +static dns_view_t *view = NULL; static isc_result_t count_zone(dns_zone_t *zone, void *uap) { @@ -71,58 +53,12 @@ count_zone(dns_zone_t *zone, void *uap) { return (ISC_R_SUCCESS); } -static isc_result_t -load_done(dns_zt_t *zt, dns_zone_t *zone, isc_task_t *task) { - /* We treat zt as a pointer to a boolean for testing purposes */ - atomic_bool *done = (atomic_bool *)zt; - - UNUSED(zone); - UNUSED(task); - - 
atomic_store(done, true); - isc_app_shutdown(); - return (ISC_R_SUCCESS); -} - -static isc_result_t -all_done(void *arg) { - atomic_bool *done = (atomic_bool *)arg; - - atomic_store(done, true); - isc_app_shutdown(); - return (ISC_R_SUCCESS); -} - -static void -start_zt_asyncload(isc_task_t *task, isc_event_t *event) { - struct args *args = (struct args *)(event->ev_arg); - - UNUSED(task); - - dns_zt_asyncload(args->arg1, false, all_done, args->arg2); - - isc_event_free(&event); -} - -static void -start_zone_asyncload(isc_task_t *task, isc_event_t *event) { - struct args *args = (struct args *)(event->ev_arg); - - UNUSED(task); - - dns_zone_asyncload(args->arg1, args->arg3, load_done, args->arg2); - isc_event_free(&event); -} - /* apply a function to a zone table */ -ISC_RUN_TEST_IMPL(dns_zt_apply) { +ISC_LOOP_TEST_IMPL(apply) { isc_result_t result; dns_zone_t *zone = NULL; - dns_view_t *view = NULL; int nzones = 0; - UNUSED(state); - result = dns_test_makezone("foo", &zone, NULL, true); assert_int_equal(result, ISC_R_SUCCESS); @@ -146,22 +82,88 @@ ISC_RUN_TEST_IMPL(dns_zt_apply) { /* The view was left attached in dns_test_makezone() */ dns_view_detach(&view); dns_zone_detach(&zone); + isc_loopmgr_shutdown(loopmgr); +} + +static isc_result_t +load_done_last(dns_zt_t *zt, dns_zone_t *zone, isc_task_t *task) { + isc_result_t result; + + UNUSED(zt); + UNUSED(zone); + UNUSED(task); + + /* The zone should now be loaded; test it */ + result = dns_zone_getdb(zone, &db); + assert_int_equal(result, ISC_R_SUCCESS); + + assert_non_null(db); + if (db != NULL) { + dns_db_detach(&db); + } + + dns_test_releasezone(zone); + dns_test_closezonemgr(); + + dns_zone_detach(&zone); + dns_view_detach(&view); + + isc_loopmgr_shutdown(loopmgr); + + return (ISC_R_SUCCESS); +} + +static isc_result_t +load_done_new_only(dns_zt_t *zt, dns_zone_t *zone, isc_task_t *task) { + isc_result_t result; + + UNUSED(zt); + UNUSED(zone); + UNUSED(task); + + /* The zone should now be loaded; test it */ + 
result = dns_zone_getdb(zone, &db); + assert_int_equal(result, ISC_R_SUCCESS); + dns_db_detach(&db); + + dns_zone_asyncload(zone, true, load_done_last, NULL); + + return (ISC_R_SUCCESS); +} + +static isc_result_t +load_done_first(dns_zt_t *zt, dns_zone_t *zone, isc_task_t *task) { + atomic_bool *done = (atomic_bool *)zt; + isc_result_t result; + + UNUSED(zone); + UNUSED(task); + + /* The zone should now be loaded; test it */ + result = dns_zone_getdb(zone, &db); + assert_int_equal(result, ISC_R_SUCCESS); + dns_db_detach(&db); + + /* + * Add something to zone file, reload zone with newonly - it should + * not be reloaded. + */ + fprintf(zonefile, "\nb in b 1.2.3.4\n"); + fflush(zonefile); + fclose(zonefile); + + dns_zone_asyncload(zone, true, load_done_new_only, &done); + + return (ISC_R_SUCCESS); } /* asynchronous zone load */ -ISC_RUN_TEST_IMPL(dns_zt_asyncload_zone) { +ISC_LOOP_TEST_IMPL(asyncload_zone) { isc_result_t result; int n; dns_zone_t *zone = NULL; - dns_view_t *view = NULL; - dns_db_t *db = NULL; - FILE *zonefile, *origfile; char buf[4096]; atomic_bool done; - int i = 0; - struct args args; - - UNUSED(state); atomic_init(&done, false); @@ -190,84 +192,49 @@ ISC_RUN_TEST_IMPL(dns_zt_asyncload_zone) { dns_zone_setfile(zone, "./zone.data", dns_masterformat_text, &dns_master_style_default); - args.arg1 = zone; - args.arg2 = &done; - args.arg3 = false; - isc_app_onrun(mctx, maintask, start_zone_asyncload, &args); + dns_zone_asyncload(zone, false, load_done_first, &done); +} - isc_app_run(); - while (dns__zone_loadpending(zone) && i++ < 5000) { - dns_test_nap(1000); - } - assert_true(atomic_load(&done)); - /* The zone should now be loaded; test it */ - result = dns_zone_getdb(zone, &db); +dns_zone_t *zone1 = NULL, *zone2 = NULL, *zone3 = NULL; + +static isc_result_t +all_done(void *arg __attribute__((__unused__))) { + isc_result_t result; + + /* Both zones should now be loaded; test them */ + result = dns_zone_getdb(zone1, &db); assert_int_equal(result, 
ISC_R_SUCCESS); - dns_db_detach(&db); - /* - * Add something to zone file, reload zone with newonly - it should - * not be reloaded. - */ - fprintf(zonefile, "\nb in b 1.2.3.4\n"); - fflush(zonefile); - fclose(zonefile); - - args.arg1 = zone; - args.arg2 = &done; - args.arg3 = true; - isc_app_onrun(mctx, maintask, start_zone_asyncload, &args); - - isc_app_run(); - - while (dns__zone_loadpending(zone) && i++ < 5000) { - dns_test_nap(1000); - } - assert_true(atomic_load(&done)); - /* The zone should now be loaded; test it */ - result = dns_zone_getdb(zone, &db); - assert_int_equal(result, ISC_R_SUCCESS); - dns_db_detach(&db); - - /* Now reload it without newonly - it should be reloaded */ - args.arg1 = zone; - args.arg2 = &done; - args.arg3 = false; - isc_app_onrun(mctx, maintask, start_zone_asyncload, &args); - - isc_app_run(); - - while (dns__zone_loadpending(zone) && i++ < 5000) { - dns_test_nap(1000); - } - assert_true(atomic_load(&done)); - /* The zone should now be loaded; test it */ - result = dns_zone_getdb(zone, &db); - assert_int_equal(result, ISC_R_SUCCESS); - assert_non_null(db); if (db != NULL) { dns_db_detach(&db); } - dns_test_releasezone(zone); + result = dns_zone_getdb(zone2, &db); + assert_int_equal(result, ISC_R_SUCCESS); + assert_non_null(db); + if (db != NULL) { + dns_db_detach(&db); + } + + dns_test_releasezone(zone3); + dns_test_releasezone(zone2); + dns_test_releasezone(zone1); dns_test_closezonemgr(); - dns_zone_detach(&zone); + dns_zone_detach(&zone1); + dns_zone_detach(&zone2); + dns_zone_detach(&zone3); dns_view_detach(&view); + + isc_loopmgr_shutdown(loopmgr); + return (ISC_R_SUCCESS); } /* asynchronous zone table load */ -ISC_RUN_TEST_IMPL(dns_zt_asyncload_zt) { +ISC_LOOP_TEST_IMPL(asyncload_zt) { isc_result_t result; - dns_zone_t *zone1 = NULL, *zone2 = NULL, *zone3 = NULL; - dns_view_t *view; dns_zt_t *zt = NULL; - dns_db_t *db = NULL; atomic_bool done; - int i = 0; - struct args args; - - UNUSED(state); atomic_init(&done, false); @@ 
-304,48 +271,13 @@ ISC_RUN_TEST_IMPL(dns_zt_asyncload_zt) { assert_false(dns__zone_loadpending(zone2)); assert_false(atomic_load(&done)); - args.arg1 = zt; - args.arg2 = &done; - isc_app_onrun(mctx, maintask, start_zt_asyncload, &args); - - isc_app_run(); - while (!atomic_load(&done) && i++ < 5000) { - dns_test_nap(1000); - } - assert_true(atomic_load(&done)); - - /* Both zones should now be loaded; test them */ - result = dns_zone_getdb(zone1, &db); - assert_int_equal(result, ISC_R_SUCCESS); - assert_non_null(db); - if (db != NULL) { - dns_db_detach(&db); - } - - result = dns_zone_getdb(zone2, &db); - assert_int_equal(result, ISC_R_SUCCESS); - assert_non_null(db); - if (db != NULL) { - dns_db_detach(&db); - } - - dns_test_releasezone(zone3); - dns_test_releasezone(zone2); - dns_test_releasezone(zone1); - dns_test_closezonemgr(); - - dns_zone_detach(&zone1); - dns_zone_detach(&zone2); - dns_zone_detach(&zone3); - dns_view_detach(&view); + dns_zt_asyncload(zt, false, all_done, NULL); } ISC_TEST_LIST_START - -ISC_TEST_ENTRY_CUSTOM(dns_zt_apply, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dns_zt_asyncload_zone, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(dns_zt_asyncload_zt, _setup, _teardown) - +ISC_TEST_ENTRY_CUSTOM(apply, setup_managers, teardown_managers) +ISC_TEST_ENTRY_CUSTOM(asyncload_zone, setup_managers, teardown_managers) +ISC_TEST_ENTRY_CUSTOM(asyncload_zt, setup_managers, teardown_managers) ISC_TEST_LIST_END ISC_TEST_MAIN diff --git a/tests/include/tests/dns.h b/tests/include/tests/dns.h index 64a265b660..583a299e14 100644 --- a/tests/include/tests/dns.h +++ b/tests/include/tests/dns.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/tests/include/tests/isc.h b/tests/include/tests/isc.h index b571938692..5f7b976060 100644 --- a/tests/include/tests/isc.h +++ b/tests/include/tests/isc.h @@ -17,7 +17,6 @@ #include #include -#include #include #include @@ -30,28 +29,16 @@ #include #include #include +#include -#include 
"netmgr_p.h" -#include "task_p.h" -#include "timer_p.h" - -#define CHECK(r) \ - do { \ - result = (r); \ - if (result != ISC_R_SUCCESS) \ - goto cleanup; \ - } while (0) - -extern isc_mem_t *mctx; -extern isc_nm_t *netmgr; -extern isc_loopmgr_t *loopmgr; -extern isc_loop_t *mainloop; -extern isc_taskmgr_t *taskmgr; -extern isc_timermgr_t *timermgr; -extern unsigned int workers; -extern isc_task_t *maintask; - -#define isc_test_nap(ms) uv_sleep(ms) +extern isc_mem_t *mctx; +extern isc_log_t *lctx; +extern isc_loop_t *mainloop; +extern isc_loopmgr_t *loopmgr; +extern isc_taskmgr_t *taskmgr; +extern isc_nm_t *netmgr; +extern int ncpus; +extern unsigned int workers; int setup_mctx(void **state); @@ -63,6 +50,16 @@ setup_loopmgr(void **state); int teardown_loopmgr(void **state); +int +setup_taskmgr(void **state); +int +teardown_taskmgr(void **state); + +int +setup_netmgr(void **state); +int +teardown_netmgr(void **state); + int setup_managers(void **state); int @@ -95,6 +92,26 @@ teardown_managers(void **state); #define ISC_TEARDOWN_TEST_DECLARE(name) \ int teardown_test_##name(void **state __attribute__((unused))) +#define ISC_LOOP_TEST_CUSTOM_DECLARE(name, setup, teardown) \ + void run_test_##name(void **state __attribute__((__unused__))); \ + void loop_test_##name(void *arg __attribute__((__unused__))); + +#define ISC_LOOP_TEST_DECLARE(name) \ + ISC_LOOP_TEST_CUSTOM_DECLARE(name, NULL, NULL) + +#define ISC_LOOP_TEST_SETUP_DECLARE(name) \ + ISC_LOOP_TEST_CUSTOM_DECLARE(name, setup_loop_##name, NULL) + +#define ISC_LOOP_TEST_SETUP_TEARDOWN_DECLARE(name) \ + ISC_LOOP_TEST_CUSTOM_DECLARE(name, setup_loop_##name, \ + teardown_loop_##name) + +#define ISC_LOOP_TEST_TEARDOWN_DECLARE(name) \ + ISC_LOOP_TEST_CUSTOM_DECLARE(name, NULL, teardown_loop_##name) + +#define ISC_LOOP_SETUP_DECLARE(name) \ + void setup_loop_##name(void *arg __attribute__((__unused__))); + #define ISC_SETUP_TEST_IMPL(name) \ int setup_test_##name(void **state __attribute__((unused))); \ int 
setup_test_##name(void **state __attribute__((unused))) @@ -107,6 +124,49 @@ teardown_managers(void **state); int teardown_test_##name(void **state __attribute__((unused))); \ int teardown_test_##name(void **state __attribute__((unused))) +#define ISC_TEST_LIST_START const struct CMUnitTest tests[] = { +#define ISC_TEST_LIST_END \ + } \ + ; + +#define ISC_LOOP_TEST_CUSTOM_IMPL(name, setup, teardown) \ + void run_test_##name(void **state __attribute__((__unused__))); \ + void loop_test_##name(void *arg __attribute__((__unused__))); \ + void run_test_##name(void **state __attribute__((__unused__))) { \ + isc_job_cb setup_loop = setup; \ + isc_job_cb teardown_loop = teardown; \ + if (setup_loop != NULL) { \ + setup_loop(state); \ + } \ + isc_loop_setup(mainloop, loop_test_##name, state); \ + isc_loopmgr_run(loopmgr); \ + if (teardown_loop != NULL) { \ + teardown_loop(state); \ + } \ + } \ + void loop_test_##name(void *arg __attribute__((__unused__))) + +#define ISC_LOOP_TEST_IMPL(name) ISC_LOOP_TEST_CUSTOM_IMPL(name, NULL, NULL) + +#define ISC_LOOP_TEST_SETUP_IMPL(name) \ + ISC_LOOP_TEST_CUSTOM_IMPL(name, setup_loop_##name, NULL) + +#define ISC_LOOP_TEST_SETUP_TEARDOWN_IMPL(name) \ + ISC_LOOP_TEST_CUSTOM_IMPL(name, setup_loop_##name, teardown_loop_##name) + +#define ISC_LOOP_TEST_TEARDOWN_IMPL(name) \ + ISC_LOOP_TEST_CUSTOM_IMPL(name, NULL, teardown_loop_##name) + +#define ISC_LOOP_SETUP_IMPL(name) \ + void setup_loop_##name(void *arg __attribute__((__unused__))); \ + void setup_loop_##name(void *arg __attribute__((__unused__))) + +#define ISC_LOOP_TEARDOWN_IMPL(name) \ + void teardown_loop_##name(void *arg __attribute__((__unused__))); \ + void teardown_loop_##name(void *arg __attribute__((__unused__))) + +#define ISC_TEST_DECLARE(name) void run_test_##name(void **state); + #define ISC_TEST_LIST_START const struct CMUnitTest tests[] = { #define ISC_TEST_LIST_END \ } \ diff --git a/tests/include/tests/ns.h b/tests/include/tests/ns.h index 175cb90975..93480e27fa 100644 
--- a/tests/include/tests/ns.h +++ b/tests/include/tests/ns.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -46,18 +47,13 @@ typedef struct ns_test_id { .description = desc, .lineno = __LINE__ \ } -#define CHECK(r) \ - do { \ - result = (r); \ - if (result != ISC_R_SUCCESS) \ - goto cleanup; \ - } while (0) - extern dns_dispatchmgr_t *dispatchmgr; -extern ns_clientmgr_t *clientmgr; extern ns_interfacemgr_t *interfacemgr; extern ns_server_t *sctx; +extern atomic_uint_fast32_t client_refs[32]; +extern atomic_uintptr_t client_addrs[32]; + #ifdef NETMGR_TRACE #define FLARG \ , const char *file __attribute__((unused)), \ @@ -71,6 +67,8 @@ int setup_server(void **state); int teardown_server(void **state); +void +shutdown_interfacemgr(void *arg __attribute__((unused))); /*% * Load data for zone "zonename" from file "filename" and start serving it to diff --git a/tests/isc/Makefile.am b/tests/isc/Makefile.am index 3e0324c1a2..cb7a9e5689 100644 --- a/tests/isc/Makefile.am +++ b/tests/isc/Makefile.am @@ -27,7 +27,6 @@ check_PROGRAMS = \ md_test \ mem_test \ netaddr_test \ - netmgr_test \ parse_test \ quota_test \ radix_test \ @@ -79,18 +78,6 @@ md_test_LDADD = \ $(LDADD) \ $(OPENSSL_LIBS) -netmgr_test_CPPFLAGS = \ - $(AM_CPPFLAGS) \ - $(OPENSSL_CFLAGS) - -netmgr_test_LDADD = \ - $(LDADD) \ - $(OPENSSL_LIBS) - -netmgr_test_SOURCES = \ - netmgr_test.c \ - uv_wrap.h - random_test_LDADD = \ $(LDADD) \ -lm diff --git a/tests/isc/doh_test.c b/tests/isc/doh_test.c index b8c5d1f81f..78916c75ea 100644 --- a/tests/isc/doh_test.c +++ b/tests/isc/doh_test.c @@ -24,6 +24,7 @@ #define UNIT_TESTING #include +#include #include #include #include @@ -43,7 +44,6 @@ #include "netmgr/http.c" #include "netmgr/netmgr-int.h" #include "netmgr/socket.c" -#include "netmgr_p.h" #include @@ -66,9 +66,24 @@ static atomic_int_fast64_t creads = 0; static atomic_int_fast64_t ctimeouts = 0; static atomic_int_fast64_t total_sends = 0; -static atomic_bool was_error = 
false; +static int expected_ssends; +static int expected_sreads; +static int expected_csends; +static int expected_cconnects; +static int expected_creads; +static int expected_ctimeouts; + +#define have_expected_ssends(v) ((v) >= expected_ssends && expected_ssends >= 0) +#define have_expected_sreads(v) ((v) >= expected_sreads && expected_sreads >= 0) +#define have_expected_csends(v) ((v) >= expected_csends && expected_csends >= 0) +#define have_expected_cconnects(v) \ + ((v) >= expected_cconnects && expected_cconnects >= 0) +#define have_expected_creads(v) ((v) >= expected_creads && expected_creads >= 0) +#define have_expected_ctimeouts(v) \ + ((v) >= expected_ctimeouts && expected_ctimeouts >= 0) + +static atomic_bool test_was_error = false; -static bool reuse_supported = true; static bool noanswer = false; static atomic_bool POST = true; @@ -85,7 +100,7 @@ static atomic_bool check_listener_quota = false; static isc_nm_http_endpoints_t *endpoints = NULL; -static bool skip_long_tests = false; +static isc_nm_t **nm = NULL; /* Timeout for soft-timeout tests (0.05 seconds) */ #define T_SOFT 50 @@ -113,13 +128,8 @@ static bool skip_long_tests = false; #define X(v) #endif -#define SKIP_IN_CI \ - if (skip_long_tests) { \ - skip(); \ - return; \ - } - typedef struct csdata { + isc_mem_t *mctx; isc_nm_recv_cb_t reply_cb; void *cb_arg; isc_region_t region; @@ -133,7 +143,7 @@ connect_send_cb(isc_nmhandle_t *handle, isc_result_t result, void *arg) { (void)atomic_fetch_sub(&active_cconnects, 1); memmove(&data, arg, sizeof(data)); - isc_mem_put(handle->sock->mgr->mctx, arg, sizeof(data)); + isc_mem_put(data.mctx, arg, sizeof(data)); if (result != ISC_R_SUCCESS) { goto error; } @@ -144,17 +154,15 @@ connect_send_cb(isc_nmhandle_t *handle, isc_result_t result, void *arg) { goto error; } - isc_mem_put(handle->sock->mgr->mctx, data.region.base, - data.region.length); + isc_mem_putanddetach(&data.mctx, data.region.base, data.region.length); return; error: data.reply_cb(handle, 
result, NULL, data.cb_arg); - isc_mem_put(handle->sock->mgr->mctx, data.region.base, - data.region.length); + isc_mem_putanddetach(&data.mctx, data.region.base, data.region.length); if (result == ISC_R_TOOMANYOPENFILES) { atomic_store(&slowdown, true); } else { - atomic_store(&was_error, true); + atomic_store(&test_was_error, true); } } @@ -171,6 +179,7 @@ connect_send_request(isc_nm_t *mgr, const char *uri, bool post, memmove(copy.base, region->base, region->length); data = isc_mem_get(mgr->mctx, sizeof(*data)); *data = (csdata_t){ .reply_cb = cb, .cb_arg = cbarg, .region = copy }; + isc_mem_attach(mgr->mctx, &data->mctx); if (tls) { ctx = client_tlsctx; } @@ -227,9 +236,6 @@ setup_ephemeral_port(isc_sockaddr_t *addr, sa_family_t family) { close(fd); return (-1); } - if (result == ISC_R_NOTIMPLEMENTED) { - reuse_supported = false; - } #if IPV6_RECVERR #define setsockopt_on(socket, level, name) \ @@ -263,9 +269,7 @@ thread_local size_t tcp_buffer_length = 0; static int setup_test(void **state) { char *env_workers = getenv("ISC_TASK_WORKERS"); - size_t nworkers; uv_os_sock_t tcp_listen_sock = -1; - isc_nm_t **nm = NULL; tcp_listen_addr = (isc_sockaddr_t){ .length = 0 }; tcp_listen_sock = setup_ephemeral_port(&tcp_listen_addr, SOCK_STREAM); @@ -281,11 +285,6 @@ setup_test(void **state) { workers = isc_os_ncpus(); } INSIST(workers > 0); - nworkers = ISC_MAX(ISC_MIN(workers, 32), 1); - - if (!reuse_supported || getenv("CI") != NULL) { - skip_long_tests = true; - } atomic_store(&total_sends, NSENDS * NWRITES); atomic_store(&nsends, atomic_load(&total_sends)); @@ -297,7 +296,14 @@ setup_test(void **state) { atomic_store(&ctimeouts, 0); atomic_store(&active_cconnects, 0); - atomic_store(&was_error, false); + expected_cconnects = -1; + expected_csends = -1; + expected_creads = -1; + expected_sreads = -1; + expected_ssends = -1; + expected_ctimeouts = -1; + + atomic_store(&test_was_error, false); atomic_store(&POST, false); atomic_store(&use_TLS, false); @@ -310,9 +316,11 
@@ setup_test(void **state) { return (-1); } + setup_loopmgr(state); + nm = isc_mem_get(mctx, MAX_NM * sizeof(nm[0])); for (size_t i = 0; i < MAX_NM; i++) { - isc__netmgr_create(mctx, nworkers, &nm[i]); + isc_netmgr_create(mctx, loopmgr, &nm[i]); assert_non_null(nm[i]); } @@ -338,15 +346,15 @@ setup_test(void **state) { } static int -teardown_test(void **state) { - isc_nm_t **nm = (isc_nm_t **)*state; - +teardown_test(void **state __attribute__((__unused__))) { for (size_t i = 0; i < MAX_NM; i++) { - isc__netmgr_destroy(&nm[i]); + isc_netmgr_destroy(&nm[i]); assert_null(nm[i]); } isc_mem_put(mctx, nm, MAX_NM * sizeof(nm[0])); + teardown_loopmgr(state); + if (server_tlsctx != NULL) { isc_tlsctx_free(&server_tlsctx); } @@ -391,11 +399,15 @@ doh_receive_reply_cb(isc_nmhandle_t *handle, isc_result_t eresult, if (eresult == ISC_R_SUCCESS) { (void)atomic_fetch_sub(&nsends, 1); - atomic_fetch_add(&csends, 1); - atomic_fetch_add(&creads, 1); + if (have_expected_csends(atomic_fetch_add(&csends, 1) + 1) || + have_expected_creads(atomic_fetch_add(&creads, 1) + 1)) + { + isc_loopmgr_shutdown(loopmgr); + } } else { /* We failed to connect; try again */ - atomic_store(&was_error, true); + atomic_store(&test_was_error, true); + isc_loopmgr_shutdown(loopmgr); } } @@ -420,7 +432,7 @@ doh_receive_request_cb(isc_nmhandle_t *handle, isc_result_t eresult, assert_non_null(handle); if (eresult != ISC_R_SUCCESS) { - atomic_store(&was_error, true); + atomic_store(&test_was_error, true); return; } @@ -454,8 +466,7 @@ doh_receive_request_cb(isc_nmhandle_t *handle, isc_result_t eresult, } } -ISC_RUN_TEST_IMPL(mock_doh_uv_tcp_bind) { - isc_nm_t **nm = (isc_nm_t **)*state; +ISC_LOOP_TEST_IMPL(mock_doh_uv_tcp_bind) { isc_nm_t *listen_nm = nm[0]; isc_result_t result = ISC_R_SUCCESS; isc_nmsocket_t *listen_sock = NULL; @@ -472,11 +483,12 @@ ISC_RUN_TEST_IMPL(mock_doh_uv_tcp_bind) { assert_null(listen_sock); RESET_RETURN; + + isc_loopmgr_shutdown(loopmgr); } static void -doh_noop(void **state) { - 
isc_nm_t **nm = (isc_nm_t **)*state; +doh_noop(void *arg __attribute__((__unused__))) { isc_nm_t *listen_nm = nm[0]; isc_nm_t *connect_nm = nm[1]; isc_result_t result = ISC_R_SUCCESS; @@ -503,7 +515,7 @@ doh_noop(void **state) { .length = send_msg.len }, noop_read_cb, NULL, atomic_load(&use_TLS), 30000); - isc__netmgr_shutdown(connect_nm); + isc_loopmgr_shutdown(loopmgr); assert_int_equal(0, atomic_load(&csends)); assert_int_equal(0, atomic_load(&creads)); @@ -511,19 +523,18 @@ doh_noop(void **state) { assert_int_equal(0, atomic_load(&ssends)); } -ISC_RUN_TEST_IMPL(doh_noop_POST) { +ISC_LOOP_TEST_IMPL(doh_noop_POST) { atomic_store(&POST, true); - doh_noop(state); + doh_noop(arg); } -ISC_RUN_TEST_IMPL(doh_noop_GET) { +ISC_LOOP_TEST_IMPL(doh_noop_GET) { atomic_store(&POST, false); - doh_noop(state); + doh_noop(arg); } static void -doh_noresponse(void **state) { - isc_nm_t **nm = (isc_nm_t **)*state; +doh_noresponse(void *arg __attribute__((__unused__))) { isc_nm_t *listen_nm = nm[0]; isc_nm_t *connect_nm = nm[1]; isc_result_t result = ISC_R_SUCCESS; @@ -549,17 +560,17 @@ doh_noresponse(void **state) { isc_nm_stoplistening(listen_sock); isc_nmsocket_close(&listen_sock); assert_null(listen_sock); - isc__netmgr_shutdown(connect_nm); + isc_loopmgr_shutdown(loopmgr); } -ISC_RUN_TEST_IMPL(doh_noresponse_POST) { +ISC_LOOP_TEST_IMPL(doh_noresponse_POST) { atomic_store(&POST, true); - doh_noresponse(state); + doh_noresponse(arg); } -ISC_RUN_TEST_IMPL(doh_noresponse_GET) { +ISC_LOOP_TEST_IMPL(doh_noresponse_GET) { atomic_store(&POST, false); - doh_noresponse(state); + doh_noresponse(arg); } static void @@ -579,10 +590,8 @@ timeout_query_sent_cb(isc_nmhandle_t *handle, isc_result_t eresult, static void timeout_retry_cb(isc_nmhandle_t *handle, isc_result_t eresult, - isc_region_t *region, void *arg) { - UNUSED(region); - UNUSED(arg); - + isc_region_t *region __attribute__((__unused__)), + void *arg __attribute__((__unused__))) { assert_non_null(handle); 
atomic_fetch_add(&ctimeouts, 1); @@ -593,6 +602,7 @@ timeout_retry_cb(isc_nmhandle_t *handle, isc_result_t eresult, } isc_nmhandle_detach(&handle); + isc_loopmgr_shutdown(loopmgr); } static void @@ -617,16 +627,24 @@ timeout_request_cb(isc_nmhandle_t *handle, isc_result_t result, void *arg) { return; error: - atomic_store(&was_error, true); + atomic_store(&test_was_error, true); } static void -doh_timeout_recovery(void **state) { - isc_nm_t **nm = (isc_nm_t **)*state; +listen_sock_close(void *arg) { + isc_nmsocket_t *listen_sock = arg; + + isc_nm_stoplistening(listen_sock); + isc_nmsocket_close(&listen_sock); + assert_null(listen_sock); +} + +static void +doh_timeout_recovery(void *arg __attribute__((__unused__))) { isc_nm_t *listen_nm = nm[0]; + isc_nmsocket_t *listen_sock = NULL; isc_nm_t *connect_nm = nm[1]; isc_result_t result = ISC_R_SUCCESS; - isc_nmsocket_t *listen_sock = NULL; isc_tlsctx_t *ctx = atomic_load(&use_TLS) ? server_tlsctx : NULL; char req_url[256]; @@ -638,6 +656,7 @@ doh_timeout_recovery(void **state) { &tcp_listen_addr, 0, NULL, NULL, endpoints, 0, &listen_sock); assert_int_equal(result, ISC_R_SUCCESS); + isc_loop_teardown(mainloop, listen_sock_close, listen_sock); /* * Accept connections but don't send responses, forcing client @@ -655,42 +674,33 @@ doh_timeout_recovery(void **state) { isc_nm_httpconnect(connect_nm, NULL, &tcp_listen_addr, req_url, atomic_load(&POST), timeout_request_cb, NULL, ctx, client_sess_cache, T_SOFT); +} - /* - * Sleep until sends reaches 5. 
- */ - for (size_t i = 0; i < 1000; i++) { - if (atomic_load(&ctimeouts) == 5) { - break; - } - isc_test_nap(1); - } +static int +doh_timeout_recovery_teardown(void **state) { assert_true(atomic_load(&ctimeouts) == 5); - - isc_nm_stoplistening(listen_sock); - isc_nmsocket_close(&listen_sock); - assert_null(listen_sock); - isc__netmgr_shutdown(connect_nm); + return (teardown_test(state)); } -ISC_RUN_TEST_IMPL(doh_timeout_recovery_POST) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_timeout_recovery_POST) { atomic_store(&POST, true); - doh_timeout_recovery(state); + doh_timeout_recovery(arg); } -ISC_RUN_TEST_IMPL(doh_timeout_recovery_GET) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_timeout_recovery_GET) { atomic_store(&POST, false); - doh_timeout_recovery(state); + doh_timeout_recovery(arg); } +static void +doh_connect_thread(void *arg); + static void doh_receive_send_reply_cb(isc_nmhandle_t *handle, isc_result_t eresult, isc_region_t *region, void *cbarg) { isc_nmhandle_t *thandle = NULL; + isc_nm_t *connect_nm = (isc_nm_t *)cbarg; + assert_non_null(handle); UNUSED(region); @@ -699,7 +709,7 @@ doh_receive_send_reply_cb(isc_nmhandle_t *handle, isc_result_t eresult, int_fast64_t sends = atomic_fetch_sub(&nsends, 1); atomic_fetch_add(&csends, 1); atomic_fetch_add(&creads, 1); - if (sends > 0 && cbarg == NULL) { + if (sends > 0 && connect_nm != NULL) { size_t i; for (i = 0; i < NWRITES / 2; i++) { eresult = isc__nm_http_request( @@ -707,21 +717,27 @@ doh_receive_send_reply_cb(isc_nmhandle_t *handle, isc_result_t eresult, &(isc_region_t){ .base = (uint8_t *)send_msg.base, .length = send_msg.len }, - doh_receive_send_reply_cb, (void *)1); + doh_receive_send_reply_cb, NULL); if (eresult == ISC_R_CANCELED) { break; } assert_true(eresult == ISC_R_SUCCESS); } + + isc_job_run(loopmgr, doh_connect_thread, connect_nm); + } + if (sends <= 0) { + isc_loopmgr_shutdown(loopmgr); } } else { - atomic_store(&was_error, true); + atomic_store(&test_was_error, true); + 
isc_loopmgr_shutdown(loopmgr); } isc_nmhandle_detach(&thandle); } -static isc_threadresult_t -doh_connect_thread(isc_threadarg_t arg) { +static void +doh_connect_thread(void *arg) { isc_nm_t *connect_nm = (isc_nm_t *)arg; char req_url[256]; int64_t sends = atomic_load(&nsends); @@ -729,31 +745,29 @@ doh_connect_thread(isc_threadarg_t arg) { sockaddr_to_url(&tcp_listen_addr, atomic_load(&use_TLS), req_url, sizeof(req_url), ISC_NM_HTTP_DEFAULT_PATH); - while (sends > 0) { - /* - * We need to back off and slow down if we start getting - * errors, to prevent a thundering herd problem. - */ - int_fast64_t active = atomic_fetch_add(&active_cconnects, 1); - if (atomic_load(&slowdown) || active > workers) { - isc_test_nap(active - workers); - atomic_store(&slowdown, false); - } - connect_send_request( - connect_nm, req_url, atomic_load(&POST), - &(isc_region_t){ .base = (uint8_t *)send_msg.base, - .length = send_msg.len }, - doh_receive_send_reply_cb, NULL, atomic_load(&use_TLS), - 30000); - sends = atomic_load(&nsends); + /* + * We need to back off and slow down if we start getting + * errors, to prevent a thundering herd problem. 
+ */ + int_fast64_t active = atomic_fetch_add(&active_cconnects, 1); + if (atomic_load(&slowdown) || active > workers) { + goto next; + } + connect_send_request(connect_nm, req_url, atomic_load(&POST), + &(isc_region_t){ .base = (uint8_t *)send_msg.base, + .length = send_msg.len }, + doh_receive_send_reply_cb, connect_nm, + atomic_load(&use_TLS), 30000); + + if (sends <= 0) { + isc_loopmgr_shutdown(loopmgr); } - return ((isc_threadresult_t)0); +next : {} } static void -doh_recv_one(void **state) { - isc_nm_t **nm = (isc_nm_t **)*state; +doh_recv_one(void *arg __attribute__((__unused__))) { isc_nm_t *listen_nm = nm[0]; isc_nm_t *connect_nm = nm[1]; isc_result_t result = ISC_R_SUCCESS; @@ -762,6 +776,7 @@ doh_recv_one(void **state) { isc_quota_t *quotap = init_listener_quota(workers); atomic_store(&total_sends, 1); + expected_creads = 1; atomic_store(&nsends, atomic_load(&total_sends)); @@ -783,27 +798,11 @@ doh_recv_one(void **state) { doh_receive_reply_cb, NULL, atomic_load(&use_TLS), 30000); - while (atomic_load(&nsends) > 0) { - if (atomic_load(&was_error)) { - break; - } - isc_thread_yield(); - } - - while (atomic_load(&ssends) != 1 || atomic_load(&sreads) != 1 || - atomic_load(&csends) != 1) - { - if (atomic_load(&was_error)) { - break; - } - isc_thread_yield(); - } - - isc_nm_stoplistening(listen_sock); - isc_nmsocket_close(&listen_sock); - assert_null(listen_sock); - isc__netmgr_shutdown(connect_nm); + isc_loop_teardown(mainloop, listen_sock_close, listen_sock); +} +static int +doh_recv_one_teardown(void **state) { X(total_sends); X(csends); X(creads); @@ -814,70 +813,56 @@ doh_recv_one(void **state) { assert_int_equal(atomic_load(&creads), 1); assert_int_equal(atomic_load(&sreads), 1); assert_int_equal(atomic_load(&ssends), 1); + + return (teardown_test(state)); } -ISC_RUN_TEST_IMPL(doh_recv_one_POST) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_one_POST) { atomic_store(&POST, true); - doh_recv_one(state); + doh_recv_one(arg); } 
-ISC_RUN_TEST_IMPL(doh_recv_one_GET) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_one_GET) { atomic_store(&POST, false); - doh_recv_one(state); + doh_recv_one(arg); } -ISC_RUN_TEST_IMPL(doh_recv_one_POST_TLS) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_one_POST_TLS) { atomic_store(&use_TLS, true); atomic_store(&POST, true); - doh_recv_one(state); + doh_recv_one(arg); } -ISC_RUN_TEST_IMPL(doh_recv_one_GET_TLS) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_one_GET_TLS) { atomic_store(&use_TLS, true); atomic_store(&POST, false); - doh_recv_one(state); + doh_recv_one(arg); } -ISC_RUN_TEST_IMPL(doh_recv_one_POST_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_one_POST_quota) { atomic_store(&POST, true); atomic_store(&check_listener_quota, true); - doh_recv_one(state); + doh_recv_one(arg); } -ISC_RUN_TEST_IMPL(doh_recv_one_GET_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_one_GET_quota) { atomic_store(&POST, false); atomic_store(&check_listener_quota, true); - doh_recv_one(state); + doh_recv_one(arg); } -ISC_RUN_TEST_IMPL(doh_recv_one_POST_TLS_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_one_POST_TLS_quota) { atomic_store(&use_TLS, true); atomic_store(&POST, true); atomic_store(&check_listener_quota, true); - doh_recv_one(state); + doh_recv_one(arg); } -ISC_RUN_TEST_IMPL(doh_recv_one_GET_TLS_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_one_GET_TLS_quota) { atomic_store(&use_TLS, true); atomic_store(&POST, false); atomic_store(&check_listener_quota, true); - doh_recv_one(state); + doh_recv_one(arg); } static void @@ -907,12 +892,11 @@ doh_connect_send_two_requests_cb(isc_nmhandle_t *handle, isc_result_t result, } return; error: - atomic_store(&was_error, true); + atomic_store(&test_was_error, true); } static void -doh_recv_two(void **state) { - isc_nm_t **nm = (isc_nm_t **)*state; +doh_recv_two(void *arg __attribute__((__unused__))) { isc_nm_t *listen_nm = nm[0]; isc_nm_t *connect_nm = nm[1]; isc_result_t result = ISC_R_SUCCESS; @@ 
-922,6 +906,7 @@ doh_recv_two(void **state) { isc_quota_t *quotap = init_listener_quota(workers); atomic_store(&total_sends, 2); + expected_creads = 2; atomic_store(&nsends, atomic_load(&total_sends)); @@ -946,27 +931,11 @@ doh_recv_two(void **state) { atomic_load(&POST), doh_connect_send_two_requests_cb, NULL, ctx, client_sess_cache, 5000); - while (atomic_load(&nsends) > 0) { - if (atomic_load(&was_error)) { - break; - } - isc_thread_yield(); - } - - while (atomic_load(&ssends) != 2 || atomic_load(&sreads) != 2 || - atomic_load(&csends) != 2) - { - if (atomic_load(&was_error)) { - break; - } - isc_thread_yield(); - } - - isc_nm_stoplistening(listen_sock); - isc_nmsocket_close(&listen_sock); - assert_null(listen_sock); - isc__netmgr_shutdown(connect_nm); + isc_loop_teardown(mainloop, listen_sock_close, listen_sock); +} +static int +doh_recv_two_teardown(void **state) { X(total_sends); X(csends); X(creads); @@ -977,83 +946,70 @@ doh_recv_two(void **state) { assert_int_equal(atomic_load(&creads), 2); assert_int_equal(atomic_load(&sreads), 2); assert_int_equal(atomic_load(&ssends), 2); + + return (teardown_test(state)); } -ISC_RUN_TEST_IMPL(doh_recv_two_POST) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_two_POST) { atomic_store(&POST, true); - doh_recv_two(state); + doh_recv_two(arg); } -ISC_RUN_TEST_IMPL(doh_recv_two_GET) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_two_GET) { atomic_store(&POST, false); - doh_recv_two(state); + doh_recv_two(arg); } -ISC_RUN_TEST_IMPL(doh_recv_two_POST_TLS) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_two_POST_TLS) { atomic_store(&use_TLS, true); atomic_store(&POST, true); - doh_recv_two(state); + doh_recv_two(arg); } -ISC_RUN_TEST_IMPL(doh_recv_two_GET_TLS) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_two_GET_TLS) { atomic_store(&use_TLS, true); atomic_store(&POST, false); - doh_recv_two(state); + doh_recv_two(arg); } -ISC_RUN_TEST_IMPL(doh_recv_two_POST_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_two_POST_quota) 
{ atomic_store(&POST, true); atomic_store(&check_listener_quota, true); - doh_recv_two(state); + doh_recv_two(arg); } -ISC_RUN_TEST_IMPL(doh_recv_two_GET_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_two_GET_quota) { atomic_store(&POST, false); atomic_store(&check_listener_quota, true); - doh_recv_two(state); + doh_recv_two(arg); } -ISC_RUN_TEST_IMPL(doh_recv_two_POST_TLS_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_two_POST_TLS_quota) { atomic_store(&use_TLS, true); atomic_store(&POST, true); atomic_store(&check_listener_quota, true); - doh_recv_two(state); + doh_recv_two(arg); } -ISC_RUN_TEST_IMPL(doh_recv_two_GET_TLS_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_two_GET_TLS_quota) { atomic_store(&use_TLS, true); atomic_store(&POST, false); atomic_store(&check_listener_quota, true); - doh_recv_two(state); + doh_recv_two(arg); } static void -doh_recv_send(void **state) { - isc_nm_t **nm = (isc_nm_t **)*state; +doh_recv_send(void *arg __attribute__((__unused__))) { isc_nm_t *listen_nm = nm[0]; isc_nm_t *connect_nm = nm[1]; isc_result_t result = ISC_R_SUCCESS; isc_nmsocket_t *listen_sock = NULL; - size_t nthreads = ISC_MAX(ISC_MIN(workers, 32), 1); - isc_thread_t threads[32] = { 0 }; + size_t nthreads = isc_loopmgr_nloops(loopmgr); isc_quota_t *quotap = init_listener_quota(workers); + atomic_store(&total_sends, 1000); + atomic_store(&nsends, 1000); + result = isc_nm_http_endpoints_add(endpoints, ISC_NM_HTTP_DEFAULT_PATH, doh_receive_request_cb, NULL); assert_int_equal(result, ISC_R_SUCCESS); @@ -1065,25 +1021,16 @@ doh_recv_send(void **state) { assert_int_equal(result, ISC_R_SUCCESS); for (size_t i = 0; i < nthreads; i++) { - isc_thread_create(doh_connect_thread, connect_nm, &threads[i]); + isc_async_run(isc_loop_get(loopmgr, i), doh_connect_thread, + connect_nm); } - /* wait for the all responses from the server */ - while (atomic_load(&ssends) < atomic_load(&total_sends)) { - if (atomic_load(&was_error)) { - break; - } - isc_test_nap(1); - 
} + isc_loop_teardown(mainloop, listen_sock_close, listen_sock); +} - for (size_t i = 0; i < nthreads; i++) { - isc_thread_join(threads[i], NULL); - } - - isc__netmgr_shutdown(connect_nm); - isc_nm_stoplistening(listen_sock); - isc_nmsocket_close(&listen_sock); - assert_null(listen_sock); +static int +doh_recv_send_teardown(void **state) { + int res = teardown_test(state); X(total_sends); X(csends); @@ -1095,431 +1042,78 @@ doh_recv_send(void **state) { CHECK_RANGE_FULL(creads); CHECK_RANGE_FULL(sreads); CHECK_RANGE_FULL(ssends); + + return (res); } -ISC_RUN_TEST_IMPL(doh_recv_send_POST) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_send_POST) { atomic_store(&POST, true); - doh_recv_send(state); + doh_recv_send(arg); } -ISC_RUN_TEST_IMPL(doh_recv_send_GET) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_send_GET) { atomic_store(&POST, false); - doh_recv_send(state); + doh_recv_send(arg); } -ISC_RUN_TEST_IMPL(doh_recv_send_POST_TLS) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_send_POST_TLS) { atomic_store(&POST, true); atomic_store(&use_TLS, true); - doh_recv_send(state); + doh_recv_send(arg); } -ISC_RUN_TEST_IMPL(doh_recv_send_GET_TLS) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_send_GET_TLS) { atomic_store(&POST, false); atomic_store(&use_TLS, true); - doh_recv_send(state); + doh_recv_send(arg); } -ISC_RUN_TEST_IMPL(doh_recv_send_POST_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_send_POST_quota) { atomic_store(&POST, true); atomic_store(&check_listener_quota, true); - doh_recv_send(state); + doh_recv_send(arg); } -ISC_RUN_TEST_IMPL(doh_recv_send_GET_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_send_GET_quota) { atomic_store(&POST, false); atomic_store(&check_listener_quota, true); - doh_recv_send(state); + doh_recv_send(arg); } -ISC_RUN_TEST_IMPL(doh_recv_send_POST_TLS_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_send_POST_TLS_quota) { atomic_store(&POST, true); atomic_store(&use_TLS, true); atomic_store(&check_listener_quota, 
true); - doh_recv_send(state); + doh_recv_send(arg); } -ISC_RUN_TEST_IMPL(doh_recv_send_GET_TLS_quota) { - SKIP_IN_CI; - +ISC_LOOP_TEST_IMPL(doh_recv_send_GET_TLS_quota) { atomic_store(&POST, false); atomic_store(&use_TLS, true); atomic_store(&check_listener_quota, true); - doh_recv_send(state); + doh_recv_send(arg); } -static void -doh_recv_half_send(void **state) { - isc_nm_t **nm = (isc_nm_t **)*state; - isc_nm_t *listen_nm = nm[0]; - isc_nm_t *connect_nm = nm[1]; - isc_result_t result = ISC_R_SUCCESS; - isc_nmsocket_t *listen_sock = NULL; - size_t nthreads = ISC_MAX(ISC_MIN(workers, 32), 1); - isc_thread_t threads[32] = { 0 }; - isc_quota_t *quotap = init_listener_quota(workers); - - atomic_store(&total_sends, atomic_load(&total_sends) / 2); - - atomic_store(&nsends, atomic_load(&total_sends)); - - result = isc_nm_http_endpoints_add(endpoints, ISC_NM_HTTP_DEFAULT_PATH, - doh_receive_request_cb, NULL); - assert_int_equal(result, ISC_R_SUCCESS); - - result = isc_nm_listenhttp(listen_nm, ISC_NM_LISTEN_ALL, - &tcp_listen_addr, 0, quotap, - atomic_load(&use_TLS) ? 
server_tlsctx : NULL, - endpoints, 0, &listen_sock); - assert_int_equal(result, ISC_R_SUCCESS); - - for (size_t i = 0; i < nthreads; i++) { - isc_thread_create(doh_connect_thread, connect_nm, &threads[i]); - } - - while (atomic_load(&nsends) > 0) { - isc_thread_yield(); - } - - isc__netmgr_shutdown(connect_nm); - - for (size_t i = 0; i < nthreads; i++) { - isc_thread_join(threads[i], NULL); - } - - isc_nm_stoplistening(listen_sock); - isc_nmsocket_close(&listen_sock); - assert_null(listen_sock); - +static int +doh_bad_connect_uri_teardown(void **state) { X(total_sends); X(csends); X(creads); X(sreads); X(ssends); - CHECK_RANGE_HALF(csends); - CHECK_RANGE_HALF(creads); - CHECK_RANGE_HALF(sreads); - CHECK_RANGE_HALF(ssends); -} - -ISC_RUN_TEST_IMPL(doh_recv_half_send_POST) { - SKIP_IN_CI; - - atomic_store(&POST, true); - doh_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_recv_half_send_GET) { - SKIP_IN_CI; - - atomic_store(&POST, false); - doh_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_recv_half_send_POST_TLS) { - SKIP_IN_CI; - - atomic_store(&use_TLS, true); - atomic_store(&POST, true); - doh_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_recv_half_send_GET_TLS) { - SKIP_IN_CI; - - atomic_store(&use_TLS, true); - atomic_store(&POST, false); - doh_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_recv_half_send_POST_quota) { - SKIP_IN_CI; - - atomic_store(&POST, true); - atomic_store(&check_listener_quota, true); - doh_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_recv_half_send_GET_quota) { - SKIP_IN_CI; - - atomic_store(&POST, false); - atomic_store(&check_listener_quota, true); - doh_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_recv_half_send_POST_TLS_quota) { - SKIP_IN_CI; - - atomic_store(&use_TLS, true); - atomic_store(&POST, true); - atomic_store(&check_listener_quota, true); - doh_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_recv_half_send_GET_TLS_quota) { - SKIP_IN_CI; - - atomic_store(&use_TLS, true); - atomic_store(&POST, 
false); - atomic_store(&check_listener_quota, true); - doh_recv_half_send(state); -} - -static void -doh_half_recv_send(void **state) { - isc_nm_t **nm = (isc_nm_t **)*state; - isc_nm_t *listen_nm = nm[0]; - isc_nm_t *connect_nm = nm[1]; - isc_result_t result = ISC_R_SUCCESS; - isc_nmsocket_t *listen_sock = NULL; - size_t nthreads = ISC_MAX(ISC_MIN(workers, 32), 1); - isc_thread_t threads[32] = { 0 }; - isc_quota_t *quotap = init_listener_quota(workers); - - atomic_store(&total_sends, atomic_load(&total_sends) / 2); - - atomic_store(&nsends, atomic_load(&total_sends)); - - result = isc_nm_http_endpoints_add(endpoints, ISC_NM_HTTP_DEFAULT_PATH, - doh_receive_request_cb, NULL); - assert_int_equal(result, ISC_R_SUCCESS); - - result = isc_nm_listenhttp(listen_nm, ISC_NM_LISTEN_ALL, - &tcp_listen_addr, 0, quotap, - atomic_load(&use_TLS) ? server_tlsctx : NULL, - endpoints, 0, &listen_sock); - assert_int_equal(result, ISC_R_SUCCESS); - - for (size_t i = 0; i < nthreads; i++) { - isc_thread_create(doh_connect_thread, connect_nm, &threads[i]); - } - - while (atomic_load(&nsends) > 0) { - isc_thread_yield(); - } - - isc_nm_stoplistening(listen_sock); - isc_nmsocket_close(&listen_sock); - assert_null(listen_sock); - - for (size_t i = 0; i < nthreads; i++) { - isc_thread_join(threads[i], NULL); - } - - isc__netmgr_shutdown(connect_nm); - - X(total_sends); - X(csends); - X(creads); - X(sreads); - X(ssends); - - CHECK_RANGE_HALF(csends); - CHECK_RANGE_HALF(creads); - CHECK_RANGE_HALF(sreads); - CHECK_RANGE_HALF(ssends); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_send_POST) { - SKIP_IN_CI; - - atomic_store(&POST, true); - doh_half_recv_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_send_GET) { - SKIP_IN_CI; - - atomic_store(&POST, false); - doh_half_recv_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_send_POST_TLS) { - SKIP_IN_CI; - - atomic_store(&use_TLS, true); - atomic_store(&POST, true); - doh_half_recv_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_send_GET_TLS) { - 
SKIP_IN_CI; - - atomic_store(&use_TLS, true); - atomic_store(&POST, false); - doh_half_recv_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_send_POST_quota) { - SKIP_IN_CI; - - atomic_store(&POST, true); - atomic_store(&check_listener_quota, true); - doh_half_recv_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_send_GET_quota) { - SKIP_IN_CI; - - atomic_store(&POST, false); - atomic_store(&check_listener_quota, true); - doh_half_recv_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_send_POST_TLS_quota) { - SKIP_IN_CI; - - atomic_store(&use_TLS, true); - atomic_store(&POST, true); - atomic_store(&check_listener_quota, true); - doh_half_recv_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_send_GET_TLS_quota) { - SKIP_IN_CI; - - atomic_store(&use_TLS, true); - atomic_store(&POST, false); - atomic_store(&check_listener_quota, true); - doh_half_recv_send(state); -} - -static void -doh_half_recv_half_send(void **state) { - isc_nm_t **nm = (isc_nm_t **)*state; - isc_nm_t *listen_nm = nm[0]; - isc_nm_t *connect_nm = nm[1]; - isc_result_t result = ISC_R_SUCCESS; - isc_nmsocket_t *listen_sock = NULL; - size_t nthreads = ISC_MAX(ISC_MIN(workers, 32), 1); - isc_thread_t threads[32] = { 0 }; - isc_quota_t *quotap = init_listener_quota(workers); - - atomic_store(&total_sends, atomic_load(&total_sends) / 2); - - atomic_store(&nsends, atomic_load(&total_sends)); - - result = isc_nm_http_endpoints_add(endpoints, ISC_NM_HTTP_DEFAULT_PATH, - doh_receive_request_cb, NULL); - assert_int_equal(result, ISC_R_SUCCESS); - - result = isc_nm_listenhttp(listen_nm, ISC_NM_LISTEN_ALL, - &tcp_listen_addr, 0, quotap, - atomic_load(&use_TLS) ? 
server_tlsctx : NULL, - endpoints, 0, &listen_sock); - assert_int_equal(result, ISC_R_SUCCESS); - - for (size_t i = 0; i < nthreads; i++) { - isc_thread_create(doh_connect_thread, connect_nm, &threads[i]); - } - - while (atomic_load(&nsends) > 0) { - isc_thread_yield(); - } - - isc__netmgr_shutdown(connect_nm); - isc_nm_stoplistening(listen_sock); - isc_nmsocket_close(&listen_sock); - assert_null(listen_sock); - - for (size_t i = 0; i < nthreads; i++) { - isc_thread_join(threads[i], NULL); - } - - X(total_sends); - X(csends); - X(creads); - X(sreads); - X(ssends); - - CHECK_RANGE_HALF(csends); - CHECK_RANGE_HALF(creads); - CHECK_RANGE_HALF(sreads); - CHECK_RANGE_HALF(ssends); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_half_send_POST) { - SKIP_IN_CI; - - atomic_store(&POST, true); - doh_half_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_half_send_GET) { - SKIP_IN_CI; - - atomic_store(&POST, false); - doh_half_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_half_send_POST_TLS) { - SKIP_IN_CI; - - atomic_store(&use_TLS, true); - atomic_store(&POST, true); - doh_half_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_half_send_GET_TLS) { - SKIP_IN_CI; - - atomic_store(&use_TLS, true); - atomic_store(&POST, false); - doh_half_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_half_send_POST_quota) { - SKIP_IN_CI; - - atomic_store(&POST, true); - atomic_store(&check_listener_quota, true); - doh_half_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_half_send_GET_quota) { - SKIP_IN_CI; - - atomic_store(&POST, false); - atomic_store(&check_listener_quota, true); - doh_half_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_half_send_POST_TLS_quota) { - SKIP_IN_CI; - - atomic_store(&use_TLS, true); - atomic_store(&POST, true); - atomic_store(&check_listener_quota, true); - doh_half_recv_half_send(state); -} - -ISC_RUN_TEST_IMPL(doh_half_recv_half_send_GET_TLS_quota) { - SKIP_IN_CI; - - atomic_store(&use_TLS, 
true); - atomic_store(&POST, false); - atomic_store(&check_listener_quota, true); - doh_half_recv_half_send(state); + /* As we used an ill-formed URI, there ought to be an error. */ + assert_true(atomic_load(&test_was_error)); + assert_int_equal(atomic_load(&csends), 0); + assert_int_equal(atomic_load(&creads), 0); + assert_int_equal(atomic_load(&sreads), 0); + assert_int_equal(atomic_load(&ssends), 0); + + return (teardown_test(state)); } /* See: GL #2858, !5319 */ -ISC_RUN_TEST_IMPL(doh_bad_connect_uri) { - isc_nm_t **nm = (isc_nm_t **)*state; +ISC_LOOP_TEST_IMPL(doh_bad_connect_uri) { isc_nm_t *listen_nm = nm[0]; isc_nm_t *connect_nm = nm[1]; isc_result_t result = ISC_R_SUCCESS; @@ -1552,34 +1146,10 @@ ISC_RUN_TEST_IMPL(doh_bad_connect_uri) { .length = send_msg.len }, doh_receive_reply_cb, NULL, true, 30000); - while (atomic_load(&nsends) > 0) { - if (atomic_load(&was_error)) { - break; - } - isc_thread_yield(); - } - - isc_nm_stoplistening(listen_sock); - isc_nmsocket_close(&listen_sock); - assert_null(listen_sock); - isc__netmgr_shutdown(connect_nm); - - X(total_sends); - X(csends); - X(creads); - X(sreads); - X(ssends); - - /* As we used an ill-formed URI, there ought to be an error. 
*/ - assert_true(atomic_load(&was_error)); - assert_int_equal(atomic_load(&csends), 0); - assert_int_equal(atomic_load(&creads), 0); - assert_int_equal(atomic_load(&sreads), 0); - assert_int_equal(atomic_load(&ssends), 0); + isc_loop_teardown(mainloop, listen_sock_close, listen_sock); } ISC_RUN_TEST_IMPL(doh_parse_GET_query_string) { - UNUSED(state); /* valid */ { bool ret; @@ -1801,7 +1371,6 @@ ISC_RUN_TEST_IMPL(doh_parse_GET_query_string) { } ISC_RUN_TEST_IMPL(doh_base64url_to_base64) { - UNUSED(state); char *res; size_t res_len = 0; /* valid */ @@ -1940,7 +1509,6 @@ ISC_RUN_TEST_IMPL(doh_base64url_to_base64) { ISC_RUN_TEST_IMPL(doh_base64_to_base64url) { char *res; size_t res_len = 0; - UNUSED(state); /* valid */ { char res_test[] = "YW55IGNhcm5hbCBwbGVhc3VyZS4"; @@ -2075,8 +1643,6 @@ ISC_RUN_TEST_IMPL(doh_base64_to_base64url) { } ISC_RUN_TEST_IMPL(doh_path_validation) { - UNUSED(state); - assert_true(isc_nm_http_path_isvalid("/")); assert_true(isc_nm_http_path_isvalid(ISC_NM_HTTP_DEFAULT_PATH)); assert_false(isc_nm_http_path_isvalid("laaaa")); @@ -2112,7 +1678,6 @@ ISC_RUN_TEST_IMPL(doh_connect_makeuri) { struct in_addr localhostv4 = { .s_addr = ntohl(INADDR_LOOPBACK) }; isc_sockaddr_t sa; char uri[256]; - UNUSED(state); /* Firstly, test URI generation using isc_sockaddr_t */ isc_sockaddr_fromin(&sa, &localhostv4, 0); @@ -2244,67 +1809,47 @@ ISC_TEST_ENTRY_CUSTOM(doh_noop_POST, setup_test, teardown_test) ISC_TEST_ENTRY_CUSTOM(doh_noop_GET, setup_test, teardown_test) ISC_TEST_ENTRY_CUSTOM(doh_noresponse_POST, setup_test, teardown_test) ISC_TEST_ENTRY_CUSTOM(doh_noresponse_GET, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_timeout_recovery_POST, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_timeout_recovery_GET, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_TLS, setup_test, teardown_test) 
-ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_TLS, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_TLS_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_TLS_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_TLS, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_TLS, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_TLS_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_TLS_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_TLS, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_TLS, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_TLS_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_TLS_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_GET, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_POST, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_GET_TLS, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_POST_TLS, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_GET_quota, setup_test, teardown_test) 
-ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_POST_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_GET_TLS_quota, setup_test, - teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_recv_half_send_POST_TLS_quota, setup_test, - teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_GET, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_POST, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_GET_TLS, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_POST_TLS, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_GET_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_POST_quota, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_GET_TLS_quota, setup_test, - teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_send_POST_TLS_quota, setup_test, - teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_GET, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_POST, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_GET_TLS, setup_test, - teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_POST_TLS, setup_test, - teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_GET_quota, setup_test, - teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_POST_quota, setup_test, - teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_GET_TLS_quota, setup_test, - teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_half_recv_half_send_POST_TLS_quota, setup_test, - teardown_test) -ISC_TEST_ENTRY_CUSTOM(doh_bad_connect_uri, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(doh_timeout_recovery_POST, setup_test, + doh_timeout_recovery_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_timeout_recovery_GET, setup_test, + doh_timeout_recovery_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST, setup_test, doh_recv_one_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET, setup_test, doh_recv_one_teardown) 
+ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_TLS, setup_test, doh_recv_one_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_TLS, setup_test, doh_recv_one_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_quota, setup_test, + doh_recv_one_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_quota, setup_test, doh_recv_one_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_one_POST_TLS_quota, setup_test, + doh_recv_one_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_one_GET_TLS_quota, setup_test, + doh_recv_one_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST, setup_test, doh_recv_two_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET, setup_test, doh_recv_two_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_TLS, setup_test, doh_recv_two_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_TLS, setup_test, doh_recv_two_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_quota, setup_test, + doh_recv_two_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_quota, setup_test, doh_recv_two_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_two_POST_TLS_quota, setup_test, + doh_recv_two_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_two_GET_TLS_quota, setup_test, + doh_recv_two_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET, setup_test, doh_recv_send_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST, setup_test, doh_recv_send_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_TLS, setup_test, doh_recv_send_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_TLS, setup_test, + doh_recv_send_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_quota, setup_test, + doh_recv_send_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_quota, setup_test, + doh_recv_send_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_send_GET_TLS_quota, setup_test, + doh_recv_send_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_recv_send_POST_TLS_quota, setup_test, + doh_recv_send_teardown) +ISC_TEST_ENTRY_CUSTOM(doh_bad_connect_uri, setup_test, + doh_bad_connect_uri_teardown) ISC_TEST_LIST_END ISC_TEST_MAIN diff --git a/tests/isc/lex_test.c 
b/tests/isc/lex_test.c index cf6bfe2ea5..48c7915f5c 100644 --- a/tests/isc/lex_test.c +++ b/tests/isc/lex_test.c @@ -348,7 +348,6 @@ ISC_TEST_ENTRY(lex_keypair) ISC_TEST_ENTRY(lex_setline) ISC_TEST_ENTRY(lex_string) ISC_TEST_ENTRY(lex_qstring) - ISC_TEST_LIST_END ISC_TEST_MAIN diff --git a/tests/isc/task_test.c b/tests/isc/task_test.c index f7a20786d7..1505c74a93 100644 --- a/tests/isc/task_test.c +++ b/tests/isc/task_test.c @@ -29,71 +29,26 @@ #include #include #include +#include +#include +#include #include #include +#include #include #include #include #include #include -#include +#include #include -/* Set to true (or use -v option) for verbose output */ -static bool verbose = false; - -static isc_mutex_t lock; -static isc_condition_t cv; - -atomic_int_fast32_t counter; +static atomic_int_fast32_t counter; static int active[10]; -static atomic_bool done; +static atomic_bool done = false; -static int -_setup(void **state) { - isc_mutex_init(&lock); - isc_condition_init(&cv); - - workers = 0; - setup_managers(state); - - return (0); -} - -static int -_setup2(void **state) { - isc_mutex_init(&lock); - isc_condition_init(&cv); - - /* Two worker threads */ - workers = 2; - setup_managers(state); - - return (0); -} - -static int -_setup4(void **state) { - isc_mutex_init(&lock); - isc_condition_init(&cv); - - /* Four worker threads */ - workers = 4; - setup_managers(state); - - return (0); -} - -static int -_teardown(void **state) { - teardown_managers(state); - - isc_condition_destroy(&cv); - isc_mutex_destroy(&lock); - - return (0); -} +atomic_int_fast32_t set_a, set_b; static void set(isc_task_t *task, isc_event_t *event) { @@ -103,63 +58,62 @@ set(isc_task_t *task, isc_event_t *event) { isc_event_free(&event); atomic_store(value, atomic_fetch_add(&counter, 1)); + + if ((atomic_load(&set_a) != 0 && atomic_load(&set_b) != 0)) { + isc_loopmgr_shutdown(loopmgr); + } } #include -/* Create a task */ -ISC_RUN_TEST_IMPL(create_task) { +ISC_LOOP_TEST_IMPL(create_task) { 
isc_result_t result; isc_task_t *task = NULL; - UNUSED(state); - - result = isc_task_create(taskmgr, 0, &task, 0); + result = isc_task_create(taskmgr, &task, 0); assert_int_equal(result, ISC_R_SUCCESS); isc_task_detach(&task); assert_null(task); + + isc_loopmgr_shutdown(loopmgr); } -/* Process events */ -ISC_RUN_TEST_IMPL(all_events) { +ISC_LOOP_SETUP_IMPL(all_events) { + atomic_init(&set_a, 0); + atomic_init(&set_b, 0); +} + +ISC_LOOP_TEARDOWN_IMPL(all_events) { + assert_int_not_equal(atomic_load(&set_a), 0); + assert_int_not_equal(atomic_load(&set_b), 0); +} + +ISC_LOOP_TEST_SETUP_TEARDOWN_IMPL(all_events) { isc_result_t result; isc_task_t *task = NULL; isc_event_t *event = NULL; - atomic_int_fast32_t a, b; - int i = 0; - - UNUSED(state); atomic_init(&counter, 1); - atomic_init(&a, 0); - atomic_init(&b, 0); - result = isc_task_create(taskmgr, 0, &task, 0); + result = isc_task_create(taskmgr, &task, 0); assert_int_equal(result, ISC_R_SUCCESS); /* First event */ - event = isc_event_allocate(mctx, task, ISC_TASKEVENT_TEST, set, &a, + event = isc_event_allocate(mctx, task, ISC_TASKEVENT_TEST, set, &set_a, sizeof(isc_event_t)); assert_non_null(event); - assert_int_equal(atomic_load(&a), 0); + assert_int_equal(atomic_load(&set_a), 0); isc_task_send(task, &event); - event = isc_event_allocate(mctx, task, ISC_TASKEVENT_TEST, set, &b, + event = isc_event_allocate(mctx, task, ISC_TASKEVENT_TEST, set, &set_b, sizeof(isc_event_t)); assert_non_null(event); - assert_int_equal(atomic_load(&b), 0); + assert_int_equal(atomic_load(&set_b), 0); isc_task_send(task, &event); - while ((atomic_load(&a) == 0 || atomic_load(&b) == 0) && i++ < 5000) { - uv_sleep(1); - } - - assert_int_not_equal(atomic_load(&a), 0); - assert_int_not_equal(atomic_load(&b), 0); - isc_task_detach(&task); assert_null(task); } @@ -179,23 +133,12 @@ basic_cb(isc_task_t *task, isc_event_t *event) { } UNUSED(j); - - if (verbose) { - print_message("# task %s\n", (char *)event->ev_arg); - } - isc_event_free(&event); 
} static void -basic_tick(isc_task_t *task, isc_event_t *event) { - UNUSED(task); - - if (verbose) { - print_message("# %s\n", (char *)event->ev_arg); - } - - isc_event_free(&event); +basic_tick(void *arg __attribute__((__unused__))) { + /* no-op */ } static char one[] = "1"; @@ -205,45 +148,20 @@ static char four[] = "4"; static char tick[] = "tick"; static char tock[] = "tock"; -ISC_RUN_TEST_IMPL(basic) { - isc_result_t result; - isc_task_t *task1 = NULL; - isc_task_t *task2 = NULL; - isc_task_t *task3 = NULL; - isc_task_t *task4 = NULL; - isc_event_t *event = NULL; - isc_timer_t *ti1 = NULL; - isc_timer_t *ti2 = NULL; - isc_interval_t interval; +isc_task_t *task1 = NULL; +isc_task_t *task2 = NULL; +isc_task_t *task3 = NULL; +isc_task_t *task4 = NULL; +isc_timer_t *ti1 = NULL; +isc_timer_t *ti2 = NULL; + +static void +basic_work(void *arg __attribute__((__unused__))) { char *testarray[] = { one, one, one, one, one, one, one, one, one, two, three, four, two, three, four, NULL }; - int i; - - UNUSED(state); - - result = isc_task_create(taskmgr, 0, &task1, 0); - assert_int_equal(result, ISC_R_SUCCESS); - result = isc_task_create(taskmgr, 0, &task2, 0); - assert_int_equal(result, ISC_R_SUCCESS); - result = isc_task_create(taskmgr, 0, &task3, 0); - assert_int_equal(result, ISC_R_SUCCESS); - result = isc_task_create(taskmgr, 0, &task4, 0); - assert_int_equal(result, ISC_R_SUCCESS); - - isc_interval_set(&interval, 1, 0); - isc_timer_create(timermgr, task1, basic_tick, tick, &ti1); - result = isc_timer_reset(ti1, isc_timertype_ticker, &interval, false); - assert_int_equal(result, ISC_R_SUCCESS); - - ti2 = NULL; - isc_interval_set(&interval, 1, 0); - isc_timer_create(timermgr, task2, basic_tick, tock, &ti2); - result = isc_timer_reset(ti2, isc_timertype_ticker, &interval, false); - assert_int_equal(result, ISC_R_SUCCESS); - sleep(2); - for (i = 0; testarray[i] != NULL; i++) { + for (size_t i = 0; testarray[i] != NULL; i++) { /* * Note: (void *)1 is used as a sender here, 
since some * compilers don't like casting a function pointer to a @@ -253,20 +171,57 @@ ISC_RUN_TEST_IMPL(basic) { * structure (socket, timer, task, etc) but this is just a * test program. */ - event = isc_event_allocate(mctx, (void *)1, 1, basic_cb, - testarray[i], sizeof(*event)); + isc_event_t *event = isc_event_allocate(mctx, &task1, 1, + basic_cb, testarray[i], + sizeof(*event)); assert_non_null(event); isc_task_send(task1, &event); } +} + +static void +basic_after_work(void *arg) { + UNUSED(arg); + + sleep(5); isc_task_detach(&task1); isc_task_detach(&task2); isc_task_detach(&task3); isc_task_detach(&task4); - sleep(10); + sleep(5); + isc_timer_destroy(&ti1); isc_timer_destroy(&ti2); + + isc_loopmgr_shutdown(loopmgr); +} + +ISC_LOOP_TEST_IMPL(basic) { + isc_result_t result; + isc_interval_t interval; + + UNUSED(arg); + + result = isc_task_create(taskmgr, &task1, 0); + assert_int_equal(result, ISC_R_SUCCESS); + result = isc_task_create(taskmgr, &task2, 0); + assert_int_equal(result, ISC_R_SUCCESS); + result = isc_task_create(taskmgr, &task3, 0); + assert_int_equal(result, ISC_R_SUCCESS); + result = isc_task_create(taskmgr, &task4, 0); + assert_int_equal(result, ISC_R_SUCCESS); + + isc_interval_set(&interval, 1, 0); + isc_timer_create(mainloop, basic_tick, tick, &ti1); + isc_timer_start(ti1, isc_timertype_ticker, &interval); + + isc_interval_set(&interval, 1, 0); + isc_timer_create(mainloop, basic_tick, tock, &ti2); + isc_timer_start(ti2, isc_timertype_ticker, &interval); + + isc_work_enqueue(mainloop, basic_work, basic_after_work, NULL); } /* @@ -274,34 +229,16 @@ ISC_RUN_TEST_IMPL(basic) { * When one task enters exclusive mode, all other active * tasks complete first. 
*/ -static int -spin(int n) { - int i; - int r = 0; - for (i = 0; i < n; i++) { - r += i; - if (r > 1000000) { - r = 0; - } - } - return (r); -} static void exclusive_cb(isc_task_t *task, isc_event_t *event) { int taskno = *(int *)(event->ev_arg); - if (verbose) { - print_message("# task enter %d\n", taskno); - } - /* task chosen from the middle of the range */ if (taskno == 6) { - isc_result_t result; int i; - result = isc_task_beginexclusive(task); - assert_int_equal(result, ISC_R_SUCCESS); + isc_task_beginexclusive(task); for (i = 0; i < 10; i++) { assert_int_equal(active[i], 0); @@ -311,48 +248,51 @@ exclusive_cb(isc_task_t *task, isc_event_t *event) { atomic_store(&done, true); } else { active[taskno]++; - (void)spin(10000000); + isc_thread_yield(); active[taskno]--; } - if (verbose) { - print_message("# task exit %d\n", taskno); - } - if (atomic_load(&done)) { isc_mem_put(event->ev_destroy_arg, event->ev_arg, sizeof(int)); isc_event_free(&event); atomic_fetch_sub(&counter, 1); + isc_loopmgr_shutdown(loopmgr); } else { isc_task_send(task, &event); } } -ISC_RUN_TEST_IMPL(task_exclusive) { - isc_task_t *tasks[10]; - isc_result_t result; - int i; +isc_task_t *tasks[10] = { NULL }; - UNUSED(state); +ISC_LOOP_SETUP_IMPL(task_exclusive) { + isc_result_t result; atomic_init(&counter, 0); + atomic_init(&done, false); - for (i = 0; i < 10; i++) { - isc_event_t *event = NULL; - int *v; - - tasks[i] = NULL; + for (size_t i = 0; i < 10; i++) { + uint32_t tid = i % isc_loopmgr_nloops(loopmgr); if (i == 6) { /* task chosen from the middle of the range */ - result = isc_task_create(taskmgr, 0, &tasks[i], 0); + tid = 0; + result = isc_task_create(taskmgr, &tasks[i], tid); assert_int_equal(result, ISC_R_SUCCESS); - isc_taskmgr_setexcltask(taskmgr, tasks[6]); + isc_taskmgr_setexcltask(taskmgr, tasks[i]); } else { - result = isc_task_create(taskmgr, 0, &tasks[i], 0); + result = isc_task_create(taskmgr, &tasks[i], tid); assert_int_equal(result, ISC_R_SUCCESS); } + } +} + 
+ISC_LOOP_TEST_SETUP_IMPL(task_exclusive) { + UNUSED(arg); + + for (size_t i = 0; i < 10; i++) { + isc_event_t *event = NULL; + int *v; v = isc_mem_get(mctx, sizeof *v); assert_non_null(v); @@ -365,199 +305,57 @@ ISC_RUN_TEST_IMPL(task_exclusive) { isc_task_send(tasks[i], &event); atomic_fetch_add(&counter, 1); - } - - for (i = 0; i < 10; i++) { isc_task_detach(&tasks[i]); } - - while (atomic_load(&counter) > 0) { - uv_sleep(1); - } } -/* - * Max tasks test: - * The task system can create and execute many tasks. Tests with 10000. - */ - static void maxtask_cb(isc_task_t *task, isc_event_t *event) { isc_result_t result; uintptr_t ntasks = (uintptr_t)event->ev_arg; - if (ntasks-- > 0) { - task = NULL; + if (event->ev_arg != NULL) { + isc_task_t *newtask = NULL; - event->ev_arg = (void *)ntasks; + event->ev_arg = (void *)(ntasks - 1); /* * Create a new task and forward the message. */ - result = isc_task_create(taskmgr, 0, &task, 0); + result = isc_task_create(taskmgr, &newtask, 0); assert_int_equal(result, ISC_R_SUCCESS); - isc_task_send(task, &event); - isc_task_detach(&task); + isc_task_send(newtask, &event); } else { isc_event_free(&event); + isc_loopmgr_shutdown(loopmgr); + } - LOCK(&lock); - atomic_store(&done, true); - SIGNAL(&cv); - UNLOCK(&lock); + if (task != NULL) { + isc_task_detach(&task); } } -ISC_RUN_TEST_IMPL(manytasks) { +ISC_LOOP_TEST_IMPL(manytasks) { isc_event_t *event = NULL; - uintptr_t ntasks = 2; /* 0000; */ + uintptr_t ntasks = 10000; - UNUSED(state); + UNUSED(arg); - if (verbose) { - print_message("# Testing with %lu tasks\n", - (unsigned long)ntasks); - } - - atomic_init(&done, false); - - event = isc_event_allocate(mctx, NULL, 1, maxtask_cb, (void *)ntasks, - sizeof(*event)); + event = isc_event_allocate(mctx, (void *)1, 1, maxtask_cb, + (void *)ntasks, sizeof(*event)); assert_non_null(event); - LOCK(&lock); maxtask_cb(NULL, event); - while (!atomic_load(&done)) { - WAIT(&cv, &lock); - } - UNLOCK(&lock); -} - -/* - * Helper for the purge 
tests below: - */ - -#define SENDERCNT 3 -#define TYPECNT 4 -#define TAGCNT 5 -#define NEVENTS (SENDERCNT * TYPECNT * TAGCNT) - -static int eventcnt; - -atomic_bool started; - -/* - * Helpers for purge event tests - */ -static void -pge_event1(isc_task_t *task, isc_event_t *event) { - UNUSED(task); - - LOCK(&lock); - while (!atomic_load(&started)) { - WAIT(&cv, &lock); - } - UNLOCK(&lock); - - LOCK(&lock); - atomic_store(&done, true); - SIGNAL(&cv); - UNLOCK(&lock); - - isc_event_free(&event); -} - -static void -pge_event2(isc_task_t *task, isc_event_t *event) { - UNUSED(task); - - ++eventcnt; - isc_event_free(&event); -} - -static void -try_purgeevent(void) { - isc_result_t result; - isc_task_t *task = NULL; - bool purged; - isc_event_t *event1 = NULL; - isc_event_t *event2 = NULL; - isc_event_t *event2_clone = NULL; - isc_time_t now; - isc_interval_t interval; - - atomic_init(&started, false); - atomic_init(&done, false); - eventcnt = 0; - - result = isc_task_create(taskmgr, 0, &task, 0); - assert_int_equal(result, ISC_R_SUCCESS); - - /* - * Block the task on cv. - */ - event1 = isc_event_allocate(mctx, (void *)1, (isc_eventtype_t)1, - pge_event1, NULL, sizeof(*event1)); - assert_non_null(event1); - isc_task_send(task, &event1); - - event2 = isc_event_allocate(mctx, (void *)1, (isc_eventtype_t)1, - pge_event2, NULL, sizeof(*event2)); - assert_non_null(event2); - - event2_clone = event2; - - isc_task_send(task, &event2); - - purged = isc_task_purgeevent(task, event2_clone); - - assert_true(purged); - - /* - * Unblock the task, allowing event processing. - */ - LOCK(&lock); - atomic_store(&started, true); - SIGNAL(&cv); - - isc_interval_set(&interval, 5, 0); - - /* - * Wait for shutdown processing to complete. 
- */ - while (!atomic_load(&done)) { - result = isc_time_nowplusinterval(&now, &interval); - assert_int_equal(result, ISC_R_SUCCESS); - - WAITUNTIL(&cv, &lock, &now); - } - UNLOCK(&lock); - - isc_task_detach(&task); -} - -/* - * Purge event test: - * When the event is marked as purgeable, a call to - * isc_task_purgeevent(task, event) purges the event 'event' from the - * task's queue and returns true. - */ - -ISC_RUN_TEST_IMPL(purgeevent) { - UNUSED(state); - - try_purgeevent(); } ISC_TEST_LIST_START -ISC_TEST_ENTRY_CUSTOM(manytasks, _setup4, _teardown) -ISC_TEST_ENTRY_CUSTOM(all_events, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(basic, _setup2, _teardown) -ISC_TEST_ENTRY_CUSTOM(create_task, _setup, _teardown) -ISC_TEST_ENTRY_CUSTOM(purgeevent, _setup2, _teardown) -ISC_TEST_ENTRY_CUSTOM(task_exclusive, _setup4, _teardown) +ISC_TEST_ENTRY_CUSTOM(manytasks, setup_managers, teardown_managers) +ISC_TEST_ENTRY_CUSTOM(all_events, setup_managers, teardown_managers) +ISC_TEST_ENTRY_CUSTOM(basic, setup_managers, teardown_managers) +ISC_TEST_ENTRY_CUSTOM(create_task, setup_managers, teardown_managers) +ISC_TEST_ENTRY_CUSTOM(task_exclusive, setup_managers, teardown_managers) ISC_TEST_LIST_END diff --git a/tests/isccfg/duration_test.c b/tests/isccfg/duration_test.c index 1fa15e32d9..7cb6653414 100644 --- a/tests/isccfg/duration_test.c +++ b/tests/isccfg/duration_test.c @@ -36,7 +36,6 @@ #include -isc_log_t *lctx = NULL; static isc_logcategory_t categories[] = { { "", 0 }, { "client", 0 }, { "network", 0 }, diff --git a/tests/isccfg/parser_test.c b/tests/isccfg/parser_test.c index 9a524ea2ad..7e4f02ba12 100644 --- a/tests/isccfg/parser_test.c +++ b/tests/isccfg/parser_test.c @@ -38,7 +38,6 @@ #include -isc_log_t *lctx = NULL; static isc_logcategory_t categories[] = { { "", 0 }, { "client", 0 }, { "network", 0 }, diff --git a/tests/libtest/dns.c b/tests/libtest/dns.c index bbe8ce6e43..ec3dd1e5f9 100644 --- a/tests/libtest/dns.c +++ b/tests/libtest/dns.c @@ -72,7 +72,7 @@ 
dns_test_makeview(const char *name, bool with_cache, dns_view_t **viewp) { } if (with_cache) { - result = dns_cache_create(mctx, mctx, taskmgr, timermgr, + result = dns_cache_create(mctx, mctx, taskmgr, dns_rdataclass_in, "", "rbt", 0, NULL, &cache); if (result != ISC_R_SUCCESS) { @@ -163,7 +163,7 @@ dns_test_setupzonemgr(void) { isc_result_t result; REQUIRE(zonemgr == NULL); - result = dns_zonemgr_create(mctx, taskmgr, timermgr, netmgr, &zonemgr); + result = dns_zonemgr_create(mctx, loopmgr, taskmgr, netmgr, &zonemgr); return (result); } diff --git a/tests/libtest/isc.c b/tests/libtest/isc.c index 1163c4caa1..b0f12576f2 100644 --- a/tests/libtest/isc.c +++ b/tests/libtest/isc.c @@ -30,20 +30,15 @@ #include #include -#include "netmgr_p.h" -#include "task_p.h" -#include "timer_p.h" - #include isc_mem_t *mctx = NULL; -isc_loopmgr_t *loopmgr = NULL; +isc_log_t *lctx = NULL; isc_loop_t *mainloop = NULL; +isc_loopmgr_t *loopmgr = NULL; isc_taskmgr_t *taskmgr = NULL; -isc_timermgr_t *timermgr = NULL; isc_nm_t *netmgr = NULL; -unsigned int workers = 0; -isc_task_t *maintask = NULL; +unsigned int workers = -1; int setup_mctx(void **state __attribute__((__unused__))) { @@ -92,46 +87,54 @@ teardown_loopmgr(void **state __attribute__((__unused__))) { return (0); } +int +setup_taskmgr(void **state __attribute__((__unused__))) { + REQUIRE(loopmgr != NULL); + + isc_taskmgr_create(mctx, loopmgr, &taskmgr); + + return (0); +} + +int +teardown_taskmgr(void **state __attribute__((__unused__))) { + isc_taskmgr_destroy(&taskmgr); + + return (0); +} + +int +setup_netmgr(void **state __attribute__((__unused__))) { + REQUIRE(loopmgr != NULL); + + isc_netmgr_create(mctx, loopmgr, &netmgr); + + return (0); +} + +int +teardown_netmgr(void **state __attribute__((__unused__))) { + REQUIRE(loopmgr != NULL); + + isc_netmgr_destroy(&netmgr); + + return (0); +} + int setup_managers(void **state) { - isc_result_t result; - - UNUSED(state); - - REQUIRE(mctx != NULL); - - if (workers == 0) { - char 
*env_workers = getenv("ISC_TASK_WORKERS"); - if (env_workers != NULL) { - workers = atoi(env_workers); - } else { - workers = isc_os_ncpus(); - } - INSIST(workers > 0); - } - - result = isc_managers_create(mctx, workers, 0, &netmgr, &taskmgr, - &timermgr); - if (result != ISC_R_SUCCESS) { - return (-1); - } - - result = isc_task_create(taskmgr, 0, &maintask, 0); - if (result != ISC_R_SUCCESS) { - return (-1); - } - - isc_taskmgr_setexcltask(taskmgr, maintask); + setup_loopmgr(state); + setup_taskmgr(state); + setup_netmgr(state); return (0); } int teardown_managers(void **state) { - UNUSED(state); - - isc_task_detach(&maintask); - isc_managers_destroy(&netmgr, &taskmgr, &timermgr); + teardown_netmgr(state); + teardown_taskmgr(state); + teardown_loopmgr(state); return (0); } diff --git a/tests/libtest/ns.c b/tests/libtest/ns.c index b85a215401..e79b72d4e3 100644 --- a/tests/libtest/ns.c +++ b/tests/libtest/ns.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include #include #include @@ -52,11 +54,10 @@ #include +isc_task_t *maintask = NULL; dns_dispatchmgr_t *dispatchmgr = NULL; -ns_clientmgr_t *clientmgr = NULL; ns_interfacemgr_t *interfacemgr = NULL; ns_server_t *sctx = NULL; -bool debug_mem_record = true; static isc_result_t matchview(isc_netaddr_t *srcaddr, isc_netaddr_t *destaddr, @@ -72,6 +73,12 @@ matchview(isc_netaddr_t *srcaddr, isc_netaddr_t *destaddr, return (ISC_R_NOTIMPLEMENTED); } +static void +scan_interfaces(void *arg) { + UNUSED(arg); + ns_interfacemgr_scan(interfacemgr, true, false); +} + int setup_server(void **state) { isc_result_t result; @@ -84,36 +91,45 @@ setup_server(void **state) { result = dns_dispatchmgr_create(mctx, netmgr, &dispatchmgr); if (result != ISC_R_SUCCESS) { - return (-1); + goto cleanup; } - result = ns_interfacemgr_create(mctx, sctx, taskmgr, timermgr, netmgr, + result = ns_interfacemgr_create(mctx, sctx, loopmgr, taskmgr, netmgr, dispatchmgr, maintask, NULL, false, &interfacemgr); if (result != 
ISC_R_SUCCESS) { - return (-1); + goto cleanup; } result = ns_listenlist_default(mctx, port, -1, true, AF_INET, &listenon); if (result != ISC_R_SUCCESS) { - return (-1); + goto cleanup; } ns_interfacemgr_setlistenon4(interfacemgr, listenon); ns_listenlist_detach(&listenon); - clientmgr = ns_interfacemgr_getclientmgr(interfacemgr); + isc_loop_setup(mainloop, scan_interfaces, NULL); return (0); + +cleanup: + teardown_server(state); + return (-1); } -int -teardown_server(void **state) { +void +shutdown_interfacemgr(void *arg __attribute__((__unused__))) { if (interfacemgr != NULL) { ns_interfacemgr_shutdown(interfacemgr); ns_interfacemgr_detach(&interfacemgr); } +} + +int +teardown_server(void **state) { + shutdown_interfacemgr(NULL); if (dispatchmgr != NULL) { dns_dispatchmgr_detach(&dispatchmgr); @@ -129,57 +145,6 @@ teardown_server(void **state) { static dns_zone_t *served_zone = NULL; -/* - * We don't want to use netmgr-based client accounting, we need to emulate it. - */ -atomic_uint_fast32_t client_refs[32]; -atomic_uintptr_t client_addrs[32]; - -void -isc__nmhandle_attach(isc_nmhandle_t *source, isc_nmhandle_t **targetp FLARG) { - ns_client_t *client = (ns_client_t *)source; - int i; - - for (i = 0; i < 32; i++) { - if (atomic_load(&client_addrs[i]) == (uintptr_t)client) { - break; - } - } - INSIST(i < 32); - INSIST(atomic_load(&client_refs[i]) > 0); - - atomic_fetch_add(&client_refs[i], 1); - - *targetp = source; - return; -} - -void -isc__nmhandle_detach(isc_nmhandle_t **handlep FLARG) { - isc_nmhandle_t *handle = *handlep; - ns_client_t *client = (ns_client_t *)handle; - int i; - - *handlep = NULL; - - for (i = 0; i < 32; i++) { - if (atomic_load(&client_addrs[i]) == (uintptr_t)client) { - break; - } - } - INSIST(i < 32); - - if (atomic_fetch_sub(&client_refs[i], 1) == 1) { - dns_view_detach(&client->view); - client->state = 4; - ns__client_reset_cb(client); - ns__client_put_cb(client); - atomic_store(&client_addrs[i], (uintptr_t)NULL); - } - - return; -} - 
isc_result_t ns_test_serve_zone(const char *zonename, const char *filename, dns_view_t *view) { @@ -256,12 +221,16 @@ ns_test_cleanup_zone(void) { isc_result_t ns_test_getclient(ns_interface_t *ifp0, bool tcp, ns_client_t **clientp) { isc_result_t result; - ns_client_t *client = isc_mem_get(clientmgr->mctx, sizeof(*client)); + ns_client_t *client; + ns_clientmgr_t *clientmgr; int i; UNUSED(ifp0); UNUSED(tcp); + clientmgr = ns_interfacemgr_getclientmgr(interfacemgr); + + client = isc_mem_get(clientmgr->mctx, sizeof(*client)); result = ns__client_setup(client, clientmgr, true); for (i = 0; i < 32; i++) { @@ -637,10 +606,12 @@ ns_test_getdata(const char *file, unsigned char *buf, size_t bufsiz, continue; } if (len % 2 != 0U) { - CHECK(ISC_R_UNEXPECTEDEND); + result = ISC_R_UNEXPECTEDEND; + goto cleanup; } if (len > bufsiz * 2) { - CHECK(ISC_R_NOSPACE); + result = ISC_R_NOSPACE; + goto cleanup; } rp = s; for (i = 0; i < len; i += 2) { diff --git a/tests/ns/Makefile.am b/tests/ns/Makefile.am index 33f91d8c7b..c26bec85e6 100644 --- a/tests/ns/Makefile.am +++ b/tests/ns/Makefile.am @@ -20,6 +20,14 @@ check_PROGRAMS = \ plugin_test \ query_test +notify_test_SOURCES = \ + notify_test.c \ + netmgr_wrap.c + +query_test_SOURCES = \ + query_test.c \ + netmgr_wrap.c + EXTRA_DIST = testdata include $(top_srcdir)/Makefile.tests diff --git a/tests/ns/listenlist_test.c b/tests/ns/listenlist_test.c index 565b943f7d..a8cb8d37d5 100644 --- a/tests/ns/listenlist_test.c +++ b/tests/ns/listenlist_test.c @@ -35,24 +35,6 @@ #include -static int -_setup(void **state) { - isc__nm_force_tid(0); - - setup_managers(state); - - return (0); -} - -static int -_teardown(void **state) { - isc__nm_force_tid(-1); - - teardown_managers(state); - - return (0); -} - /* test that ns_listenlist_default() works */ ISC_RUN_TEST_IMPL(ns_listenlist_default) { isc_result_t result; @@ -111,9 +93,7 @@ ISC_RUN_TEST_IMPL(ns_listenlist_default) { } ISC_TEST_LIST_START - -ISC_TEST_ENTRY_CUSTOM(ns_listenlist_default, 
_setup, _teardown) - +ISC_TEST_ENTRY(ns_listenlist_default) ISC_TEST_LIST_END ISC_TEST_MAIN diff --git a/tests/ns/netmgr_wrap.c b/tests/ns/netmgr_wrap.c new file mode 100644 index 0000000000..076d9954bf --- /dev/null +++ b/tests/ns/netmgr_wrap.c @@ -0,0 +1,90 @@ +/* + * Copyright (C) Internet Systems Consortium, Inc. ("ISC") + * + * SPDX-License-Identifier: MPL-2.0 + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, you can obtain one at https://mozilla.org/MPL/2.0/. + * + * See the COPYRIGHT file distributed with this work for additional + * information regarding copyright ownership. + */ + +/*! \file */ + +#include +#include +#include + +#include + +#include + +#ifdef NETMGR_TRACE +#define FLARG \ + , const char *file __attribute__((unused)), \ + unsigned int line __attribute__((unused)), \ + const char *func __attribute__((unused)) +#else +#define FLARG +#endif + +/* + * We don't want to use netmgr-based client accounting, we need to emulate it. 
+ */ +atomic_uint_fast32_t client_refs[32]; +atomic_uintptr_t client_addrs[32]; + +void +isc__nmhandle_attach(isc_nmhandle_t *source, isc_nmhandle_t **targetp FLARG) { + ns_client_t *client = (ns_client_t *)source; + int i; + + for (i = 0; i < 32; i++) { + if (atomic_load(&client_addrs[i]) == (uintptr_t)client) { + break; + } + } + INSIST(i < 32); + INSIST(atomic_load(&client_refs[i]) > 0); + + atomic_fetch_add(&client_refs[i], 1); +#if 0 + fprintf(stderr, "%s:%s:%s:%d -> %ld\n", __func__, func, file, line, + client_refs[i]); +#endif + + *targetp = source; + return; +} + +void +isc__nmhandle_detach(isc_nmhandle_t **handlep FLARG) { + isc_nmhandle_t *handle = *handlep; + ns_client_t *client = (ns_client_t *)handle; + int i; + + *handlep = NULL; + + for (i = 0; i < 32; i++) { + if (atomic_load(&client_addrs[i]) == (uintptr_t)client) { + break; + } + } + INSIST(i < 32); + + if (atomic_fetch_sub(&client_refs[i], 1) == 1) { + dns_view_detach(&client->view); + client->state = 4; + ns__client_reset_cb(client); + ns__client_put_cb(client); + atomic_store(&client_addrs[i], (uintptr_t)NULL); + } +#if 0 + fprintf(stderr, "%s:%s:%s:%d -> %ld\n", __func__, func, file, line, + client_refs[i]); +#endif + + return; +} diff --git a/tests/ns/notify_test.c b/tests/ns/notify_test.c index 548ad79784..bc9b951a28 100644 --- a/tests/ns/notify_test.c +++ b/tests/ns/notify_test.c @@ -37,21 +37,8 @@ #include #include -#include #include -static int -setup_test(void **state) { - isc__nm_force_tid(0); - return (setup_server(state)); -} - -static int -teardown_test(void **state) { - isc__nm_force_tid(-1); - return (teardown_server(state)); -} - static void check_response(isc_buffer_t *buf) { isc_result_t result; @@ -74,7 +61,7 @@ check_response(isc_buffer_t *buf) { } /* test ns_notify_start() */ -ISC_RUN_TEST_IMPL(ns_notify_start) { +ISC_LOOP_TEST_IMPL(notify_start) { isc_result_t result; ns_client_t *client = NULL; isc_nmhandle_t *handle = NULL; @@ -83,8 +70,6 @@ 
ISC_RUN_TEST_IMPL(ns_notify_start) { isc_buffer_t nbuf; size_t nsize; - UNUSED(state); - result = ns_test_getclient(NULL, false, &client); assert_int_equal(result, ISC_R_SUCCESS); @@ -132,10 +117,13 @@ ISC_RUN_TEST_IMPL(ns_notify_start) { handle = client->handle; isc_nmhandle_detach(&client->handle); isc_nmhandle_detach(&handle); + + isc_loop_teardown(mainloop, shutdown_interfacemgr, NULL); + isc_loopmgr_shutdown(loopmgr); } ISC_TEST_LIST_START -ISC_TEST_ENTRY_CUSTOM(ns_notify_start, setup_test, teardown_test) +ISC_TEST_ENTRY_CUSTOM(notify_start, setup_server, teardown_server) ISC_TEST_LIST_END ISC_TEST_MAIN diff --git a/tests/ns/plugin_test.c b/tests/ns/plugin_test.c index 6434fae3a4..90d6bb14de 100644 --- a/tests/ns/plugin_test.c +++ b/tests/ns/plugin_test.c @@ -30,9 +30,6 @@ #include #include -noreturn void -_fail(const char *const file, const int line); - #include #include @@ -157,9 +154,7 @@ ISC_RUN_TEST_IMPL(ns_plugin_expandpath) { } ISC_TEST_LIST_START - -ISC_TEST_ENTRY_CUSTOM(ns_plugin_expandpath, setup_managers, teardown_managers) - +ISC_TEST_ENTRY(ns_plugin_expandpath) ISC_TEST_LIST_END ISC_TEST_MAIN diff --git a/tests/ns/query_test.c b/tests/ns/query_test.c index eabfec40bd..0e95af3fc0 100644 --- a/tests/ns/query_test.c +++ b/tests/ns/query_test.c @@ -40,20 +40,6 @@ #include -static int -setup_test(void **state) { - isc__nm_force_tid(0); - setup_server(state); - return (0); -} - -static int -teardown_test(void **state) { - isc__nm_force_tid(-1); - teardown_server(state); - return (0); -} - /* can be used for client->sendcb to avoid disruption on sending a response */ static void send_noop(isc_buffer_t *buffer) { @@ -61,8 +47,8 @@ send_noop(isc_buffer_t *buffer) { } /***** -***** ns__query_sfcache() tests -*****/ + ***** ns__query_sfcache() tests + *****/ /*% * Structure containing parameters for ns__query_sfcache_test(). 
@@ -170,9 +156,7 @@ run_sfcache_test(const ns__query_sfcache_test_params_t *test) { } /* test ns__query_sfcache() */ -ISC_RUN_TEST_IMPL(ns_query_sfcache) { - size_t i; - +ISC_LOOP_TEST_IMPL(ns__query_sfcache) { const ns__query_sfcache_test_params_t tests[] = { /* * Sanity check for an empty SERVFAIL cache. @@ -243,11 +227,12 @@ ISC_RUN_TEST_IMPL(ns_query_sfcache) { }, }; - UNUSED(state); - - for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { run_sfcache_test(&tests[i]); } + + isc_loop_teardown(mainloop, shutdown_interfacemgr, NULL); + isc_loopmgr_shutdown(loopmgr); } /***** @@ -312,7 +297,6 @@ run_start_test(const ns__query_start_test_params_t *test) { /* * Interrupt execution if query_lookup() or ns_query_done() is called. */ - ns_hooktable_create(mctx, &query_hooks); ns_hook_add(query_hooks, mctx, NS_QUERY_LOOKUP_BEGIN, &hook); ns_hook_add(query_hooks, mctx, NS_QUERY_DONE_BEGIN, &hook); @@ -434,7 +418,7 @@ run_start_test(const ns__query_start_test_params_t *test) { } /* test ns__query_start() */ -ISC_RUN_TEST_IMPL(ns_query_start) { +ISC_LOOP_TEST_IMPL(ns__query_start) { size_t i; const ns__query_start_test_params_t tests[] = { @@ -595,11 +579,12 @@ ISC_RUN_TEST_IMPL(ns_query_start) { }, }; - UNUSED(state); - for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { run_start_test(&tests[i]); } + + isc_loop_teardown(mainloop, shutdown_interfacemgr, NULL); + isc_loopmgr_shutdown(loopmgr); } /***** @@ -1013,11 +998,9 @@ run_hookasync_test(const ns__query_hookasync_test_params_t *test) { } } -ISC_RUN_TEST_IMPL(ns_query_hookasync) { +ISC_LOOP_TEST_IMPL(ns__query_hookasync) { size_t i; - UNUSED(state); - const ns__query_hookasync_test_params_t tests[] = { { NS_TEST_ID("normal case"), @@ -1253,6 +1236,9 @@ ISC_RUN_TEST_IMPL(ns_query_hookasync) { for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { run_hookasync_test(&tests[i]); } + + isc_loop_teardown(mainloop, shutdown_interfacemgr, NULL); + 
isc_loopmgr_shutdown(loopmgr); } /***** @@ -1446,9 +1432,7 @@ run_hookasync_e2e_test(const ns__query_hookasync_e2e_test_params_t *test) { ns_hooktable_free(mctx, (void **)&ns__hook_table); } -ISC_RUN_TEST_IMPL(ns_query_hookasync_e2e) { - UNUSED(state); - +ISC_LOOP_TEST_IMPL(ns__query_hookasync_e2e) { const ns__query_hookasync_e2e_test_params_t tests[] = { { NS_TEST_ID("positive answer"), @@ -1487,14 +1471,16 @@ ISC_RUN_TEST_IMPL(ns_query_hookasync_e2e) { for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { run_hookasync_e2e_test(&tests[i]); } + + isc_loop_teardown(mainloop, shutdown_interfacemgr, NULL); + isc_loopmgr_shutdown(loopmgr); } ISC_TEST_LIST_START - -ISC_TEST_ENTRY_CUSTOM(ns_query_sfcache, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(ns_query_start, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(ns_query_hookasync, setup_test, teardown_test) -ISC_TEST_ENTRY_CUSTOM(ns_query_hookasync_e2e, setup_test, teardown_test) - +ISC_TEST_ENTRY_CUSTOM(ns__query_sfcache, setup_server, teardown_server) +ISC_TEST_ENTRY_CUSTOM(ns__query_start, setup_server, teardown_server) +ISC_TEST_ENTRY_CUSTOM(ns__query_hookasync, setup_server, teardown_server) +ISC_TEST_ENTRY_CUSTOM(ns__query_hookasync_e2e, setup_server, teardown_server) ISC_TEST_LIST_END + ISC_TEST_MAIN