2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-30 14:07:59 +00:00

Refactor taskmgr to run on top of netmgr

This commit changes the taskmgr to run the individual tasks on the
netmgr internal workers.  While an effort has been put into keeping the
taskmgr interface intact, a couple of changes have been made:

 * The taskmgr has no concept of universal privileged mode - rather the
   tasks are either privileged or unprivileged (normal).  The privileged
   tasks are run as a first thing when the netmgr is unpaused.  There
   are now four different queues in the netmgr:

   1. priority queue - netievents on the priority queue are run even when
      the taskmgr enters exclusive mode and the netmgr is paused.  This is
      needed to properly start listening on the interfaces, free
      resources and resume.

   2. privileged task queue - only privileged tasks are queued here and
      this is the first queue that gets processed when network manager
      is unpaused using isc_nm_resume().  All netmgr workers need to
      clean the privileged task queue before they all proceed to normal
      operation.  Both task queues are processed when the workers are
      finished.

   3. task queue - only (traditional) tasks are scheduled here and this
      queue, along with the privileged task queue, is processed when the
      netmgr workers are finishing.  This is needed to process the task
      shutdown events.

   4. normal queue - this is the queue with netmgr events, e.g. reading,
      sending, callbacks and pretty much everything is processed here.

 * The isc_taskmgr_create() now requires initialized netmgr (isc_nm_t)
   object.

 * The isc_nm_destroy() function now waits for an indefinite time, but it
   will print out the active objects when in tracing mode
   (-DNETMGR_TRACE=1 and -DNETMGR_TRACE_VERBOSE=1).  The netmgr has been
   made a little bit more asynchronous and it might take a longer time
   to shut down all the active networking connections.

 * Previously, the isc_nm_stoplistening() was a synchronous operation.
   This has been changed and the isc_nm_stoplistening() just schedules
   the child sockets to stop listening and exits.  This was needed to
   prevent a deadlock as the (traditional) tasks are now executed on
   the netmgr threads.

 * The socket selection logic in isc__nm_udp_send() was flawed, but
   fortunately, it was broken, so we never hit the problem where we
   created uvreq_t on a socket from nmhandle_t, but then a different
   socket could be picked up and then we were trying to run the send
   callback on a socket that had different threadid than currently
   running.
This commit is contained in:
Ondřej Surý 2021-04-09 11:31:19 +02:00 committed by Ondřej Surý
parent ae9edb1861
commit b540722bc3
38 changed files with 844 additions and 1264 deletions

View File

@ -36,6 +36,7 @@
#include <isc/log.h> #include <isc/log.h>
#include <isc/md.h> #include <isc/md.h>
#include <isc/mem.h> #include <isc/mem.h>
#include <isc/netmgr.h>
#ifdef WIN32 #ifdef WIN32
#include <isc/ntpaths.h> #include <isc/ntpaths.h>
#endif /* ifdef WIN32 */ #endif /* ifdef WIN32 */
@ -1736,6 +1737,7 @@ main(int argc, char *argv[]) {
dns_namelist_t namelist; dns_namelist_t namelist;
unsigned int resopt; unsigned int resopt;
isc_appctx_t *actx = NULL; isc_appctx_t *actx = NULL;
isc_nm_t *netmgr = NULL;
isc_taskmgr_t *taskmgr = NULL; isc_taskmgr_t *taskmgr = NULL;
isc_socketmgr_t *socketmgr = NULL; isc_socketmgr_t *socketmgr = NULL;
isc_timermgr_t *timermgr = NULL; isc_timermgr_t *timermgr = NULL;
@ -1759,7 +1761,8 @@ main(int argc, char *argv[]) {
isc_mem_create(&mctx); isc_mem_create(&mctx);
CHECK(isc_appctx_create(mctx, &actx)); CHECK(isc_appctx_create(mctx, &actx));
CHECK(isc_taskmgr_create(mctx, 1, 0, NULL, &taskmgr)); netmgr = isc_nm_start(mctx, 1);
CHECK(isc_taskmgr_create(mctx, 0, netmgr, &taskmgr));
CHECK(isc_socketmgr_create(mctx, &socketmgr)); CHECK(isc_socketmgr_create(mctx, &socketmgr));
CHECK(isc_timermgr_create(mctx, &timermgr)); CHECK(isc_timermgr_create(mctx, &timermgr));
@ -1867,6 +1870,9 @@ cleanup:
if (taskmgr != NULL) { if (taskmgr != NULL) {
isc_taskmgr_destroy(&taskmgr); isc_taskmgr_destroy(&taskmgr);
} }
if (netmgr != NULL) {
isc_nm_destroy(&netmgr);
}
if (timermgr != NULL) { if (timermgr != NULL) {
isc_timermgr_destroy(&timermgr); isc_timermgr_destroy(&timermgr);
} }

View File

@ -1362,7 +1362,7 @@ setup_libs(void) {
netmgr = isc_nm_start(mctx, 1); netmgr = isc_nm_start(mctx, 1);
result = isc_taskmgr_create(mctx, 1, 0, netmgr, &taskmgr); result = isc_taskmgr_create(mctx, 0, netmgr, &taskmgr);
check_result(result, "isc_taskmgr_create"); check_result(result, "isc_taskmgr_create");
result = isc_task_create(taskmgr, 0, &global_task); result = isc_task_create(taskmgr, 0, &global_task);

View File

@ -144,6 +144,7 @@ static unsigned int nsigned = 0, nretained = 0, ndropped = 0;
static unsigned int nverified = 0, nverifyfailed = 0; static unsigned int nverified = 0, nverifyfailed = 0;
static const char *directory = NULL, *dsdir = NULL; static const char *directory = NULL, *dsdir = NULL;
static isc_mutex_t namelock, statslock; static isc_mutex_t namelock, statslock;
static isc_nm_t *netmgr = NULL;
static isc_taskmgr_t *taskmgr = NULL; static isc_taskmgr_t *taskmgr = NULL;
static dns_db_t *gdb; /* The database */ static dns_db_t *gdb; /* The database */
static dns_dbversion_t *gversion; /* The database version */ static dns_dbversion_t *gversion; /* The database version */
@ -3953,7 +3954,9 @@ main(int argc, char *argv[]) {
print_time(outfp); print_time(outfp);
print_version(outfp); print_version(outfp);
result = isc_taskmgr_create(mctx, ntasks, 0, NULL, &taskmgr); netmgr = isc_nm_start(mctx, ntasks);
result = isc_taskmgr_create(mctx, 0, netmgr, &taskmgr);
if (result != ISC_R_SUCCESS) { if (result != ISC_R_SUCCESS) {
fatal("failed to create task manager: %s", fatal("failed to create task manager: %s",
isc_result_totext(result)); isc_result_totext(result));
@ -4009,6 +4012,7 @@ main(int argc, char *argv[]) {
isc_task_detach(&tasks[i]); isc_task_detach(&tasks[i]);
} }
isc_taskmgr_destroy(&taskmgr); isc_taskmgr_destroy(&taskmgr);
isc_nm_destroy(&netmgr);
isc_mem_put(mctx, tasks, ntasks * sizeof(isc_task_t *)); isc_mem_put(mctx, tasks, ntasks * sizeof(isc_task_t *));
postsign(); postsign();
TIME_NOW(&sign_finish); TIME_NOW(&sign_finish);

View File

@ -960,7 +960,7 @@ create_managers(void) {
return (ISC_R_UNEXPECTED); return (ISC_R_UNEXPECTED);
} }
result = isc_taskmgr_create(named_g_mctx, named_g_cpus, 0, named_g_nm, result = isc_taskmgr_create(named_g_mctx, 0, named_g_nm,
&named_g_taskmgr); &named_g_taskmgr);
if (result != ISC_R_SUCCESS) { if (result != ISC_R_SUCCESS) {
UNEXPECTED_ERROR(__FILE__, __LINE__, UNEXPECTED_ERROR(__FILE__, __LINE__,
@ -1011,13 +1011,9 @@ destroy_managers(void) {
* isc_taskmgr_destroy() will block until all tasks have exited. * isc_taskmgr_destroy() will block until all tasks have exited.
*/ */
isc_taskmgr_destroy(&named_g_taskmgr); isc_taskmgr_destroy(&named_g_taskmgr);
isc_nm_destroy(&named_g_nm);
isc_timermgr_destroy(&named_g_timermgr); isc_timermgr_destroy(&named_g_timermgr);
isc_socketmgr_destroy(&named_g_socketmgr); isc_socketmgr_destroy(&named_g_socketmgr);
/*
* At this point is safe to destroy the netmgr.
*/
isc_nm_destroy(&named_g_nm);
} }
static void static void

View File

@ -9866,7 +9866,7 @@ view_loaded(void *arg) {
} }
static isc_result_t static isc_result_t
load_zones(named_server_t *server, bool init, bool reconfig) { load_zones(named_server_t *server, bool reconfig) {
isc_result_t result; isc_result_t result;
dns_view_t *view; dns_view_t *view;
ns_zoneload_t *zl; ns_zoneload_t *zl;
@ -9921,16 +9921,6 @@ cleanup:
if (isc_refcount_decrement(&zl->refs) == 1) { if (isc_refcount_decrement(&zl->refs) == 1) {
isc_refcount_destroy(&zl->refs); isc_refcount_destroy(&zl->refs);
isc_mem_put(server->mctx, zl, sizeof(*zl)); isc_mem_put(server->mctx, zl, sizeof(*zl));
} else if (init) {
/*
* Place the task manager into privileged mode. This
* ensures that after we leave task-exclusive mode, no
* other tasks will be able to run except for the ones
* that are loading zones. (This should only be done during
* the initial server setup; it isn't necessary during
* a reload.)
*/
isc_taskmgr_setprivilegedmode(named_g_taskmgr);
} }
isc_task_endexclusive(server->task); isc_task_endexclusive(server->task);
@ -9998,7 +9988,7 @@ run_server(isc_task_t *task, isc_event_t *event) {
CHECKFATAL(load_configuration(named_g_conffile, server, true), CHECKFATAL(load_configuration(named_g_conffile, server, true),
"loading configuration"); "loading configuration");
CHECKFATAL(load_zones(server, true, false), "loading zones"); CHECKFATAL(load_zones(server, false), "loading zones");
#ifdef ENABLE_AFL #ifdef ENABLE_AFL
named_g_run_done = true; named_g_run_done = true;
#endif /* ifdef ENABLE_AFL */ #endif /* ifdef ENABLE_AFL */
@ -10511,7 +10501,7 @@ reload(named_server_t *server) {
CHECK(loadconfig(server)); CHECK(loadconfig(server));
result = load_zones(server, false, false); result = load_zones(server, false);
if (result == ISC_R_SUCCESS) { if (result == ISC_R_SUCCESS) {
isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL, isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
NAMED_LOGMODULE_SERVER, ISC_LOG_INFO, NAMED_LOGMODULE_SERVER, ISC_LOG_INFO,
@ -10880,7 +10870,7 @@ named_server_reconfigcommand(named_server_t *server) {
CHECK(loadconfig(server)); CHECK(loadconfig(server));
result = load_zones(server, false, true); result = load_zones(server, true);
if (result == ISC_R_SUCCESS) { if (result == ISC_R_SUCCESS) {
isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL, isc_log_write(named_g_lctx, NAMED_LOGCATEGORY_GENERAL,
NAMED_LOGMODULE_SERVER, ISC_LOG_INFO, NAMED_LOGMODULE_SERVER, ISC_LOG_INFO,

View File

@ -125,6 +125,7 @@ static bool usegsstsig = false;
static bool use_win2k_gsstsig = false; static bool use_win2k_gsstsig = false;
static bool tried_other_gsstsig = false; static bool tried_other_gsstsig = false;
static bool local_only = false; static bool local_only = false;
static isc_nm_t *netmgr = NULL;
static isc_taskmgr_t *taskmgr = NULL; static isc_taskmgr_t *taskmgr = NULL;
static isc_task_t *global_task = NULL; static isc_task_t *global_task = NULL;
static isc_event_t *global_event = NULL; static isc_event_t *global_event = NULL;
@ -927,7 +928,9 @@ setup_system(void) {
result = isc_timermgr_create(gmctx, &timermgr); result = isc_timermgr_create(gmctx, &timermgr);
check_result(result, "dns_timermgr_create"); check_result(result, "dns_timermgr_create");
result = isc_taskmgr_create(gmctx, 1, 0, NULL, &taskmgr); netmgr = isc_nm_start(gmctx, 1);
result = isc_taskmgr_create(gmctx, 0, netmgr, &taskmgr);
check_result(result, "isc_taskmgr_create"); check_result(result, "isc_taskmgr_create");
result = isc_task_create(taskmgr, 0, &global_task); result = isc_task_create(taskmgr, 0, &global_task);
@ -3311,6 +3314,9 @@ cleanup(void) {
ddebug("Shutting down task manager"); ddebug("Shutting down task manager");
isc_taskmgr_destroy(&taskmgr); isc_taskmgr_destroy(&taskmgr);
ddebug("Shutting down network manager");
isc_nm_destroy(&netmgr);
ddebug("Destroying event"); ddebug("Destroying event");
isc_event_free(&global_event); isc_event_free(&global_event);

View File

@ -1032,7 +1032,7 @@ main(int argc, char **argv) {
isc_mem_create(&rndc_mctx); isc_mem_create(&rndc_mctx);
netmgr = isc_nm_start(rndc_mctx, 1); netmgr = isc_nm_start(rndc_mctx, 1);
DO("create task manager", DO("create task manager",
isc_taskmgr_create(rndc_mctx, 1, 0, netmgr, &taskmgr)); isc_taskmgr_create(rndc_mctx, 0, netmgr, &taskmgr));
DO("create task", isc_task_create(taskmgr, 0, &rndc_task)); DO("create task", isc_task_create(taskmgr, 0, &rndc_task));
isc_log_create(rndc_mctx, &log, &logconfig); isc_log_create(rndc_mctx, &log, &logconfig);
isc_log_setcontext(log); isc_log_setcontext(log);

View File

@ -13,6 +13,7 @@ named.run
parallel.mk parallel.mk
/*.log /*.log
/*.trs /*.trs
/resolve
/run.sh /run.sh
/run.log /run.log
/start.sh /start.sh

View File

@ -206,7 +206,8 @@ main(int argc, char *argv[]) {
isc_result_t result; isc_result_t result;
isc_log_t *lctx; isc_log_t *lctx;
isc_logconfig_t *lcfg; isc_logconfig_t *lcfg;
isc_taskmgr_t *taskmgr; isc_nm_t *netmgr = NULL;
isc_taskmgr_t *taskmgr = NULL;
isc_task_t *task; isc_task_t *task;
isc_timermgr_t *timermgr; isc_timermgr_t *timermgr;
isc_socketmgr_t *socketmgr; isc_socketmgr_t *socketmgr;
@ -276,8 +277,9 @@ main(int argc, char *argv[]) {
RUNCHECK(dst_lib_init(mctx, NULL)); RUNCHECK(dst_lib_init(mctx, NULL));
taskmgr = NULL; netmgr = isc_nm_start(mctx, 1);
RUNCHECK(isc_taskmgr_create(mctx, 1, 0, NULL, &taskmgr));
RUNCHECK(isc_taskmgr_create(mctx, 0, netmgr, &taskmgr));
task = NULL; task = NULL;
RUNCHECK(isc_task_create(taskmgr, 0, &task)); RUNCHECK(isc_task_create(taskmgr, 0, &task));
timermgr = NULL; timermgr = NULL;
@ -322,6 +324,7 @@ main(int argc, char *argv[]) {
isc_task_shutdown(task); isc_task_shutdown(task);
isc_task_detach(&task); isc_task_detach(&task);
isc_taskmgr_destroy(&taskmgr); isc_taskmgr_destroy(&taskmgr);
isc_nm_destroy(&netmgr);
dst_lib_destroy(); dst_lib_destroy();

View File

@ -53,6 +53,84 @@
#include <irs/resconf.h> #include <irs/resconf.h>
/*
* Global contexts
*/
isc_mem_t *ctxs_mctx = NULL;
isc_appctx_t *ctxs_actx = NULL;
isc_nm_t *ctxs_netmgr = NULL;
isc_taskmgr_t *ctxs_taskmgr = NULL;
isc_socketmgr_t *ctxs_socketmgr = NULL;
isc_timermgr_t *ctxs_timermgr = NULL;
static void
ctxs_destroy(void) {
if (ctxs_netmgr != NULL) {
isc_nm_closedown(ctxs_netmgr);
}
if (ctxs_taskmgr != NULL) {
isc_taskmgr_destroy(&ctxs_taskmgr);
}
if (ctxs_netmgr != NULL) {
isc_nm_destroy(&ctxs_netmgr);
}
if (ctxs_timermgr != NULL) {
isc_timermgr_destroy(&ctxs_timermgr);
}
if (ctxs_socketmgr != NULL) {
isc_socketmgr_destroy(&ctxs_socketmgr);
}
if (ctxs_actx != NULL) {
isc_appctx_destroy(&ctxs_actx);
}
if (ctxs_mctx != NULL) {
isc_mem_destroy(&ctxs_mctx);
}
}
static isc_result_t
ctxs_init(void) {
isc_result_t result;
isc_mem_create(&ctxs_mctx);
result = isc_appctx_create(ctxs_mctx, &ctxs_actx);
if (result != ISC_R_SUCCESS) {
goto fail;
}
ctxs_netmgr = isc_nm_start(ctxs_mctx, 1);
result = isc_taskmgr_create(ctxs_mctx, 0, ctxs_netmgr, &ctxs_taskmgr);
if (result != ISC_R_SUCCESS) {
goto fail;
}
result = isc_socketmgr_create(ctxs_mctx, &ctxs_socketmgr);
if (result != ISC_R_SUCCESS) {
goto fail;
}
result = isc_timermgr_create(ctxs_mctx, &ctxs_timermgr);
if (result != ISC_R_SUCCESS) {
goto fail;
}
return (ISC_R_SUCCESS);
fail:
ctxs_destroy();
return (result);
}
static char *algname; static char *algname;
static isc_result_t static isc_result_t
@ -93,8 +171,7 @@ usage(void) {
} }
static void static void
set_key(dns_client_t *client, char *keynamestr, char *keystr, bool is_sep, set_key(dns_client_t *client, char *keynamestr, char *keystr, bool is_sep) {
isc_mem_t **mctxp) {
isc_result_t result; isc_result_t result;
dns_fixedname_t fkeyname; dns_fixedname_t fkeyname;
unsigned int namelen; unsigned int namelen;
@ -109,8 +186,6 @@ set_key(dns_client_t *client, char *keynamestr, char *keystr, bool is_sep,
isc_region_t r; isc_region_t r;
dns_secalg_t alg; dns_secalg_t alg;
isc_mem_create(mctxp);
if (algname != NULL) { if (algname != NULL) {
tr.base = algname; tr.base = algname;
tr.length = strlen(algname); tr.length = strlen(algname);
@ -176,7 +251,6 @@ addserver(dns_client_t *client, const char *addrstr, const char *port,
isc_sockaddr_t sa; isc_sockaddr_t sa;
isc_sockaddrlist_t servers; isc_sockaddrlist_t servers;
isc_result_t result; isc_result_t result;
unsigned int namelen;
isc_buffer_t b; isc_buffer_t b;
dns_fixedname_t fname; dns_fixedname_t fname;
dns_name_t *name = NULL; dns_name_t *name = NULL;
@ -201,7 +275,7 @@ addserver(dns_client_t *client, const char *addrstr, const char *port,
ISC_LIST_APPEND(servers, &sa, link); ISC_LIST_APPEND(servers, &sa, link);
if (name_space != NULL) { if (name_space != NULL) {
namelen = strlen(name_space); unsigned int namelen = strlen(name_space);
isc_buffer_constinit(&b, name_space, namelen); isc_buffer_constinit(&b, name_space, namelen);
isc_buffer_add(&b, namelen); isc_buffer_add(&b, namelen);
name = dns_fixedname_initname(&fname); name = dns_fixedname_initname(&fname);
@ -240,15 +314,9 @@ main(int argc, char *argv[]) {
dns_rdatatype_t type = dns_rdatatype_a; dns_rdatatype_t type = dns_rdatatype_a;
dns_rdataset_t *rdataset; dns_rdataset_t *rdataset;
dns_namelist_t namelist; dns_namelist_t namelist;
isc_mem_t *keymctx = NULL;
unsigned int clientopt, resopt = 0; unsigned int clientopt, resopt = 0;
bool is_sep = false; bool is_sep = false;
const char *port = "53"; const char *port = "53";
isc_mem_t *mctx = NULL;
isc_appctx_t *actx = NULL;
isc_taskmgr_t *taskmgr = NULL;
isc_socketmgr_t *socketmgr = NULL;
isc_timermgr_t *timermgr = NULL;
struct in_addr in4; struct in_addr in4;
struct in6_addr in6; struct in6_addr in6;
isc_sockaddr_t a4, a6; isc_sockaddr_t a4, a6;
@ -361,32 +429,15 @@ main(int argc, char *argv[]) {
exit(1); exit(1);
} }
isc_mem_create(&mctx); result = ctxs_init();
result = isc_appctx_create(mctx, &actx);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = isc_app_ctxstart(actx);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = isc_taskmgr_create(mctx, 1, 0, NULL, &taskmgr);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = isc_socketmgr_create(mctx, &socketmgr);
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
result = isc_timermgr_create(mctx, &timermgr);
if (result != ISC_R_SUCCESS) { if (result != ISC_R_SUCCESS) {
goto cleanup; goto cleanup;
} }
clientopt = 0; clientopt = 0;
result = dns_client_create(mctx, actx, taskmgr, socketmgr, timermgr, result = dns_client_create(ctxs_mctx, ctxs_actx, ctxs_taskmgr,
clientopt, &client, addr4, addr6); ctxs_socketmgr, ctxs_timermgr, clientopt,
&client, addr4, addr6);
if (result != ISC_R_SUCCESS) { if (result != ISC_R_SUCCESS) {
fprintf(stderr, "dns_client_create failed: %u, %s\n", result, fprintf(stderr, "dns_client_create failed: %u, %s\n", result,
isc_result_totext(result)); isc_result_totext(result));
@ -398,7 +449,8 @@ main(int argc, char *argv[]) {
irs_resconf_t *resconf = NULL; irs_resconf_t *resconf = NULL;
isc_sockaddrlist_t *nameservers; isc_sockaddrlist_t *nameservers;
result = irs_resconf_load(mctx, "/etc/resolv.conf", &resconf); result = irs_resconf_load(ctxs_mctx, "/etc/resolv.conf",
&resconf);
if (result != ISC_R_SUCCESS && result != ISC_R_FILENOTFOUND) { if (result != ISC_R_SUCCESS && result != ISC_R_FILENOTFOUND) {
fprintf(stderr, "irs_resconf_load failed: %u\n", fprintf(stderr, "irs_resconf_load failed: %u\n",
result); result);
@ -430,7 +482,7 @@ main(int argc, char *argv[]) {
"while key name is provided\n"); "while key name is provided\n");
exit(1); exit(1);
} }
set_key(client, keynamestr, keystr, is_sep, &keymctx); set_key(client, keynamestr, keystr, is_sep);
} }
/* Construct qname */ /* Construct qname */
@ -470,25 +522,11 @@ main(int argc, char *argv[]) {
/* Cleanup */ /* Cleanup */
cleanup: cleanup:
dns_client_destroy(&client); if (client != NULL) {
dns_client_destroy(&client);
}
if (taskmgr != NULL) { ctxs_destroy();
isc_taskmgr_destroy(&taskmgr);
}
if (timermgr != NULL) {
isc_timermgr_destroy(&timermgr);
}
if (socketmgr != NULL) {
isc_socketmgr_destroy(&socketmgr);
}
if (actx != NULL) {
isc_appctx_destroy(&actx);
}
isc_mem_detach(&mctx);
if (keynamestr != NULL) {
isc_mem_destroy(&keymctx);
}
dns_lib_shutdown(); dns_lib_shutdown();
return (0); return (0);

View File

@ -192,7 +192,8 @@ sendquery(isc_task_t *task, isc_event_t *event) {
int int
main(int argc, char *argv[]) { main(int argc, char *argv[]) {
char *ourkeyname; char *ourkeyname;
isc_taskmgr_t *taskmgr; isc_nm_t *netmgr = NULL;
isc_taskmgr_t *taskmgr = NULL;
isc_timermgr_t *timermgr; isc_timermgr_t *timermgr;
isc_socketmgr_t *socketmgr; isc_socketmgr_t *socketmgr;
isc_socket_t *sock; isc_socket_t *sock;
@ -234,8 +235,9 @@ main(int argc, char *argv[]) {
RUNCHECK(dst_lib_init(mctx, NULL)); RUNCHECK(dst_lib_init(mctx, NULL));
taskmgr = NULL; netmgr = isc_nm_start(mctx, 1);
RUNCHECK(isc_taskmgr_create(mctx, 1, 0, NULL, &taskmgr));
RUNCHECK(isc_taskmgr_create(mctx, 0, netmgr, &taskmgr));
task = NULL; task = NULL;
RUNCHECK(isc_task_create(taskmgr, 0, &task)); RUNCHECK(isc_task_create(taskmgr, 0, &task));
timermgr = NULL; timermgr = NULL;
@ -292,6 +294,7 @@ main(int argc, char *argv[]) {
isc_task_shutdown(task); isc_task_shutdown(task);
isc_task_detach(&task); isc_task_detach(&task);
isc_taskmgr_destroy(&taskmgr); isc_taskmgr_destroy(&taskmgr);
isc_nm_destroy(&netmgr);
isc_socket_detach(&sock); isc_socket_detach(&sock);
isc_socketmgr_destroy(&socketmgr); isc_socketmgr_destroy(&socketmgr);
isc_timermgr_destroy(&timermgr); isc_timermgr_destroy(&timermgr);

View File

@ -17,6 +17,7 @@
#include <isc/hash.h> #include <isc/hash.h>
#include <isc/log.h> #include <isc/log.h>
#include <isc/mem.h> #include <isc/mem.h>
#include <isc/netmgr.h>
#include <isc/print.h> #include <isc/print.h>
#include <isc/random.h> #include <isc/random.h>
#include <isc/sockaddr.h> #include <isc/sockaddr.h>
@ -135,7 +136,8 @@ sendquery(isc_task_t *task, isc_event_t *event) {
int int
main(int argc, char **argv) { main(int argc, char **argv) {
char *keyname; char *keyname;
isc_taskmgr_t *taskmgr; isc_nm_t *netmgr;
isc_taskmgr_t *taskmgr = NULL;
isc_timermgr_t *timermgr; isc_timermgr_t *timermgr;
isc_socketmgr_t *socketmgr; isc_socketmgr_t *socketmgr;
isc_socket_t *sock; isc_socket_t *sock;
@ -177,8 +179,9 @@ main(int argc, char **argv) {
RUNCHECK(dst_lib_init(mctx, NULL)); RUNCHECK(dst_lib_init(mctx, NULL));
taskmgr = NULL; netmgr = isc_nm_start(mctx, 1);
RUNCHECK(isc_taskmgr_create(mctx, 1, 0, NULL, &taskmgr));
RUNCHECK(isc_taskmgr_create(mctx, 0, netmgr, &taskmgr));
task = NULL; task = NULL;
RUNCHECK(isc_task_create(taskmgr, 0, &task)); RUNCHECK(isc_task_create(taskmgr, 0, &task));
timermgr = NULL; timermgr = NULL;
@ -235,6 +238,7 @@ main(int argc, char **argv) {
isc_task_shutdown(task); isc_task_shutdown(task);
isc_task_detach(&task); isc_task_detach(&task);
isc_taskmgr_destroy(&taskmgr); isc_taskmgr_destroy(&taskmgr);
isc_nm_destroy(&netmgr);
isc_socket_detach(&sock); isc_socket_detach(&sock);
isc_socketmgr_destroy(&socketmgr); isc_socketmgr_destroy(&socketmgr);
isc_timermgr_destroy(&timermgr); isc_timermgr_destroy(&timermgr);

View File

@ -2083,7 +2083,8 @@ main(int argc, char *argv[]) {
isc_sockaddr_t bind_any; isc_sockaddr_t bind_any;
isc_log_t *lctx; isc_log_t *lctx;
isc_logconfig_t *lcfg; isc_logconfig_t *lcfg;
isc_taskmgr_t *taskmgr; isc_nm_t *netmgr = NULL;
isc_taskmgr_t *taskmgr = NULL;
isc_task_t *task; isc_task_t *task;
isc_timermgr_t *timermgr; isc_timermgr_t *timermgr;
isc_socketmgr_t *socketmgr; isc_socketmgr_t *socketmgr;
@ -2144,8 +2145,9 @@ main(int argc, char *argv[]) {
fatal("can't choose between IPv4 and IPv6"); fatal("can't choose between IPv4 and IPv6");
} }
taskmgr = NULL; netmgr = isc_nm_start(mctx, 1);
RUNCHECK(isc_taskmgr_create(mctx, 1, 0, NULL, &taskmgr));
RUNCHECK(isc_taskmgr_create(mctx, 0, netmgr, &taskmgr));
task = NULL; task = NULL;
RUNCHECK(isc_task_create(taskmgr, 0, &task)); RUNCHECK(isc_task_create(taskmgr, 0, &task));
timermgr = NULL; timermgr = NULL;
@ -2225,6 +2227,7 @@ main(int argc, char *argv[]) {
isc_task_shutdown(task); isc_task_shutdown(task);
isc_task_detach(&task); isc_task_detach(&task);
isc_taskmgr_destroy(&taskmgr); isc_taskmgr_destroy(&taskmgr);
isc_nm_destroy(&netmgr);
dst_lib_destroy(); dst_lib_destroy();

View File

@ -136,7 +136,7 @@ format.
Private header files, describing interfaces that are for internal use Private header files, describing interfaces that are for internal use
within a library but not for public use, are kept in the source tree at the within a library but not for public use, are kept in the source tree at the
same level as their related C files, and often have `"_p"` in their names, same level as their related C files, and often have `"_p"` in their names,
e.g. `lib/isc/task_p.h`. e.g. `lib/isc/mem_p.h`.
Header files that define modules should have a structure like the Header files that define modules should have a structure like the
following. Note that `<isc/lang.h>` MUST be included by any public header following. Note that `<isc/lang.h>` MUST be included by any public header

View File

@ -63,6 +63,7 @@
isc_mem_t *dt_mctx = NULL; isc_mem_t *dt_mctx = NULL;
isc_log_t *lctx = NULL; isc_log_t *lctx = NULL;
isc_nm_t *netmgr = NULL;
isc_taskmgr_t *taskmgr = NULL; isc_taskmgr_t *taskmgr = NULL;
isc_task_t *maintask = NULL; isc_task_t *maintask = NULL;
isc_timermgr_t *timermgr = NULL; isc_timermgr_t *timermgr = NULL;
@ -100,6 +101,9 @@ cleanup_managers(void) {
if (taskmgr != NULL) { if (taskmgr != NULL) {
isc_taskmgr_destroy(&taskmgr); isc_taskmgr_destroy(&taskmgr);
} }
if (netmgr != NULL) {
isc_nm_destroy(&netmgr);
}
if (timermgr != NULL) { if (timermgr != NULL) {
isc_timermgr_destroy(&timermgr); isc_timermgr_destroy(&timermgr);
} }
@ -113,7 +117,8 @@ create_managers(void) {
isc_result_t result; isc_result_t result;
ncpus = isc_os_ncpus(); ncpus = isc_os_ncpus();
CHECK(isc_taskmgr_create(dt_mctx, ncpus, 0, NULL, &taskmgr)); netmgr = isc_nm_start(dt_mctx, ncpus);
CHECK(isc_taskmgr_create(dt_mctx, 0, netmgr, &taskmgr));
CHECK(isc_timermgr_create(dt_mctx, &timermgr)); CHECK(isc_timermgr_create(dt_mctx, &timermgr));
CHECK(isc_socketmgr_create(dt_mctx, &socketmgr)); CHECK(isc_socketmgr_create(dt_mctx, &socketmgr));
CHECK(isc_task_create(taskmgr, 0, &maintask)); CHECK(isc_task_create(taskmgr, 0, &maintask));

View File

@ -18385,41 +18385,34 @@ dns_zonemgr_setsize(dns_zonemgr_t *zmgr, int num_zones) {
/* Create or resize the zone task pools. */ /* Create or resize the zone task pools. */
if (zmgr->zonetasks == NULL) { if (zmgr->zonetasks == NULL) {
result = isc_taskpool_create(zmgr->taskmgr, zmgr->mctx, ntasks, result = isc_taskpool_create(zmgr->taskmgr, zmgr->mctx, ntasks,
2, &pool); 2, false, &pool);
} else { } else {
result = isc_taskpool_expand(&zmgr->zonetasks, ntasks, &pool); result = isc_taskpool_expand(&zmgr->zonetasks, ntasks, false,
&pool);
} }
if (result == ISC_R_SUCCESS) { if (result == ISC_R_SUCCESS) {
zmgr->zonetasks = pool; zmgr->zonetasks = pool;
} }
pool = NULL;
if (zmgr->loadtasks == NULL) {
result = isc_taskpool_create(zmgr->taskmgr, zmgr->mctx, ntasks,
2, &pool);
} else {
result = isc_taskpool_expand(&zmgr->loadtasks, ntasks, &pool);
}
if (result == ISC_R_SUCCESS) {
zmgr->loadtasks = pool;
}
/* /*
* We always set all tasks in the zone-load task pool to * We always set all tasks in the zone-load task pool to
* privileged. This prevents other tasks in the system from * privileged. This prevents other tasks in the system from
* running while the server task manager is in privileged * running while the server task manager is in privileged
* mode. * mode.
*
* NOTE: If we start using task privileges for any other
* part of the system than zone tasks, then this will need to be
* revisted. In that case we'd want to turn on privileges for
* zone tasks only when we were loading, and turn them off the
* rest of the time. For now, however, it's okay to just
* set it and forget it.
*/ */
isc_taskpool_setprivilege(zmgr->loadtasks, true); pool = NULL;
if (zmgr->loadtasks == NULL) {
result = isc_taskpool_create(zmgr->taskmgr, zmgr->mctx, ntasks,
2, true, &pool);
} else {
result = isc_taskpool_expand(&zmgr->loadtasks, ntasks, true,
&pool);
}
if (result == ISC_R_SUCCESS) {
zmgr->loadtasks = pool;
}
/* Create or resize the zone memory context pool. */ /* Create or resize the zone memory context pool. */
if (zmgr->mctxpool == NULL) { if (zmgr->mctxpool == NULL) {

View File

@ -224,7 +224,6 @@ libisc_la_SOURCES = \
fsaccess_common_p.h \ fsaccess_common_p.h \
lib_p.h \ lib_p.h \
mem_p.h \ mem_p.h \
task_p.h \
tls_p.h tls_p.h
libisc_la_CPPFLAGS = \ libisc_la_CPPFLAGS = \

View File

@ -516,11 +516,10 @@ isc_appctx_create(isc_mem_t *mctx, isc_appctx_t **ctxp) {
REQUIRE(ctxp != NULL && *ctxp == NULL); REQUIRE(ctxp != NULL && *ctxp == NULL);
ctx = isc_mem_get(mctx, sizeof(*ctx)); ctx = isc_mem_get(mctx, sizeof(*ctx));
*ctx = (isc_appctx_t){ .magic = 0 };
ctx->magic = APPCTX_MAGIC;
ctx->mctx = NULL;
isc_mem_attach(mctx, &ctx->mctx); isc_mem_attach(mctx, &ctx->mctx);
ctx->magic = APPCTX_MAGIC;
*ctxp = ctx; *ctxp = ctx;

View File

@ -522,3 +522,16 @@ isc_nm_listenhttp(isc_nm_t *mgr, isc_nmiface_t *iface, int backlog,
isc_result_t isc_result_t
isc_nm_http_endpoint(isc_nmsocket_t *sock, const char *uri, isc_nm_recv_cb_t cb, isc_nm_http_endpoint(isc_nmsocket_t *sock, const char *uri, isc_nm_recv_cb_t cb,
void *cbarg, size_t extrahandlesize); void *cbarg, size_t extrahandlesize);
void
isc_nm_task_enqueue(isc_nm_t *mgr, isc_task_t *task, int threadid);
/*%<
* Enqueue the 'task' onto the netmgr ievents queue.
*
* Requires:
* \li 'mgr' is a valid netmgr object
* \li 'task' is a valid task
* \li 'threadid' is either the preferred netmgr tid or -1, in which case
* tid will be picked randomly. The threadid is capped (by modulo) to
* maximum number of 'workers' as specifed in isc_nm_start()
*/

View File

@ -102,12 +102,11 @@ typedef enum {
isc_result_t isc_result_t
isc_task_create(isc_taskmgr_t *manager, unsigned int quantum, isc_task_create(isc_taskmgr_t *manager, unsigned int quantum,
isc_task_t **taskp); isc_task_t **taskp);
isc_result_t isc_result_t
isc_task_create_bound(isc_taskmgr_t *manager, unsigned int quantum, isc_task_create_bound(isc_taskmgr_t *manager, unsigned int quantum,
isc_task_t **taskp, int threadid); isc_task_t **taskp, int threadid);
/*%< /*%<
* Create a task. * Create a task, optionally bound to a particular threadid.
* *
* Notes: * Notes:
* *
@ -138,6 +137,23 @@ isc_task_create_bound(isc_taskmgr_t *manager, unsigned int quantum,
*\li #ISC_R_SHUTTINGDOWN *\li #ISC_R_SHUTTINGDOWN
*/ */
isc_result_t
isc_task_run(isc_task_t *task);
/*%<
* Run all the queued events for the 'task', returning
* when the queue is empty or the number of events executed
* exceeds the 'quantum' specified when the task was created.
*
* Requires:
*
*\li 'task' is a valid task.
*
* Returns:
*
*\li #ISC_R_SUCCESS
*\li #ISC_R_QUOTA
*/
void void
isc_task_attach(isc_task_t *source, isc_task_t **targetp); isc_task_attach(isc_task_t *source, isc_task_t **targetp);
/*%< /*%<
@ -611,20 +627,13 @@ isc_task_privilege(isc_task_t *task);
*****/ *****/
isc_result_t isc_result_t
isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers, isc_taskmgr_create(isc_mem_t *mctx, unsigned int default_quantum, isc_nm_t *nm,
unsigned int default_quantum, isc_nm_t *nm,
isc_taskmgr_t **managerp); isc_taskmgr_t **managerp);
/*%< /*%<
* Create a new task manager. * Create a new task manager.
* *
* Notes: * Notes:
* *
*\li 'workers' in the number of worker threads to create. In general,
* the value should be close to the number of processors in the system.
* The 'workers' value is advisory only. An attempt will be made to
* create 'workers' threads, but if at least one thread creation
* succeeds, isc_taskmgr_create() may return ISC_R_SUCCESS.
*
*\li If 'default_quantum' is non-zero, then it will be used as the default *\li If 'default_quantum' is non-zero, then it will be used as the default
* quantum value when tasks are created. If zero, then an implementation * quantum value when tasks are created. If zero, then an implementation
* defined default quantum will be used. * defined default quantum will be used.
@ -636,8 +645,6 @@ isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
* *
*\li 'mctx' is a valid memory context. *\li 'mctx' is a valid memory context.
* *
*\li workers > 0
*
*\li managerp != NULL && *managerp == NULL *\li managerp != NULL && *managerp == NULL
* *
* Ensures: * Ensures:
@ -651,34 +658,14 @@ isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
*\li #ISC_R_NOMEMORY *\li #ISC_R_NOMEMORY
*\li #ISC_R_NOTHREADS No threads could be created. *\li #ISC_R_NOTHREADS No threads could be created.
*\li #ISC_R_UNEXPECTED An unexpected error occurred. *\li #ISC_R_UNEXPECTED An unexpected error occurred.
*\li #ISC_R_SHUTTINGDOWN The non-threaded, shared, task *\li #ISC_R_SHUTTINGDOWN The non-threaded, shared, task
* manager shutting down. * manager shutting down.
*/ */
void void
isc_taskmgr_setprivilegedmode(isc_taskmgr_t *manager); isc_taskmgr_attach(isc_taskmgr_t *, isc_taskmgr_t **);
void
isc_taskmgrmode_t isc_taskmgr_detach(isc_taskmgr_t *);
isc_taskmgr_mode(isc_taskmgr_t *manager);
/*%<
* Set/get the current operating mode of the task manager. Valid modes are:
*
*\li isc_taskmgrmode_normal
*\li isc_taskmgrmode_privileged
*
* In privileged execution mode, only tasks that have had the "privilege"
* flag set via isc_task_setprivilege() can be executed. When all such
* tasks are complete, the manager automatically returns to normal mode
* and proceeds with running non-privileged ready tasks. This means it is
* necessary to have at least one privileged task waiting on the ready
* queue *before* setting the manager into privileged execution mode,
* which in turn means the task which calls this function should be in
* task-exclusive mode when it does so.
*
* Requires:
*
*\li 'manager' is a valid task manager.
*/
void void
isc_taskmgr_destroy(isc_taskmgr_t **managerp); isc_taskmgr_destroy(isc_taskmgr_t **managerp);

View File

@ -51,7 +51,7 @@ typedef struct isc_taskpool isc_taskpool_t;
isc_result_t isc_result_t
isc_taskpool_create(isc_taskmgr_t *tmgr, isc_mem_t *mctx, unsigned int ntasks, isc_taskpool_create(isc_taskmgr_t *tmgr, isc_mem_t *mctx, unsigned int ntasks,
unsigned int quantum, isc_taskpool_t **poolp); unsigned int quantum, bool priv, isc_taskpool_t **poolp);
/*%< /*%<
* Create a task pool of "ntasks" tasks, each with quantum * Create a task pool of "ntasks" tasks, each with quantum
* "quantum". * "quantum".
@ -90,7 +90,7 @@ isc_taskpool_size(isc_taskpool_t *pool);
*/ */
isc_result_t isc_result_t
isc_taskpool_expand(isc_taskpool_t **sourcep, unsigned int size, isc_taskpool_expand(isc_taskpool_t **sourcep, unsigned int size, bool priv,
isc_taskpool_t **targetp); isc_taskpool_t **targetp);
/*%< /*%<
@ -131,19 +131,6 @@ isc_taskpool_destroy(isc_taskpool_t **poolp);
* \li '*poolp' is a valid task pool. * \li '*poolp' is a valid task pool.
*/ */
void
isc_taskpool_setprivilege(isc_taskpool_t *pool, bool priv);
/*%<
* Set the privilege flag on all tasks in 'pool' to 'priv'. If 'priv' is
* true, then when the task manager is set into privileged mode, only
* tasks wihin this pool will be able to execute. (Note: It is important
* to turn the pool tasks' privilege back off before the last task finishes
* executing.)
*
* Requires:
* \li 'pool' is a valid task pool.
*/
ISC_LANG_ENDDECLS ISC_LANG_ENDDECLS
#endif /* ISC_TASKPOOL_H */ #endif /* ISC_TASKPOOL_H */

View File

@ -41,6 +41,9 @@
#define ISC_NETMGR_TID_UNKNOWN -1 #define ISC_NETMGR_TID_UNKNOWN -1
/* Must be different from ISC_NETMGR_TID_UNKNOWN */
#define ISC_NETMGR_NON_INTERLOCKED -2
#define ISC_NETMGR_TLSBUF_SIZE 65536 #define ISC_NETMGR_TLSBUF_SIZE 65536
#if !defined(WIN32) #if !defined(WIN32)
@ -174,6 +177,8 @@ typedef struct isc__networker {
bool finished; bool finished;
isc_thread_t thread; isc_thread_t thread;
isc_queue_t *ievents; /* incoming async events */ isc_queue_t *ievents; /* incoming async events */
isc_queue_t *ievents_priv; /* privileged async tasks */
isc_queue_t *ievents_task; /* async tasks */
isc_queue_t *ievents_prio; /* priority async events isc_queue_t *ievents_prio; /* priority async events
* used for listening etc. * used for listening etc.
* can be processed while * can be processed while
@ -236,27 +241,27 @@ struct isc_nmiface {
typedef enum isc__netievent_type { typedef enum isc__netievent_type {
netievent_udpconnect, netievent_udpconnect,
netievent_udpclose,
netievent_udpsend, netievent_udpsend,
netievent_udpread, netievent_udpread,
netievent_udpstop, netievent_udpstop,
netievent_udpcancel, netievent_udpcancel,
netievent_udpclose,
netievent_tcpconnect, netievent_tcpconnect,
netievent_tcpclose,
netievent_tcpsend, netievent_tcpsend,
netievent_tcpstartread, netievent_tcpstartread,
netievent_tcppauseread, netievent_tcppauseread,
netievent_tcpaccept, netievent_tcpaccept,
netievent_tcpstop, netievent_tcpstop,
netievent_tcpcancel, netievent_tcpcancel,
netievent_tcpclose,
netievent_tcpdnsaccept, netievent_tcpdnsaccept,
netievent_tcpdnsconnect, netievent_tcpdnsconnect,
netievent_tcpdnsclose,
netievent_tcpdnssend, netievent_tcpdnssend,
netievent_tcpdnsread, netievent_tcpdnsread,
netievent_tcpdnscancel, netievent_tcpdnscancel,
netievent_tcpdnsclose,
netievent_tcpdnsstop, netievent_tcpdnsstop,
netievent_tlsclose, netievent_tlsclose,
@ -268,19 +273,18 @@ typedef enum isc__netievent_type {
netievent_tlsdnsaccept, netievent_tlsdnsaccept,
netievent_tlsdnsconnect, netievent_tlsdnsconnect,
netievent_tlsdnsclose,
netievent_tlsdnssend, netievent_tlsdnssend,
netievent_tlsdnsread, netievent_tlsdnsread,
netievent_tlsdnscancel, netievent_tlsdnscancel,
netievent_tlsdnsclose,
netievent_tlsdnsstop, netievent_tlsdnsstop,
netievent_tlsdnscycle, netievent_tlsdnscycle,
netievent_tlsdnsshutdown, netievent_tlsdnsshutdown,
netievent_httpclose,
netievent_httpstop, netievent_httpstop,
netievent_httpsend, netievent_httpsend,
netievent_httpclose,
netievent_close,
netievent_shutdown, netievent_shutdown,
netievent_stop, netievent_stop,
netievent_pause, netievent_pause,
@ -289,6 +293,9 @@ typedef enum isc__netievent_type {
netievent_readcb, netievent_readcb,
netievent_sendcb, netievent_sendcb,
netievent_task,
netievent_privilegedtask,
netievent_prio = 0xff, /* event type values higher than this netievent_prio = 0xff, /* event type values higher than this
* will be treated as high-priority * will be treated as high-priority
* events, which can be processed * events, which can be processed
@ -300,6 +307,8 @@ typedef enum isc__netievent_type {
netievent_tlsdnslisten, netievent_tlsdnslisten,
netievent_resume, netievent_resume,
netievent_detach, netievent_detach,
netievent_close,
} isc__netievent_type; } isc__netievent_type;
typedef union { typedef union {
@ -556,6 +565,36 @@ typedef struct isc__netievent__socket_quota {
isc__nm_put_netievent(nm, ievent); \ isc__nm_put_netievent(nm, ievent); \
} }
typedef struct isc__netievent__task {
isc__netievent_type type;
isc_task_t *task;
} isc__netievent__task_t;
#define NETIEVENT_TASK_TYPE(type) \
typedef isc__netievent__task_t isc__netievent_##type##_t;
#define NETIEVENT_TASK_DECL(type) \
isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
isc_nm_t *nm, isc_task_t *task); \
void isc__nm_put_netievent_##type(isc_nm_t *nm, \
isc__netievent_##type##_t *ievent);
#define NETIEVENT_TASK_DEF(type) \
isc__netievent_##type##_t *isc__nm_get_netievent_##type( \
isc_nm_t *nm, isc_task_t *task) { \
isc__netievent_##type##_t *ievent = \
isc__nm_get_netievent(nm, netievent_##type); \
ievent->task = task; \
\
return (ievent); \
} \
\
void isc__nm_put_netievent_##type(isc_nm_t *nm, \
isc__netievent_##type##_t *ievent) { \
ievent->task = NULL; \
isc__nm_put_netievent(nm, ievent); \
}
typedef struct isc__netievent_udpsend { typedef struct isc__netievent_udpsend {
NETIEVENT__SOCKET; NETIEVENT__SOCKET;
isc_sockaddr_t peer; isc_sockaddr_t peer;
@ -617,6 +656,7 @@ struct isc_nm {
uint32_t nworkers; uint32_t nworkers;
isc_mutex_t lock; isc_mutex_t lock;
isc_condition_t wkstatecond; isc_condition_t wkstatecond;
isc_condition_t wkpausecond;
isc__networker_t *workers; isc__networker_t *workers;
isc_stats_t *stats; isc_stats_t *stats;
@ -631,6 +671,8 @@ struct isc_nm {
uint_fast32_t workers_paused; uint_fast32_t workers_paused;
atomic_uint_fast32_t maxudp; atomic_uint_fast32_t maxudp;
atomic_bool paused;
/* /*
* Active connections are being closed and new connections are * Active connections are being closed and new connections are
* no longer allowed. * no longer allowed.
@ -643,7 +685,7 @@ struct isc_nm {
* or pause, or we'll deadlock. We have to either re-enqueue our * or pause, or we'll deadlock. We have to either re-enqueue our
* event or wait for the other one to finish if we want to pause. * event or wait for the other one to finish if we want to pause.
*/ */
atomic_bool interlocked; atomic_int interlocked;
/* /*
* Timeout values for TCP connections, corresponding to * Timeout values for TCP connections, corresponding to
@ -1783,6 +1825,9 @@ NETIEVENT_TYPE(resume);
NETIEVENT_TYPE(shutdown); NETIEVENT_TYPE(shutdown);
NETIEVENT_TYPE(stop); NETIEVENT_TYPE(stop);
NETIEVENT_TASK_TYPE(task);
NETIEVENT_TASK_TYPE(privilegedtask);
/* Now declared the helper functions */ /* Now declared the helper functions */
NETIEVENT_SOCKET_DECL(close); NETIEVENT_SOCKET_DECL(close);
@ -1846,6 +1891,9 @@ NETIEVENT_DECL(resume);
NETIEVENT_DECL(shutdown); NETIEVENT_DECL(shutdown);
NETIEVENT_DECL(stop); NETIEVENT_DECL(stop);
NETIEVENT_TASK_DECL(task);
NETIEVENT_TASK_DECL(privilegedtask);
void void
isc__nm_udp_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result); isc__nm_udp_failed_read_cb(isc_nmsocket_t *sock, isc_result_t result);
void void

View File

@ -30,6 +30,7 @@
#include <isc/sockaddr.h> #include <isc/sockaddr.h>
#include <isc/stats.h> #include <isc/stats.h>
#include <isc/strerr.h> #include <isc/strerr.h>
#include <isc/task.h>
#include <isc/thread.h> #include <isc/thread.h>
#include <isc/tls.h> #include <isc/tls.h>
#include <isc/util.h> #include <isc/util.h>
@ -145,10 +146,12 @@ static bool
process_queue(isc__networker_t *worker, isc_queue_t *queue); process_queue(isc__networker_t *worker, isc_queue_t *queue);
static bool static bool
process_priority_queue(isc__networker_t *worker); process_priority_queue(isc__networker_t *worker);
static bool
process_normal_queue(isc__networker_t *worker);
static void static void
process_queues(isc__networker_t *worker); process_privilege_queue(isc__networker_t *worker);
static void
process_tasks_queue(isc__networker_t *worker);
static void
process_normal_queue(isc__networker_t *worker);
static void static void
isc__nm_async_stop(isc__networker_t *worker, isc__netievent_t *ev0); isc__nm_async_stop(isc__networker_t *worker, isc__netievent_t *ev0);
@ -217,6 +220,8 @@ isc_nm_start(isc_mem_t *mctx, uint32_t workers) {
isc_nm_t *mgr = NULL; isc_nm_t *mgr = NULL;
char name[32]; char name[32];
REQUIRE(workers > 0);
#ifdef WIN32 #ifdef WIN32
isc__nm_winsock_initialize(); isc__nm_winsock_initialize();
#endif /* WIN32 */ #endif /* WIN32 */
@ -227,9 +232,10 @@ isc_nm_start(isc_mem_t *mctx, uint32_t workers) {
isc_mem_attach(mctx, &mgr->mctx); isc_mem_attach(mctx, &mgr->mctx);
isc_mutex_init(&mgr->lock); isc_mutex_init(&mgr->lock);
isc_condition_init(&mgr->wkstatecond); isc_condition_init(&mgr->wkstatecond);
isc_condition_init(&mgr->wkpausecond);
isc_refcount_init(&mgr->references, 1); isc_refcount_init(&mgr->references, 1);
atomic_init(&mgr->maxudp, 0); atomic_init(&mgr->maxudp, 0);
atomic_init(&mgr->interlocked, false); atomic_init(&mgr->interlocked, ISC_NETMGR_NON_INTERLOCKED);
#ifdef NETMGR_TRACE #ifdef NETMGR_TRACE
ISC_LIST_INIT(mgr->active_sockets); ISC_LIST_INIT(mgr->active_sockets);
@ -280,6 +286,8 @@ isc_nm_start(isc_mem_t *mctx, uint32_t workers) {
isc_condition_init(&worker->cond); isc_condition_init(&worker->cond);
worker->ievents = isc_queue_new(mgr->mctx, 128); worker->ievents = isc_queue_new(mgr->mctx, 128);
worker->ievents_priv = isc_queue_new(mgr->mctx, 128);
worker->ievents_task = isc_queue_new(mgr->mctx, 128);
worker->ievents_prio = isc_queue_new(mgr->mctx, 128); worker->ievents_prio = isc_queue_new(mgr->mctx, 128);
worker->recvbuf = isc_mem_get(mctx, ISC_NETMGR_RECVBUF_SIZE); worker->recvbuf = isc_mem_get(mctx, ISC_NETMGR_RECVBUF_SIZE);
worker->sendbuf = isc_mem_get(mctx, ISC_NETMGR_SENDBUF_SIZE); worker->sendbuf = isc_mem_get(mctx, ISC_NETMGR_SENDBUF_SIZE);
@ -339,6 +347,11 @@ nm_destroy(isc_nm_t **mgr0) {
isc_mempool_put(mgr->evpool, ievent); isc_mempool_put(mgr->evpool, ievent);
} }
INSIST(isc_queue_dequeue(worker->ievents_priv) ==
(uintptr_t)NULL);
INSIST(isc_queue_dequeue(worker->ievents_task) ==
(uintptr_t)NULL);
while ((ievent = (isc__netievent_t *)isc_queue_dequeue( while ((ievent = (isc__netievent_t *)isc_queue_dequeue(
worker->ievents_prio)) != NULL) worker->ievents_prio)) != NULL)
{ {
@ -349,6 +362,8 @@ nm_destroy(isc_nm_t **mgr0) {
INSIST(r == 0); INSIST(r == 0);
isc_queue_destroy(worker->ievents); isc_queue_destroy(worker->ievents);
isc_queue_destroy(worker->ievents_priv);
isc_queue_destroy(worker->ievents_task);
isc_queue_destroy(worker->ievents_prio); isc_queue_destroy(worker->ievents_prio);
isc_mutex_destroy(&worker->lock); isc_mutex_destroy(&worker->lock);
isc_condition_destroy(&worker->cond); isc_condition_destroy(&worker->cond);
@ -365,6 +380,7 @@ nm_destroy(isc_nm_t **mgr0) {
} }
isc_condition_destroy(&mgr->wkstatecond); isc_condition_destroy(&mgr->wkstatecond);
isc_condition_destroy(&mgr->wkpausecond);
isc_mutex_destroy(&mgr->lock); isc_mutex_destroy(&mgr->lock);
isc_mempool_destroy(&mgr->evpool); isc_mempool_destroy(&mgr->evpool);
@ -385,42 +401,58 @@ nm_destroy(isc_nm_t **mgr0) {
void void
isc_nm_pause(isc_nm_t *mgr) { isc_nm_pause(isc_nm_t *mgr) {
REQUIRE(VALID_NM(mgr)); REQUIRE(VALID_NM(mgr));
REQUIRE(!isc__nm_in_netthread()); uint_fast32_t pausing = 0;
REQUIRE(!atomic_load(&mgr->paused));
isc__nm_acquire_interlocked_force(mgr); isc__nm_acquire_interlocked_force(mgr);
for (size_t i = 0; i < mgr->nworkers; i++) { for (size_t i = 0; i < mgr->nworkers; i++) {
isc__networker_t *worker = &mgr->workers[i]; isc__networker_t *worker = &mgr->workers[i];
isc__netievent_resume_t *event = if (i != (size_t)isc_nm_tid()) {
isc__nm_get_netievent_pause(mgr); isc__netievent_resume_t *event =
isc__nm_enqueue_ievent(worker, (isc__netievent_t *)event); isc__nm_get_netievent_pause(mgr);
pausing++;
isc__nm_enqueue_ievent(worker,
(isc__netievent_t *)event);
} else {
isc__nm_async_pause(worker, NULL);
}
} }
LOCK(&mgr->lock); LOCK(&mgr->lock);
while (mgr->workers_paused != mgr->workers_running) { while (mgr->workers_paused != pausing) {
WAIT(&mgr->wkstatecond, &mgr->lock); WAIT(&mgr->wkstatecond, &mgr->lock);
} }
REQUIRE(atomic_compare_exchange_strong(&mgr->paused, &(bool){ false },
true));
UNLOCK(&mgr->lock); UNLOCK(&mgr->lock);
} }
void void
isc_nm_resume(isc_nm_t *mgr) { isc_nm_resume(isc_nm_t *mgr) {
REQUIRE(VALID_NM(mgr)); REQUIRE(VALID_NM(mgr));
REQUIRE(!isc__nm_in_netthread()); REQUIRE(atomic_load(&mgr->paused));
for (size_t i = 0; i < mgr->nworkers; i++) { for (size_t i = 0; i < mgr->nworkers; i++) {
isc__networker_t *worker = &mgr->workers[i]; isc__networker_t *worker = &mgr->workers[i];
isc__netievent_resume_t *event = if (i != (size_t)isc_nm_tid()) {
isc__nm_get_netievent_resume(mgr); isc__netievent_resume_t *event =
isc__nm_enqueue_ievent(worker, (isc__netievent_t *)event); isc__nm_get_netievent_resume(mgr);
isc__nm_enqueue_ievent(worker,
(isc__netievent_t *)event);
} else {
isc__nm_async_resume(worker, NULL);
}
} }
LOCK(&mgr->lock); LOCK(&mgr->lock);
while (mgr->workers_paused != 0) { while (mgr->workers_paused != 0) {
WAIT(&mgr->wkstatecond, &mgr->lock); WAIT(&mgr->wkstatecond, &mgr->lock);
} }
REQUIRE(atomic_compare_exchange_strong(&mgr->paused, &(bool){ true },
false));
BROADCAST(&mgr->wkpausecond);
UNLOCK(&mgr->lock); UNLOCK(&mgr->lock);
isc__nm_drop_interlocked(mgr); isc__nm_drop_interlocked(mgr);
} }
@ -465,7 +497,6 @@ void
isc_nm_destroy(isc_nm_t **mgr0) { isc_nm_destroy(isc_nm_t **mgr0) {
isc_nm_t *mgr = NULL; isc_nm_t *mgr = NULL;
int counter = 0; int counter = 0;
uint_fast32_t references;
REQUIRE(mgr0 != NULL); REQUIRE(mgr0 != NULL);
REQUIRE(VALID_NM(*mgr0)); REQUIRE(VALID_NM(*mgr0));
@ -480,9 +511,7 @@ isc_nm_destroy(isc_nm_t **mgr0) {
/* /*
* Wait for the manager to be dereferenced elsewhere. * Wait for the manager to be dereferenced elsewhere.
*/ */
while ((references = isc_refcount_current(&mgr->references)) > 1 && while (isc_refcount_current(&mgr->references) > 1 && counter++ < 1000) {
counter++ < 1000)
{
#ifdef WIN32 #ifdef WIN32
_sleep(10); _sleep(10);
#else /* ifdef WIN32 */ #else /* ifdef WIN32 */
@ -491,10 +520,23 @@ isc_nm_destroy(isc_nm_t **mgr0) {
} }
#ifdef NETMGR_TRACE #ifdef NETMGR_TRACE
isc__nm_dump_active(mgr); if (isc_refcount_current(&mgr->references) > 1) {
isc__nm_dump_active(mgr);
INSIST(0);
ISC_UNREACHABLE();
}
#endif #endif
INSIST(references == 1); /*
* Now just patiently wait
*/
while (isc_refcount_current(&mgr->references) > 1) {
#ifdef WIN32
_sleep(10);
#else /* ifdef WIN32 */
usleep(10000);
#endif /* ifdef WIN32 */
}
/* /*
* Detach final reference. * Detach final reference.
@ -545,7 +587,30 @@ isc_nm_gettimeouts(isc_nm_t *mgr, uint32_t *initial, uint32_t *idle,
/* /*
* nm_thread is a single worker thread, that runs uv_run event loop * nm_thread is a single worker thread, that runs uv_run event loop
* until asked to stop. * until asked to stop.
*
* There are four queues for asynchronous events:
*
* 1. priority queue - netievents on the priority queue are run even when
* the taskmgr enters exclusive mode and the netmgr is paused. This
* is needed to properly start listening on the interfaces, free
* resources on shutdown, or resume from a pause.
*
* 2. privileged task queue - only privileged tasks are queued here and
* this is the first queue that gets processed when network manager
* is unpaused using isc_nm_resume(). All netmgr workers need to
* clean the privileged task queue before they all proceed to normal
* operation. Both task queues are processed when the workers are
* shutting down.
*
* 3. task queue - only (traditional) tasks are scheduled here, and this
* queue and the privileged task queue are both processed when the
* netmgr workers are finishing. This is needed to process the task
* shutdown events.
*
* 4. normal queue - this is the queue with netmgr events, e.g. reading,
* sending, callbacks, etc.
*/ */
static isc_threadresult_t static isc_threadresult_t
nm_thread(isc_threadarg_t worker0) { nm_thread(isc_threadarg_t worker0) {
isc__networker_t *worker = (isc__networker_t *)worker0; isc__networker_t *worker = (isc__networker_t *)worker0;
@ -555,18 +620,26 @@ nm_thread(isc_threadarg_t worker0) {
isc_thread_setaffinity(isc__nm_tid_v); isc_thread_setaffinity(isc__nm_tid_v);
while (true) { while (true) {
/*
* uv_run() runs async_cb() in a loop, which processes
* all four event queues until a "pause" or "stop" event
* is encountered. On pause, we process only priority and
* privileged events until resuming.
*/
int r = uv_run(&worker->loop, UV_RUN_DEFAULT); int r = uv_run(&worker->loop, UV_RUN_DEFAULT);
/* There's always the async handle until we are done */
INSIST(r > 0 || worker->finished); INSIST(r > 0 || worker->finished);
if (worker->paused) { if (worker->paused) {
LOCK(&worker->lock); INSIST(atomic_load(&mgr->interlocked) != isc_nm_tid());
/* We need to lock the worker first otherwise
* isc_nm_resume() might slip in before WAIT() in the
* while loop starts and the signal never gets delivered
* and we are forever stuck in the paused loop.
*/
/*
* We need to lock the worker first; otherwise
* isc_nm_resume() might slip in before WAIT() in
* the while loop starts, then the signal never
* gets delivered and we are stuck forever in the
* paused loop.
*/
LOCK(&worker->lock);
LOCK(&mgr->lock); LOCK(&mgr->lock);
mgr->workers_paused++; mgr->workers_paused++;
SIGNAL(&mgr->wkstatecond); SIGNAL(&mgr->wkstatecond);
@ -574,15 +647,28 @@ nm_thread(isc_threadarg_t worker0) {
while (worker->paused) { while (worker->paused) {
WAIT(&worker->cond, &worker->lock); WAIT(&worker->cond, &worker->lock);
UNLOCK(&worker->lock);
(void)process_priority_queue(worker); (void)process_priority_queue(worker);
LOCK(&worker->lock);
} }
LOCK(&mgr->lock); LOCK(&mgr->lock);
mgr->workers_paused--; mgr->workers_paused--;
SIGNAL(&mgr->wkstatecond); SIGNAL(&mgr->wkstatecond);
UNLOCK(&mgr->lock); UNLOCK(&mgr->lock);
UNLOCK(&worker->lock); UNLOCK(&worker->lock);
/*
* All workers must run the privileged event
* queue before we resume from pause.
*/
process_privilege_queue(worker);
LOCK(&mgr->lock);
while (atomic_load(&mgr->paused)) {
WAIT(&mgr->wkpausecond, &mgr->lock);
}
UNLOCK(&mgr->lock);
} }
if (r == 0) { if (r == 0) {
@ -593,11 +679,24 @@ nm_thread(isc_threadarg_t worker0) {
INSIST(!worker->finished); INSIST(!worker->finished);
/* /*
* Empty the async queue. * We've fully resumed from pause. Drain the normal
* asynchronous event queues before resuming the uv_run()
* loop. (This is not strictly necessary, it just ensures
* that all pending events are processed before another
* pause can slip in.)
*/ */
process_queues(worker); process_tasks_queue(worker);
process_normal_queue(worker);
} }
/*
* We are shutting down. Process the task queues
* (they may include shutdown events) but do not process
* the netmgr event queue.
*/
process_privilege_queue(worker);
process_tasks_queue(worker);
LOCK(&mgr->lock); LOCK(&mgr->lock);
mgr->workers_running--; mgr->workers_running--;
SIGNAL(&mgr->wkstatecond); SIGNAL(&mgr->wkstatecond);
@ -607,15 +706,26 @@ nm_thread(isc_threadarg_t worker0) {
} }
/* /*
* async_cb is a universal callback for 'async' events sent to event loop. * async_cb() is a universal callback for 'async' events sent to event loop.
* It's the only way to safely pass data to the libuv event loop. We use a * It's the only way to safely pass data to the libuv event loop. We use a
* single async event and a lockless queue of 'isc__netievent_t' structures * single async event and a set of lockless queues of 'isc__netievent_t'
* passed from other threads. * structures passed from other threads.
*/ */
static void static void
async_cb(uv_async_t *handle) { async_cb(uv_async_t *handle) {
isc__networker_t *worker = (isc__networker_t *)handle->loop->data; isc__networker_t *worker = (isc__networker_t *)handle->loop->data;
process_queues(worker);
/*
* process_priority_queue() returns false when pausing or stopping,
* so we don't want to process the other queues in that case.
*/
if (!process_priority_queue(worker)) {
return;
}
process_privilege_queue(worker);
process_tasks_queue(worker);
process_normal_queue(worker);
} }
static void static void
@ -624,13 +734,13 @@ isc__nm_async_stop(isc__networker_t *worker, isc__netievent_t *ev0) {
worker->finished = true; worker->finished = true;
/* Close the async handler */ /* Close the async handler */
uv_close((uv_handle_t *)&worker->async, NULL); uv_close((uv_handle_t *)&worker->async, NULL);
/* uv_stop(&worker->loop); */
} }
static void static void
isc__nm_async_pause(isc__networker_t *worker, isc__netievent_t *ev0) { isc__nm_async_pause(isc__networker_t *worker, isc__netievent_t *ev0) {
UNUSED(ev0); UNUSED(ev0);
REQUIRE(worker->paused == false); REQUIRE(worker->paused == false);
worker->paused = true; worker->paused = true;
uv_stop(&worker->loop); uv_stop(&worker->loop);
} }
@ -639,25 +749,78 @@ static void
isc__nm_async_resume(isc__networker_t *worker, isc__netievent_t *ev0) { isc__nm_async_resume(isc__networker_t *worker, isc__netievent_t *ev0) {
UNUSED(ev0); UNUSED(ev0);
REQUIRE(worker->paused == true); REQUIRE(worker->paused == true);
worker->paused = false; worker->paused = false;
} }
void
isc_nm_task_enqueue(isc_nm_t *nm, isc_task_t *task, int threadid) {
isc__netievent_t *event = NULL;
int tid;
isc__networker_t *worker = NULL;
if (threadid == -1) {
tid = (int)isc_random_uniform(nm->nworkers);
} else {
tid = threadid % nm->nworkers;
}
worker = &nm->workers[tid];
if (isc_task_privilege(task)) {
event = (isc__netievent_t *)
isc__nm_get_netievent_privilegedtask(nm, task);
} else {
event = (isc__netievent_t *)isc__nm_get_netievent_task(nm,
task);
}
isc__nm_enqueue_ievent(worker, event);
}
#define isc__nm_async_privilegedtask(worker, ev0) \
isc__nm_async_task(worker, ev0)
static void
isc__nm_async_task(isc__networker_t *worker, isc__netievent_t *ev0) {
isc__netievent_task_t *ievent = (isc__netievent_task_t *)ev0;
isc_result_t result;
UNUSED(worker);
result = isc_task_run(ievent->task);
switch (result) {
case ISC_R_QUOTA:
isc_nm_task_enqueue(worker->mgr, (isc_task_t *)ievent->task,
isc_nm_tid());
return;
case ISC_R_SUCCESS:
return;
default:
INSIST(0);
ISC_UNREACHABLE();
}
}
static bool static bool
process_priority_queue(isc__networker_t *worker) { process_priority_queue(isc__networker_t *worker) {
return (process_queue(worker, worker->ievents_prio)); return (process_queue(worker, worker->ievents_prio));
} }
static bool static void
process_normal_queue(isc__networker_t *worker) { process_privilege_queue(isc__networker_t *worker) {
return (process_queue(worker, worker->ievents)); (void)process_queue(worker, worker->ievents_priv);
} }
static void static void
process_queues(isc__networker_t *worker) { process_tasks_queue(isc__networker_t *worker) {
if (!process_priority_queue(worker)) { (void)process_queue(worker, worker->ievents_task);
return; }
}
(void)process_normal_queue(worker); static void
process_normal_queue(isc__networker_t *worker) {
(void)process_queue(worker, worker->ievents);
} }
/* /*
@ -690,6 +853,9 @@ process_netievent(isc__networker_t *worker, isc__netievent_t *ievent) {
/* Don't process more ievents when we are stopping */ /* Don't process more ievents when we are stopping */
NETIEVENT_CASE_NOMORE(stop); NETIEVENT_CASE_NOMORE(stop);
NETIEVENT_CASE(privilegedtask);
NETIEVENT_CASE(task);
NETIEVENT_CASE(udpconnect); NETIEVENT_CASE(udpconnect);
NETIEVENT_CASE(udplisten); NETIEVENT_CASE(udplisten);
NETIEVENT_CASE(udpstop); NETIEVENT_CASE(udpstop);
@ -749,7 +915,6 @@ process_netievent(isc__networker_t *worker, isc__netievent_t *ievent) {
NETIEVENT_CASE(shutdown); NETIEVENT_CASE(shutdown);
NETIEVENT_CASE(resume); NETIEVENT_CASE(resume);
NETIEVENT_CASE_NOMORE(pause); NETIEVENT_CASE_NOMORE(pause);
default: default:
INSIST(0); INSIST(0);
ISC_UNREACHABLE(); ISC_UNREACHABLE();
@ -843,6 +1008,9 @@ NETIEVENT_DEF(resume);
NETIEVENT_DEF(shutdown); NETIEVENT_DEF(shutdown);
NETIEVENT_DEF(stop); NETIEVENT_DEF(stop);
NETIEVENT_TASK_DEF(task);
NETIEVENT_TASK_DEF(privilegedtask);
void void
isc__nm_maybe_enqueue_ievent(isc__networker_t *worker, isc__nm_maybe_enqueue_ievent(isc__networker_t *worker,
isc__netievent_t *event) { isc__netievent_t *event) {
@ -869,6 +1037,10 @@ isc__nm_enqueue_ievent(isc__networker_t *worker, isc__netievent_t *event) {
isc_queue_enqueue(worker->ievents_prio, (uintptr_t)event); isc_queue_enqueue(worker->ievents_prio, (uintptr_t)event);
SIGNAL(&worker->cond); SIGNAL(&worker->cond);
UNLOCK(&worker->lock); UNLOCK(&worker->lock);
} else if (event->type == netievent_privilegedtask) {
isc_queue_enqueue(worker->ievents_priv, (uintptr_t)event);
} else if (event->type == netievent_task) {
isc_queue_enqueue(worker->ievents_task, (uintptr_t)event);
} else { } else {
isc_queue_enqueue(worker->ievents, (uintptr_t)event); isc_queue_enqueue(worker->ievents, (uintptr_t)event);
} }
@ -2537,8 +2709,9 @@ isc__nm_async_shutdown(isc__networker_t *worker, isc__netievent_t *ev0) {
bool bool
isc__nm_acquire_interlocked(isc_nm_t *mgr) { isc__nm_acquire_interlocked(isc_nm_t *mgr) {
LOCK(&mgr->lock); LOCK(&mgr->lock);
bool success = atomic_compare_exchange_strong(&mgr->interlocked, bool success = atomic_compare_exchange_strong(
&(bool){ false }, true); &mgr->interlocked, &(int){ ISC_NETMGR_NON_INTERLOCKED },
isc_nm_tid());
UNLOCK(&mgr->lock); UNLOCK(&mgr->lock);
return (success); return (success);
} }
@ -2546,9 +2719,9 @@ isc__nm_acquire_interlocked(isc_nm_t *mgr) {
void void
isc__nm_drop_interlocked(isc_nm_t *mgr) { isc__nm_drop_interlocked(isc_nm_t *mgr) {
LOCK(&mgr->lock); LOCK(&mgr->lock);
bool success = atomic_compare_exchange_strong(&mgr->interlocked, int tid = atomic_exchange(&mgr->interlocked,
&(bool){ true }, false); ISC_NETMGR_NON_INTERLOCKED);
INSIST(success); INSIST(tid != ISC_NETMGR_NON_INTERLOCKED);
BROADCAST(&mgr->wkstatecond); BROADCAST(&mgr->wkstatecond);
UNLOCK(&mgr->lock); UNLOCK(&mgr->lock);
} }
@ -2556,8 +2729,9 @@ isc__nm_drop_interlocked(isc_nm_t *mgr) {
void void
isc__nm_acquire_interlocked_force(isc_nm_t *mgr) { isc__nm_acquire_interlocked_force(isc_nm_t *mgr) {
LOCK(&mgr->lock); LOCK(&mgr->lock);
while (!atomic_compare_exchange_strong(&mgr->interlocked, while (!atomic_compare_exchange_strong(
&(bool){ false }, true)) &mgr->interlocked, &(int){ ISC_NETMGR_NON_INTERLOCKED },
isc_nm_tid()))
{ {
WAIT(&mgr->wkstatecond, &mgr->lock); WAIT(&mgr->wkstatecond, &mgr->lock);
} }

View File

@ -433,8 +433,8 @@ isc_nm_listentcp(isc_nm_t *mgr, isc_nmiface_t *iface,
REQUIRE(csock->fd >= 0); REQUIRE(csock->fd >= 0);
ievent = isc__nm_get_netievent_tcplisten(mgr, csock); ievent = isc__nm_get_netievent_tcplisten(mgr, csock);
isc__nm_enqueue_ievent(&mgr->workers[i], isc__nm_maybe_enqueue_ievent(&mgr->workers[i],
(isc__netievent_t *)ievent); (isc__netievent_t *)ievent);
} }
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32) #if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
@ -651,15 +651,7 @@ isc__nm_async_tcpstop(isc__networker_t *worker, isc__netievent_t *ev0) {
return; return;
} }
/* stop_tcp_parent(sock);
* If network manager is interlocked, re-enqueue the event for later.
*/
if (!isc__nm_acquire_interlocked(sock->mgr)) {
enqueue_stoplistening(sock);
} else {
stop_tcp_parent(sock);
isc__nm_drop_interlocked(sock->mgr);
}
} }
static void static void
@ -1200,6 +1192,8 @@ timer_close_cb(uv_handle_t *handle) {
static void static void
stop_tcp_child(isc_nmsocket_t *sock) { stop_tcp_child(isc_nmsocket_t *sock) {
bool last_child = false;
REQUIRE(sock->type == isc_nm_tcpsocket); REQUIRE(sock->type == isc_nm_tcpsocket);
REQUIRE(sock->tid == isc_nm_tid()); REQUIRE(sock->tid == isc_nm_tid());
@ -1212,8 +1206,13 @@ stop_tcp_child(isc_nmsocket_t *sock) {
LOCK(&sock->parent->lock); LOCK(&sock->parent->lock);
sock->parent->rchildren -= 1; sock->parent->rchildren -= 1;
last_child = (sock->parent->rchildren == 0);
UNLOCK(&sock->parent->lock); UNLOCK(&sock->parent->lock);
BROADCAST(&sock->parent->cond);
if (last_child) {
atomic_store(&sock->parent->closed, true);
isc__nmsocket_prep_destroy(sock->parent);
}
} }
static void static void
@ -1228,24 +1227,10 @@ stop_tcp_parent(isc_nmsocket_t *sock) {
atomic_store(&csock->active, false); atomic_store(&csock->active, false);
if (csock->tid == isc_nm_tid()) {
stop_tcp_child(csock);
continue;
}
ievent = isc__nm_get_netievent_tcpstop(sock->mgr, csock); ievent = isc__nm_get_netievent_tcpstop(sock->mgr, csock);
isc__nm_enqueue_ievent(&sock->mgr->workers[csock->tid], isc__nm_enqueue_ievent(&sock->mgr->workers[csock->tid],
(isc__netievent_t *)ievent); (isc__netievent_t *)ievent);
} }
LOCK(&sock->lock);
while (sock->rchildren > 0) {
WAIT(&sock->cond, &sock->lock);
}
atomic_store(&sock->closed, true);
UNLOCK(&sock->lock);
isc__nmsocket_prep_destroy(sock);
} }
static void static void

View File

@ -407,8 +407,8 @@ isc_nm_listentcpdns(isc_nm_t *mgr, isc_nmiface_t *iface,
REQUIRE(csock->fd >= 0); REQUIRE(csock->fd >= 0);
ievent = isc__nm_get_netievent_tcpdnslisten(mgr, csock); ievent = isc__nm_get_netievent_tcpdnslisten(mgr, csock);
isc__nm_enqueue_ievent(&mgr->workers[i], isc__nm_maybe_enqueue_ievent(&mgr->workers[i],
(isc__netievent_t *)ievent); (isc__netievent_t *)ievent);
} }
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32) #if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
@ -626,15 +626,7 @@ isc__nm_async_tcpdnsstop(isc__networker_t *worker, isc__netievent_t *ev0) {
return; return;
} }
/* stop_tcpdns_parent(sock);
* If network manager is interlocked, re-enqueue the event for later.
*/
if (!isc__nm_acquire_interlocked(sock->mgr)) {
enqueue_stoplistening(sock);
} else {
stop_tcpdns_parent(sock);
isc__nm_drop_interlocked(sock->mgr);
}
} }
void void
@ -1230,6 +1222,8 @@ timer_close_cb(uv_handle_t *timer) {
static void static void
stop_tcpdns_child(isc_nmsocket_t *sock) { stop_tcpdns_child(isc_nmsocket_t *sock) {
bool last_child = false;
REQUIRE(sock->type == isc_nm_tcpdnssocket); REQUIRE(sock->type == isc_nm_tcpdnssocket);
REQUIRE(sock->tid == isc_nm_tid()); REQUIRE(sock->tid == isc_nm_tid());
@ -1242,8 +1236,13 @@ stop_tcpdns_child(isc_nmsocket_t *sock) {
LOCK(&sock->parent->lock); LOCK(&sock->parent->lock);
sock->parent->rchildren -= 1; sock->parent->rchildren -= 1;
last_child = (sock->parent->rchildren == 0);
UNLOCK(&sock->parent->lock); UNLOCK(&sock->parent->lock);
BROADCAST(&sock->parent->cond);
if (last_child) {
atomic_store(&sock->parent->closed, true);
isc__nmsocket_prep_destroy(sock->parent);
}
} }
static void static void
@ -1258,24 +1257,10 @@ stop_tcpdns_parent(isc_nmsocket_t *sock) {
atomic_store(&csock->active, false); atomic_store(&csock->active, false);
if (csock->tid == isc_nm_tid()) {
stop_tcpdns_child(csock);
continue;
}
ievent = isc__nm_get_netievent_tcpdnsstop(sock->mgr, csock); ievent = isc__nm_get_netievent_tcpdnsstop(sock->mgr, csock);
isc__nm_enqueue_ievent(&sock->mgr->workers[csock->tid], isc__nm_enqueue_ievent(&sock->mgr->workers[csock->tid],
(isc__netievent_t *)ievent); (isc__netievent_t *)ievent);
} }
LOCK(&sock->lock);
while (sock->rchildren > 0) {
WAIT(&sock->cond, &sock->lock);
}
atomic_store(&sock->closed, true);
UNLOCK(&sock->lock);
isc__nmsocket_prep_destroy(sock);
} }
static void static void

View File

@ -475,8 +475,8 @@ isc_nm_listentlsdns(isc_nm_t *mgr, isc_nmiface_t *iface,
REQUIRE(csock->fd >= 0); REQUIRE(csock->fd >= 0);
ievent = isc__nm_get_netievent_tlsdnslisten(mgr, csock); ievent = isc__nm_get_netievent_tlsdnslisten(mgr, csock);
isc__nm_enqueue_ievent(&mgr->workers[i], isc__nm_maybe_enqueue_ievent(&mgr->workers[i],
(isc__netievent_t *)ievent); (isc__netievent_t *)ievent);
} }
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32) #if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
@ -770,16 +770,7 @@ isc__nm_async_tlsdnsstop(isc__networker_t *worker, isc__netievent_t *ev0) {
return; return;
} }
/* stop_tlsdns_parent(sock);
* If network manager is interlocked, re-enqueue the event for
* later.
*/
if (!isc__nm_acquire_interlocked(sock->mgr)) {
enqueue_stoplistening(sock);
} else {
stop_tlsdns_parent(sock);
isc__nm_drop_interlocked(sock->mgr);
}
} }
void void
@ -1777,6 +1768,8 @@ timer_close_cb(uv_handle_t *handle) {
static void static void
stop_tlsdns_child(isc_nmsocket_t *sock) { stop_tlsdns_child(isc_nmsocket_t *sock) {
bool last_child = false;
REQUIRE(sock->type == isc_nm_tlsdnssocket); REQUIRE(sock->type == isc_nm_tlsdnssocket);
REQUIRE(sock->tid == isc_nm_tid()); REQUIRE(sock->tid == isc_nm_tid());
@ -1789,8 +1782,13 @@ stop_tlsdns_child(isc_nmsocket_t *sock) {
LOCK(&sock->parent->lock); LOCK(&sock->parent->lock);
sock->parent->rchildren -= 1; sock->parent->rchildren -= 1;
last_child = (sock->parent->rchildren == 0);
UNLOCK(&sock->parent->lock); UNLOCK(&sock->parent->lock);
BROADCAST(&sock->parent->cond);
if (last_child) {
atomic_store(&sock->parent->closed, true);
isc__nmsocket_prep_destroy(sock->parent);
}
} }
static void static void
@ -1806,24 +1804,10 @@ stop_tlsdns_parent(isc_nmsocket_t *sock) {
atomic_store(&csock->active, false); atomic_store(&csock->active, false);
if (csock->tid == isc_nm_tid()) {
stop_tlsdns_child(csock);
continue;
}
ievent = isc__nm_get_netievent_tlsdnsstop(sock->mgr, csock); ievent = isc__nm_get_netievent_tlsdnsstop(sock->mgr, csock);
isc__nm_enqueue_ievent(&sock->mgr->workers[csock->tid], isc__nm_enqueue_ievent(&sock->mgr->workers[csock->tid],
(isc__netievent_t *)ievent); (isc__netievent_t *)ievent);
} }
LOCK(&sock->lock);
while (sock->rchildren > 0) {
WAIT(&sock->cond, &sock->lock);
}
atomic_store(&sock->closed, true);
UNLOCK(&sock->lock);
isc__nmsocket_prep_destroy(sock);
} }
static void static void

View File

@ -140,8 +140,8 @@ isc_nm_listenudp(isc_nm_t *mgr, isc_nmiface_t *iface, isc_nm_recv_cb_t cb,
REQUIRE(csock->fd >= 0); REQUIRE(csock->fd >= 0);
ievent = isc__nm_get_netievent_udplisten(mgr, csock); ievent = isc__nm_get_netievent_udplisten(mgr, csock);
isc__nm_enqueue_ievent(&mgr->workers[i], isc__nm_maybe_enqueue_ievent(&mgr->workers[i],
(isc__netievent_t *)ievent); (isc__netievent_t *)ievent);
} }
#if !HAVE_SO_REUSEPORT_LB && !defined(WIN32) #if !HAVE_SO_REUSEPORT_LB && !defined(WIN32)
@ -324,12 +324,7 @@ isc__nm_async_udpstop(isc__networker_t *worker, isc__netievent_t *ev0) {
/* /*
* If network manager is paused, re-enqueue the event for later. * If network manager is paused, re-enqueue the event for later.
*/ */
if (!isc__nm_acquire_interlocked(sock->mgr)) { stop_udp_parent(sock);
enqueue_stoplistening(sock);
} else {
stop_udp_parent(sock);
isc__nm_drop_interlocked(sock->mgr);
}
} }
/* /*
@ -435,20 +430,13 @@ void
isc__nm_udp_send(isc_nmhandle_t *handle, const isc_region_t *region, isc__nm_udp_send(isc_nmhandle_t *handle, const isc_region_t *region,
isc_nm_cb_t cb, void *cbarg) { isc_nm_cb_t cb, void *cbarg) {
isc_nmsocket_t *sock = handle->sock; isc_nmsocket_t *sock = handle->sock;
isc_nmsocket_t *psock = NULL, *rsock = sock; isc_nmsocket_t *rsock = NULL;
isc_sockaddr_t *peer = &handle->peer; isc_sockaddr_t *peer = &handle->peer;
isc__nm_uvreq_t *uvreq = NULL; isc__nm_uvreq_t *uvreq = NULL;
uint32_t maxudp = atomic_load(&sock->mgr->maxudp); uint32_t maxudp = atomic_load(&sock->mgr->maxudp);
int ntid; int ntid;
uvreq = isc__nm_uvreq_get(sock->mgr, sock); INSIST(sock->type == isc_nm_udpsocket);
uvreq->uvbuf.base = (char *)region->base;
uvreq->uvbuf.len = region->length;
isc_nmhandle_attach(handle, &uvreq->handle);
uvreq->cb.send = cb;
uvreq->cbarg = cbarg;
/* /*
* We're simulating a firewall blocking UDP packets bigger than * We're simulating a firewall blocking UDP packets bigger than
@ -459,41 +447,45 @@ isc__nm_udp_send(isc_nmhandle_t *handle, const isc_region_t *region,
* we need to do so here. * we need to do so here.
*/ */
if (maxudp != 0 && region->length > maxudp) { if (maxudp != 0 && region->length > maxudp) {
isc__nm_uvreq_put(&uvreq, sock); isc_nmhandle_detach(&handle);
isc_nmhandle_detach(&handle); /* FIXME? */
return; return;
} }
if (sock->type == isc_nm_udpsocket && !atomic_load(&sock->client)) { if (atomic_load(&sock->client)) {
INSIST(sock->parent != NULL); /*
psock = sock->parent; * When we are sending from the client socket, we directly use
} else if (sock->type == isc_nm_udplistener) { * the socket provided.
psock = sock; */
} else if (!atomic_load(&sock->client)) { rsock = sock;
INSIST(0); goto send;
ISC_UNREACHABLE();
}
/*
* If we're in the network thread, we can send directly. If the
* handle is associated with a UDP socket, we can reuse its
* thread (assuming CPU affinity). Otherwise, pick a thread at
* random.
*/
if (isc__nm_in_netthread()) {
ntid = isc_nm_tid();
} else if (sock->type == isc_nm_udpsocket &&
!atomic_load(&sock->client)) {
ntid = sock->tid;
} else { } else {
ntid = (int)isc_random_uniform(sock->nchildren); /*
* When we are sending from the server socket, we either use the
* socket associated with the network thread we are in, or we
* use the thread from the socket associated with the handle.
*/
INSIST(sock->parent != NULL);
if (isc__nm_in_netthread()) {
ntid = isc_nm_tid();
} else {
ntid = sock->tid;
}
rsock = &sock->parent->children[ntid];
} }
if (psock != NULL) { send:
rsock = &psock->children[ntid]; uvreq = isc__nm_uvreq_get(rsock->mgr, rsock);
} uvreq->uvbuf.base = (char *)region->base;
uvreq->uvbuf.len = region->length;
isc_nmhandle_attach(handle, &uvreq->handle);
uvreq->cb.send = cb;
uvreq->cbarg = cbarg;
if (isc_nm_tid() == rsock->tid) { if (isc_nm_tid() == rsock->tid) {
REQUIRE(rsock->tid == isc_nm_tid());
isc__netievent_udpsend_t ievent = { .sock = rsock, isc__netievent_udpsend_t ievent = { .sock = rsock,
.req = uvreq, .req = uvreq,
.peer = *peer }; .peer = *peer };
@ -544,6 +536,7 @@ udp_send_cb(uv_udp_send_t *req, int status) {
REQUIRE(VALID_UVREQ(uvreq)); REQUIRE(VALID_UVREQ(uvreq));
REQUIRE(VALID_NMHANDLE(uvreq->handle)); REQUIRE(VALID_NMHANDLE(uvreq->handle));
REQUIRE(sock->tid == isc_nm_tid());
if (status < 0) { if (status < 0) {
result = isc__nm_uverr2result(status); result = isc__nm_uverr2result(status);
@ -976,6 +969,8 @@ stop_udp_child(isc_nmsocket_t *sock) {
REQUIRE(sock->type == isc_nm_udpsocket); REQUIRE(sock->type == isc_nm_udpsocket);
REQUIRE(sock->tid == isc_nm_tid()); REQUIRE(sock->tid == isc_nm_tid());
bool last_child = false;
if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false }, if (!atomic_compare_exchange_strong(&sock->closing, &(bool){ false },
true)) { true)) {
return; return;
@ -985,8 +980,13 @@ stop_udp_child(isc_nmsocket_t *sock) {
LOCK(&sock->parent->lock); LOCK(&sock->parent->lock);
sock->parent->rchildren -= 1; sock->parent->rchildren -= 1;
last_child = (sock->parent->rchildren == 0);
UNLOCK(&sock->parent->lock); UNLOCK(&sock->parent->lock);
BROADCAST(&sock->parent->cond);
if (last_child) {
atomic_store(&sock->parent->closed, true);
isc__nmsocket_prep_destroy(sock->parent);
}
} }
static void static void
@ -1001,24 +1001,10 @@ stop_udp_parent(isc_nmsocket_t *sock) {
atomic_store(&csock->active, false); atomic_store(&csock->active, false);
if (csock->tid == isc_nm_tid()) {
stop_udp_child(csock);
continue;
}
ievent = isc__nm_get_netievent_udpstop(sock->mgr, csock); ievent = isc__nm_get_netievent_udpstop(sock->mgr, csock);
isc__nm_enqueue_ievent(&sock->mgr->workers[i], isc__nm_enqueue_ievent(&sock->mgr->workers[i],
(isc__netievent_t *)ievent); (isc__netievent_t *)ievent);
} }
LOCK(&sock->lock);
while (sock->rchildren > 0) {
WAIT(&sock->cond, &sock->lock);
}
atomic_store(&sock->closed, true);
UNLOCK(&sock->lock);
isc__nmsocket_prep_destroy(sock);
} }
static void static void

File diff suppressed because it is too large Load Diff

View File

@ -1,26 +0,0 @@
/*
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, you can obtain one at https://mozilla.org/MPL/2.0/.
*
* See the COPYRIGHT file distributed with this work for additional
* information regarding copyright ownership.
*/
#ifndef ISC_TASK_P_H
#define ISC_TASK_P_H
/*! \file */
/*%
* These functions allow unit tests to manipulate the processing
* of the task queue. They are not intended as part of the public API.
*/
void
isc__taskmgr_pause(isc_taskmgr_t *taskmgr);
void
isc__taskmgr_resume(isc_taskmgr_t *taskmgr);
#endif /* ISC_TASK_P_H */

View File

@ -57,10 +57,9 @@ alloc_pool(isc_taskmgr_t *tmgr, isc_mem_t *mctx, unsigned int ntasks,
isc_result_t isc_result_t
isc_taskpool_create(isc_taskmgr_t *tmgr, isc_mem_t *mctx, unsigned int ntasks, isc_taskpool_create(isc_taskmgr_t *tmgr, isc_mem_t *mctx, unsigned int ntasks,
unsigned int quantum, isc_taskpool_t **poolp) { unsigned int quantum, bool priv, isc_taskpool_t **poolp) {
unsigned int i; unsigned int i;
isc_taskpool_t *pool = NULL; isc_taskpool_t *pool = NULL;
isc_result_t result;
INSIST(ntasks > 0); INSIST(ntasks > 0);
@ -69,11 +68,13 @@ isc_taskpool_create(isc_taskmgr_t *tmgr, isc_mem_t *mctx, unsigned int ntasks,
/* Create the tasks */ /* Create the tasks */
for (i = 0; i < ntasks; i++) { for (i = 0; i < ntasks; i++) {
result = isc_task_create(tmgr, quantum, &pool->tasks[i]); isc_result_t result = isc_task_create_bound(tmgr, quantum,
&pool->tasks[i], i);
if (result != ISC_R_SUCCESS) { if (result != ISC_R_SUCCESS) {
isc_taskpool_destroy(&pool); isc_taskpool_destroy(&pool);
return (result); return (result);
} }
isc_task_setprivilege(pool->tasks[i], priv);
isc_task_setname(pool->tasks[i], "taskpool", NULL); isc_task_setname(pool->tasks[i], "taskpool", NULL);
} }
@ -93,9 +94,8 @@ isc_taskpool_size(isc_taskpool_t *pool) {
} }
isc_result_t isc_result_t
isc_taskpool_expand(isc_taskpool_t **sourcep, unsigned int size, isc_taskpool_expand(isc_taskpool_t **sourcep, unsigned int size, bool priv,
isc_taskpool_t **targetp) { isc_taskpool_t **targetp) {
isc_result_t result;
isc_taskpool_t *pool; isc_taskpool_t *pool;
REQUIRE(sourcep != NULL && *sourcep != NULL); REQUIRE(sourcep != NULL && *sourcep != NULL);
@ -119,13 +119,15 @@ isc_taskpool_expand(isc_taskpool_t **sourcep, unsigned int size,
/* Create new tasks */ /* Create new tasks */
for (i = pool->ntasks; i < size; i++) { for (i = pool->ntasks; i < size; i++) {
result = isc_task_create(pool->tmgr, pool->quantum, isc_result_t result =
&newpool->tasks[i]); isc_task_create_bound(pool->tmgr, pool->quantum,
&newpool->tasks[i], i);
if (result != ISC_R_SUCCESS) { if (result != ISC_R_SUCCESS) {
*sourcep = pool; *sourcep = pool;
isc_taskpool_destroy(&newpool); isc_taskpool_destroy(&newpool);
return (result); return (result);
} }
isc_task_setprivilege(newpool->tasks[i], priv);
isc_task_setname(newpool->tasks[i], "taskpool", NULL); isc_task_setname(newpool->tasks[i], "taskpool", NULL);
} }
@ -151,16 +153,3 @@ isc_taskpool_destroy(isc_taskpool_t **poolp) {
pool->ntasks * sizeof(isc_task_t *)); pool->ntasks * sizeof(isc_task_t *));
isc_mem_putanddetach(&pool->mctx, pool, sizeof(*pool)); isc_mem_putanddetach(&pool->mctx, pool, sizeof(*pool));
} }
void
isc_taskpool_setprivilege(isc_taskpool_t *pool, bool priv) {
unsigned int i;
REQUIRE(pool != NULL);
for (i = 0; i < pool->ntasks; i++) {
if (pool->tasks[i] != NULL) {
isc_task_setprivilege(pool->tasks[i], priv);
}
}
}

View File

@ -64,12 +64,12 @@ cleanup_managers(void) {
if (taskmgr != NULL) { if (taskmgr != NULL) {
isc_taskmgr_destroy(&taskmgr); isc_taskmgr_destroy(&taskmgr);
} }
if (netmgr != NULL) {
isc_nm_destroy(&netmgr);
}
if (timermgr != NULL) { if (timermgr != NULL) {
isc_timermgr_destroy(&timermgr); isc_timermgr_destroy(&timermgr);
} }
if (netmgr != NULL) {
isc_nm_detach(&netmgr);
}
} }
static isc_result_t static isc_result_t
@ -89,7 +89,7 @@ create_managers(unsigned int workers) {
isc_hp_init(6 * workers); isc_hp_init(6 * workers);
netmgr = isc_nm_start(test_mctx, workers); netmgr = isc_nm_start(test_mctx, workers);
CHECK(isc_taskmgr_create(test_mctx, workers, 0, netmgr, &taskmgr)); CHECK(isc_taskmgr_create(test_mctx, 0, netmgr, &taskmgr));
CHECK(isc_task_create(taskmgr, 0, &maintask)); CHECK(isc_task_create(taskmgr, 0, &maintask));
isc_taskmgr_setexcltask(taskmgr, maintask); isc_taskmgr_setexcltask(taskmgr, maintask);

View File

@ -37,7 +37,6 @@
#include <isc/timer.h> #include <isc/timer.h>
#include <isc/util.h> #include <isc/util.h>
#include "../task_p.h"
#include "isctest.h" #include "isctest.h"
/* Set to true (or use -v option) for verbose output */ /* Set to true (or use -v option) for verbose output */
@ -120,6 +119,8 @@ set(isc_task_t *task, isc_event_t *event) {
atomic_store(value, atomic_fetch_add(&counter, 1)); atomic_store(value, atomic_fetch_add(&counter, 1));
} }
#include <isc/thread.h>
static void static void
set_and_drop(isc_task_t *task, isc_event_t *event) { set_and_drop(isc_task_t *task, isc_event_t *event) {
atomic_int_fast32_t *value = (atomic_int_fast32_t *)event->ev_arg; atomic_int_fast32_t *value = (atomic_int_fast32_t *)event->ev_arg;
@ -128,8 +129,7 @@ set_and_drop(isc_task_t *task, isc_event_t *event) {
isc_event_free(&event); isc_event_free(&event);
LOCK(&lock); LOCK(&lock);
atomic_store(value, (int)isc_taskmgr_mode(taskmgr)); atomic_store(value, atomic_fetch_add(&counter, 1));
atomic_fetch_add(&counter, 1);
UNLOCK(&lock); UNLOCK(&lock);
} }
@ -204,17 +204,17 @@ privileged_events(void **state) {
UNUSED(state); UNUSED(state);
atomic_init(&counter, 1); atomic_init(&counter, 1);
atomic_init(&a, 0); atomic_init(&a, -1);
atomic_init(&b, 0); atomic_init(&b, -1);
atomic_init(&c, 0); atomic_init(&c, -1);
atomic_init(&d, 0); atomic_init(&d, -1);
atomic_init(&e, 0); atomic_init(&e, -1);
/* /*
* Pause the task manager so we can fill up the work queue * Pause the net/task manager so we can fill up the work
* without things happening while we do it. * queue without things happening while we do it.
*/ */
isc__taskmgr_pause(taskmgr); isc_nm_pause(netmgr);
result = isc_task_create(taskmgr, 0, &task1); result = isc_task_create(taskmgr, 0, &task1);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
@ -233,7 +233,7 @@ privileged_events(void **state) {
&a, sizeof(isc_event_t)); &a, sizeof(isc_event_t));
assert_non_null(event); assert_non_null(event);
assert_int_equal(atomic_load(&a), 0); assert_int_equal(atomic_load(&a), -1);
isc_task_send(task1, &event); isc_task_send(task1, &event);
/* Second event: not privileged */ /* Second event: not privileged */
@ -241,7 +241,7 @@ privileged_events(void **state) {
&b, sizeof(isc_event_t)); &b, sizeof(isc_event_t));
assert_non_null(event); assert_non_null(event);
assert_int_equal(atomic_load(&b), 0); assert_int_equal(atomic_load(&b), -1);
isc_task_send(task2, &event); isc_task_send(task2, &event);
/* Third event: privileged */ /* Third event: privileged */
@ -249,7 +249,7 @@ privileged_events(void **state) {
&c, sizeof(isc_event_t)); &c, sizeof(isc_event_t));
assert_non_null(event); assert_non_null(event);
assert_int_equal(atomic_load(&c), 0); assert_int_equal(atomic_load(&c), -1);
isc_task_send(task1, &event); isc_task_send(task1, &event);
/* Fourth event: privileged */ /* Fourth event: privileged */
@ -257,7 +257,7 @@ privileged_events(void **state) {
&d, sizeof(isc_event_t)); &d, sizeof(isc_event_t));
assert_non_null(event); assert_non_null(event);
assert_int_equal(atomic_load(&d), 0); assert_int_equal(atomic_load(&d), -1);
isc_task_send(task1, &event); isc_task_send(task1, &event);
/* Fifth event: not privileged */ /* Fifth event: not privileged */
@ -265,19 +265,15 @@ privileged_events(void **state) {
&e, sizeof(isc_event_t)); &e, sizeof(isc_event_t));
assert_non_null(event); assert_non_null(event);
assert_int_equal(atomic_load(&e), 0); assert_int_equal(atomic_load(&e), -1);
isc_task_send(task2, &event); isc_task_send(task2, &event);
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_normal); isc_nm_resume(netmgr);
isc_taskmgr_setprivilegedmode(taskmgr);
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_privileged);
isc__taskmgr_resume(taskmgr);
/* We're waiting for *all* variables to be set */ /* We're waiting for *all* variables to be set */
while ((atomic_load(&a) == 0 || atomic_load(&b) == 0 || while ((atomic_load(&a) < 0 || atomic_load(&b) < 0 ||
atomic_load(&c) == 0 || atomic_load(&d) == 0 || atomic_load(&c) < 0 || atomic_load(&d) < 0 ||
atomic_load(&e) == 0) && atomic_load(&e) < 0) &&
i++ < 5000) i++ < 5000)
{ {
isc_test_nap(1000); isc_test_nap(1000);
@ -293,16 +289,14 @@ privileged_events(void **state) {
assert_true(atomic_load(&d) <= 3); assert_true(atomic_load(&d) <= 3);
/* ...and the non-privileged tasks that set b and e, last */ /* ...and the non-privileged tasks that set b and e, last */
assert_true(atomic_load(&b) >= 4); assert_true(atomic_load(&b) > 3);
assert_true(atomic_load(&e) >= 4); assert_true(atomic_load(&e) > 3);
assert_int_equal(atomic_load(&counter), 6); assert_int_equal(atomic_load(&counter), 6);
isc_task_setprivilege(task1, false); isc_task_setprivilege(task1, false);
assert_false(isc_task_privilege(task1)); assert_false(isc_task_privilege(task1));
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_normal);
isc_task_destroy(&task1); isc_task_destroy(&task1);
assert_null(task1); assert_null(task1);
isc_task_destroy(&task2); isc_task_destroy(&task2);
@ -331,10 +325,10 @@ privilege_drop(void **state) {
atomic_init(&e, -1); atomic_init(&e, -1);
/* /*
* Pause the task manager so we can fill up the work queue * Pause the net/task manager so we can fill up the work queue
* without things happening while we do it. * without things happening while we do it.
*/ */
isc__taskmgr_pause(taskmgr); isc_nm_pause(netmgr);
result = isc_task_create(taskmgr, 0, &task1); result = isc_task_create(taskmgr, 0, &task1);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
@ -388,11 +382,7 @@ privilege_drop(void **state) {
assert_int_equal(atomic_load(&e), -1); assert_int_equal(atomic_load(&e), -1);
isc_task_send(task2, &event); isc_task_send(task2, &event);
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_normal); isc_nm_resume(netmgr);
isc_taskmgr_setprivilegedmode(taskmgr);
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_privileged);
isc__taskmgr_resume(taskmgr);
/* We're waiting for all variables to be set. */ /* We're waiting for all variables to be set. */
while ((atomic_load(&a) == -1 || atomic_load(&b) == -1 || while ((atomic_load(&a) == -1 || atomic_load(&b) == -1 ||
@ -407,19 +397,17 @@ privilege_drop(void **state) {
* We need to check that all privilege mode events were fired * We need to check that all privilege mode events were fired
* in privileged mode, and non privileged in non-privileged. * in privileged mode, and non privileged in non-privileged.
*/ */
assert_true(atomic_load(&a) == isc_taskmgrmode_privileged || assert_true(atomic_load(&a) <= 3);
atomic_load(&c) == isc_taskmgrmode_privileged || assert_true(atomic_load(&c) <= 3);
atomic_load(&d) == isc_taskmgrmode_privileged); assert_true(atomic_load(&d) <= 3);
/* ...and neither of the non-privileged tasks did... */ /* ...and neither of the non-privileged tasks did... */
assert_true(atomic_load(&b) == isc_taskmgrmode_normal || assert_true(atomic_load(&b) > 3);
atomic_load(&e) == isc_taskmgrmode_normal); assert_true(atomic_load(&e) > 3);
/* ...but all five of them did run. */ /* ...but all five of them did run. */
assert_int_equal(atomic_load(&counter), 6); assert_int_equal(atomic_load(&counter), 6);
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_normal);
isc_task_destroy(&task1); isc_task_destroy(&task1);
assert_null(task1); assert_null(task1);
isc_task_destroy(&task2); isc_task_destroy(&task2);
@ -695,6 +683,7 @@ exclusive_cb(isc_task_t *task, isc_event_t *event) {
if (atomic_load(&done)) { if (atomic_load(&done)) {
isc_mem_put(event->ev_destroy_arg, event->ev_arg, sizeof(int)); isc_mem_put(event->ev_destroy_arg, event->ev_arg, sizeof(int));
isc_event_free(&event); isc_event_free(&event);
atomic_fetch_sub(&counter, 1);
} else { } else {
isc_task_send(task, &event); isc_task_send(task, &event);
} }
@ -708,6 +697,8 @@ task_exclusive(void **state) {
UNUSED(state); UNUSED(state);
atomic_init(&counter, 0);
for (i = 0; i < 10; i++) { for (i = 0; i < 10; i++) {
isc_event_t *event = NULL; isc_event_t *event = NULL;
int *v; int *v;
@ -732,11 +723,16 @@ task_exclusive(void **state) {
assert_non_null(event); assert_non_null(event);
isc_task_send(tasks[i], &event); isc_task_send(tasks[i], &event);
atomic_fetch_add(&counter, 1);
} }
for (i = 0; i < 10; i++) { for (i = 0; i < 10; i++) {
isc_task_detach(&tasks[i]); isc_task_detach(&tasks[i]);
} }
while (atomic_load(&counter) > 0) {
isc_test_nap(1000);
}
} }
/* /*
@ -805,7 +801,8 @@ manytasks(void **state) {
isc_mem_debugging = ISC_MEM_DEBUGRECORD; isc_mem_debugging = ISC_MEM_DEBUGRECORD;
isc_mem_create(&mctx); isc_mem_create(&mctx);
result = isc_taskmgr_create(mctx, 4, 0, NULL, &taskmgr); netmgr = isc_nm_start(mctx, 4);
result = isc_taskmgr_create(mctx, 0, netmgr, &taskmgr);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
atomic_init(&done, false); atomic_init(&done, false);
@ -822,6 +819,7 @@ manytasks(void **state) {
UNLOCK(&lock); UNLOCK(&lock);
isc_taskmgr_destroy(&taskmgr); isc_taskmgr_destroy(&taskmgr);
isc_nm_destroy(&netmgr);
isc_mem_destroy(&mctx); isc_mem_destroy(&mctx);
isc_condition_destroy(&cv); isc_condition_destroy(&cv);
isc_mutex_destroy(&lock); isc_mutex_destroy(&lock);

View File

@ -28,6 +28,9 @@
#include "isctest.h" #include "isctest.h"
#define TASK_MAGIC ISC_MAGIC('T', 'A', 'S', 'K')
#define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
static int static int
_setup(void **state) { _setup(void **state) {
isc_result_t result; isc_result_t result;
@ -57,7 +60,7 @@ create_pool(void **state) {
UNUSED(state); UNUSED(state);
result = isc_taskpool_create(taskmgr, test_mctx, 8, 2, &pool); result = isc_taskpool_create(taskmgr, test_mctx, 8, 2, false, &pool);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
assert_int_equal(isc_taskpool_size(pool), 8); assert_int_equal(isc_taskpool_size(pool), 8);
@ -73,13 +76,13 @@ expand_pool(void **state) {
UNUSED(state); UNUSED(state);
result = isc_taskpool_create(taskmgr, test_mctx, 10, 2, &pool1); result = isc_taskpool_create(taskmgr, test_mctx, 10, 2, false, &pool1);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
assert_int_equal(isc_taskpool_size(pool1), 10); assert_int_equal(isc_taskpool_size(pool1), 10);
/* resizing to a smaller size should have no effect */ /* resizing to a smaller size should have no effect */
hold = pool1; hold = pool1;
result = isc_taskpool_expand(&pool1, 5, &pool2); result = isc_taskpool_expand(&pool1, 5, false, &pool2);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
assert_int_equal(isc_taskpool_size(pool2), 10); assert_int_equal(isc_taskpool_size(pool2), 10);
assert_ptr_equal(pool2, hold); assert_ptr_equal(pool2, hold);
@ -89,7 +92,7 @@ expand_pool(void **state) {
/* resizing to the same size should have no effect */ /* resizing to the same size should have no effect */
hold = pool1; hold = pool1;
result = isc_taskpool_expand(&pool1, 10, &pool2); result = isc_taskpool_expand(&pool1, 10, false, &pool2);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
assert_int_equal(isc_taskpool_size(pool2), 10); assert_int_equal(isc_taskpool_size(pool2), 10);
assert_ptr_equal(pool2, hold); assert_ptr_equal(pool2, hold);
@ -99,7 +102,7 @@ expand_pool(void **state) {
/* resizing to larger size should make a new pool */ /* resizing to larger size should make a new pool */
hold = pool1; hold = pool1;
result = isc_taskpool_expand(&pool1, 20, &pool2); result = isc_taskpool_expand(&pool1, 20, false, &pool2);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
assert_int_equal(isc_taskpool_size(pool2), 20); assert_int_equal(isc_taskpool_size(pool2), 20);
assert_ptr_not_equal(pool2, hold); assert_ptr_not_equal(pool2, hold);
@ -118,19 +121,19 @@ get_tasks(void **state) {
UNUSED(state); UNUSED(state);
result = isc_taskpool_create(taskmgr, test_mctx, 2, 2, &pool); result = isc_taskpool_create(taskmgr, test_mctx, 2, 2, false, &pool);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
assert_int_equal(isc_taskpool_size(pool), 2); assert_int_equal(isc_taskpool_size(pool), 2);
/* two tasks in pool; make sure we can access them more than twice */ /* two tasks in pool; make sure we can access them more than twice */
isc_taskpool_gettask(pool, &task1); isc_taskpool_gettask(pool, &task1);
assert_non_null(task1); assert_true(VALID_TASK(task1));
isc_taskpool_gettask(pool, &task2); isc_taskpool_gettask(pool, &task2);
assert_non_null(task2); assert_true(VALID_TASK(task2));
isc_taskpool_gettask(pool, &task3); isc_taskpool_gettask(pool, &task3);
assert_non_null(task3); assert_true(VALID_TASK(task3));
isc_task_destroy(&task1); isc_task_destroy(&task1);
isc_task_destroy(&task2); isc_task_destroy(&task2);
@ -149,30 +152,22 @@ set_privilege(void **state) {
UNUSED(state); UNUSED(state);
result = isc_taskpool_create(taskmgr, test_mctx, 2, 2, &pool); result = isc_taskpool_create(taskmgr, test_mctx, 2, 2, true, &pool);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
assert_int_equal(isc_taskpool_size(pool), 2); assert_int_equal(isc_taskpool_size(pool), 2);
isc_taskpool_setprivilege(pool, true);
isc_taskpool_gettask(pool, &task1); isc_taskpool_gettask(pool, &task1);
isc_taskpool_gettask(pool, &task2); isc_taskpool_gettask(pool, &task2);
isc_taskpool_gettask(pool, &task3); isc_taskpool_gettask(pool, &task3);
assert_non_null(task1); assert_true(VALID_TASK(task1));
assert_non_null(task2); assert_true(VALID_TASK(task2));
assert_non_null(task3); assert_true(VALID_TASK(task3));
assert_true(isc_task_privilege(task1)); assert_true(isc_task_privilege(task1));
assert_true(isc_task_privilege(task2)); assert_true(isc_task_privilege(task2));
assert_true(isc_task_privilege(task3)); assert_true(isc_task_privilege(task3));
isc_taskpool_setprivilege(pool, false);
assert_false(isc_task_privilege(task1));
assert_false(isc_task_privilege(task2));
assert_false(isc_task_privilege(task3));
isc_task_destroy(&task1); isc_task_destroy(&task1);
isc_task_destroy(&task2); isc_task_destroy(&task2);
isc_task_destroy(&task3); isc_task_destroy(&task3);

View File

@ -435,8 +435,8 @@ reset(void **state) {
setup_test(isc_timertype_ticker, &expires, &interval, test_reset); setup_test(isc_timertype_ticker, &expires, &interval, test_reset);
} }
static int startflag; static atomic_bool startflag;
static int shutdownflag; static atomic_bool shutdownflag;
static isc_timer_t *tickertimer = NULL; static isc_timer_t *tickertimer = NULL;
static isc_timer_t *oncetimer = NULL; static isc_timer_t *oncetimer = NULL;
static isc_task_t *task1 = NULL; static isc_task_t *task1 = NULL;
@ -447,23 +447,6 @@ static isc_task_t *task2 = NULL;
* in its queue, until signaled by task2. * in its queue, until signaled by task2.
*/ */
static void
start_event(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
if (verbose) {
print_message("# start_event\n");
}
LOCK(&mx);
while (!startflag) {
(void)isc_condition_wait(&cv, &mx);
}
UNLOCK(&mx);
isc_event_free(&event);
}
static void static void
tick_event(isc_task_t *task, isc_event_t *event) { tick_event(isc_task_t *task, isc_event_t *event) {
isc_result_t result; isc_result_t result;
@ -472,6 +455,14 @@ tick_event(isc_task_t *task, isc_event_t *event) {
UNUSED(task); UNUSED(task);
if (!atomic_load(&startflag)) {
if (verbose) {
print_message("# tick_event %d\n", -1);
}
isc_event_free(&event);
return;
}
int tick = atomic_fetch_add(&eventcnt, 1); int tick = atomic_fetch_add(&eventcnt, 1);
if (verbose) { if (verbose) {
print_message("# tick_event %d\n", tick); print_message("# tick_event %d\n", tick);
@ -496,8 +487,6 @@ tick_event(isc_task_t *task, isc_event_t *event) {
static void static void
once_event(isc_task_t *task, isc_event_t *event) { once_event(isc_task_t *task, isc_event_t *event) {
isc_result_t result;
if (verbose) { if (verbose) {
print_message("# once_event\n"); print_message("# once_event\n");
} }
@ -505,12 +494,7 @@ once_event(isc_task_t *task, isc_event_t *event) {
/* /*
* Allow task1 to start processing events. * Allow task1 to start processing events.
*/ */
LOCK(&mx); atomic_store(&startflag, true);
startflag = 1;
result = isc_condition_broadcast(&cv);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
UNLOCK(&mx);
isc_event_free(&event); isc_event_free(&event);
isc_task_shutdown(task); isc_task_shutdown(task);
@ -518,8 +502,6 @@ once_event(isc_task_t *task, isc_event_t *event) {
static void static void
shutdown_purge(isc_task_t *task, isc_event_t *event) { shutdown_purge(isc_task_t *task, isc_event_t *event) {
isc_result_t result;
UNUSED(task); UNUSED(task);
UNUSED(event); UNUSED(event);
@ -530,12 +512,7 @@ shutdown_purge(isc_task_t *task, isc_event_t *event) {
/* /*
* Signal shutdown processing complete. * Signal shutdown processing complete.
*/ */
LOCK(&mx); atomic_store(&shutdownflag, 1);
shutdownflag = 1;
result = isc_condition_signal(&cv);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
UNLOCK(&mx);
isc_event_free(&event); isc_event_free(&event);
} }
@ -544,22 +521,17 @@ shutdown_purge(isc_task_t *task, isc_event_t *event) {
static void static void
purge(void **state) { purge(void **state) {
isc_result_t result; isc_result_t result;
isc_event_t *event = NULL;
isc_time_t expires; isc_time_t expires;
isc_interval_t interval; isc_interval_t interval;
UNUSED(state); UNUSED(state);
startflag = 0; atomic_init(&startflag, 0);
shutdownflag = 0; atomic_init(&shutdownflag, 0);
atomic_init(&eventcnt, 0); atomic_init(&eventcnt, 0);
seconds = 1; seconds = 1;
nanoseconds = 0; nanoseconds = 0;
isc_mutex_init(&mx);
isc_condition_init(&cv);
result = isc_task_create(taskmgr, 0, &task1); result = isc_task_create(taskmgr, 0, &task1);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
@ -569,13 +541,6 @@ purge(void **state) {
result = isc_task_create(taskmgr, 0, &task2); result = isc_task_create(taskmgr, 0, &task2);
assert_int_equal(result, ISC_R_SUCCESS); assert_int_equal(result, ISC_R_SUCCESS);
LOCK(&mx);
event = isc_event_allocate(test_mctx, (void *)1, (isc_eventtype_t)1,
start_event, NULL, sizeof(*event));
assert_non_null(event);
isc_task_send(task1, &event);
isc_time_settoepoch(&expires); isc_time_settoepoch(&expires);
isc_interval_set(&interval, seconds, 0); isc_interval_set(&interval, seconds, 0);
@ -600,13 +565,10 @@ purge(void **state) {
/* /*
* Wait for shutdown processing to complete. * Wait for shutdown processing to complete.
*/ */
while (!shutdownflag) { while (!atomic_load(&shutdownflag)) {
result = isc_condition_wait(&cv, &mx); isc_test_nap(1000);
assert_int_equal(result, ISC_R_SUCCESS);
} }
UNLOCK(&mx);
assert_int_equal(atomic_load(&errcnt), ISC_R_SUCCESS); assert_int_equal(atomic_load(&errcnt), ISC_R_SUCCESS);
assert_int_equal(atomic_load(&eventcnt), 1); assert_int_equal(atomic_load(&eventcnt), 1);
@ -615,7 +577,6 @@ purge(void **state) {
isc_timer_detach(&oncetimer); isc_timer_detach(&oncetimer);
isc_task_destroy(&task1); isc_task_destroy(&task1);
isc_task_destroy(&task2); isc_task_destroy(&task2);
isc_mutex_destroy(&mx);
} }
int int

View File

@ -94,14 +94,13 @@ struct isc_timermgr {
}; };
void void
isc_timermgr_poke(isc_timermgr_t *manager0); isc_timermgr_poke(isc_timermgr_t *manager);
static inline isc_result_t static inline isc_result_t
schedule(isc_timer_t *timer, isc_time_t *now, bool signal_ok) { schedule(isc_timer_t *timer, isc_time_t *now, bool signal_ok) {
isc_result_t result; isc_result_t result;
isc_timermgr_t *manager; isc_timermgr_t *manager;
isc_time_t due; isc_time_t due;
int cmp;
/*! /*!
* Note: the caller must ensure locking. * Note: the caller must ensure locking.
@ -145,7 +144,7 @@ schedule(isc_timer_t *timer, isc_time_t *now, bool signal_ok) {
/* /*
* Already scheduled. * Already scheduled.
*/ */
cmp = isc_time_compare(&due, &timer->due); int cmp = isc_time_compare(&due, &timer->due);
timer->due = due; timer->due = due;
switch (cmp) { switch (cmp) {
case -1: case -1:
@ -187,7 +186,6 @@ schedule(isc_timer_t *timer, isc_time_t *now, bool signal_ok) {
static inline void static inline void
deschedule(isc_timer_t *timer) { deschedule(isc_timer_t *timer) {
bool need_wakeup = false;
isc_timermgr_t *manager; isc_timermgr_t *manager;
/* /*
@ -196,6 +194,7 @@ deschedule(isc_timer_t *timer) {
manager = timer->manager; manager = timer->manager;
if (timer->index > 0) { if (timer->index > 0) {
bool need_wakeup = false;
if (timer->index == 1) { if (timer->index == 1) {
need_wakeup = true; need_wakeup = true;
} }
@ -223,6 +222,7 @@ destroy(isc_timer_t *timer) {
(void)isc_task_purgerange(timer->task, timer, ISC_TIMEREVENT_FIRSTEVENT, (void)isc_task_purgerange(timer->task, timer, ISC_TIMEREVENT_FIRSTEVENT,
ISC_TIMEREVENT_LASTEVENT, NULL); ISC_TIMEREVENT_LASTEVENT, NULL);
deschedule(timer); deschedule(timer);
UNLINK(manager->timers, timer, link); UNLINK(manager->timers, timer, link);
UNLOCK(&manager->lock); UNLOCK(&manager->lock);
@ -467,10 +467,6 @@ isc_timer_touch(isc_timer_t *timer) {
void void
isc_timer_attach(isc_timer_t *timer, isc_timer_t **timerp) { isc_timer_attach(isc_timer_t *timer, isc_timer_t **timerp) {
/*
* Attach *timerp to timer.
*/
REQUIRE(VALID_TIMER(timer)); REQUIRE(VALID_TIMER(timer));
REQUIRE(timerp != NULL && *timerp == NULL); REQUIRE(timerp != NULL && *timerp == NULL);
isc_refcount_increment(&timer->references); isc_refcount_increment(&timer->references);

View File

@ -104,10 +104,10 @@ isc_socketmgr_setreserved
isc_socketmgr_setstats isc_socketmgr_setstats
isc_task_getname isc_task_getname
isc_task_gettag isc_task_gettag
isc_task_run
isc_task_unsendrange isc_task_unsendrange
isc_taskmgr_mode isc_taskmgr_attach
isc__taskmgr_pause isc_taskmgr_detach
isc__taskmgr_resume
isc_aes128_crypt isc_aes128_crypt
isc_aes192_crypt isc_aes192_crypt
isc_aes256_crypt isc_aes256_crypt
@ -654,7 +654,6 @@ isc_task_unsend
isc_taskmgr_create isc_taskmgr_create
isc_taskmgr_destroy isc_taskmgr_destroy
isc_taskmgr_excltask isc_taskmgr_excltask
isc_taskmgr_mode
@IF NOTYET @IF NOTYET
isc_taskmgr_renderjson isc_taskmgr_renderjson
@END NOTYET @END NOTYET
@ -662,12 +661,10 @@ isc_taskmgr_renderjson
isc_taskmgr_renderxml isc_taskmgr_renderxml
@END LIBXML2 @END LIBXML2
isc_taskmgr_setexcltask isc_taskmgr_setexcltask
isc_taskmgr_setprivilegedmode
isc_taskpool_create isc_taskpool_create
isc_taskpool_destroy isc_taskpool_destroy
isc_taskpool_expand isc_taskpool_expand
isc_taskpool_gettask isc_taskpool_gettask
isc_taskpool_setprivilege
isc_taskpool_size isc_taskpool_size
isc_thread_create isc_thread_create
isc_thread_join isc_thread_join

View File

@ -51,6 +51,7 @@
isc_mem_t *mctx = NULL; isc_mem_t *mctx = NULL;
isc_log_t *lctx = NULL; isc_log_t *lctx = NULL;
isc_nm_t *netmgr = NULL;
isc_taskmgr_t *taskmgr = NULL; isc_taskmgr_t *taskmgr = NULL;
isc_task_t *maintask = NULL; isc_task_t *maintask = NULL;
isc_timermgr_t *timermgr = NULL; isc_timermgr_t *timermgr = NULL;
@ -213,6 +214,9 @@ cleanup_managers(void) {
if (taskmgr != NULL) { if (taskmgr != NULL) {
isc_taskmgr_destroy(&taskmgr); isc_taskmgr_destroy(&taskmgr);
} }
if (netmgr != NULL) {
isc_nm_destroy(&netmgr);
}
if (timermgr != NULL) { if (timermgr != NULL) {
isc_timermgr_destroy(&timermgr); isc_timermgr_destroy(&timermgr);
} }
@ -237,7 +241,8 @@ create_managers(void) {
isc_event_t *event = NULL; isc_event_t *event = NULL;
ncpus = isc_os_ncpus(); ncpus = isc_os_ncpus();
CHECK(isc_taskmgr_create(mctx, ncpus, 0, NULL, &taskmgr)); netmgr = isc_nm_start(mctx, ncpus);
CHECK(isc_taskmgr_create(mctx, 0, netmgr, &taskmgr));
CHECK(isc_task_create(taskmgr, 0, &maintask)); CHECK(isc_task_create(taskmgr, 0, &maintask));
isc_taskmgr_setexcltask(taskmgr, maintask); isc_taskmgr_setexcltask(taskmgr, maintask);
CHECK(isc_task_onshutdown(maintask, shutdown_managers, NULL)); CHECK(isc_task_onshutdown(maintask, shutdown_managers, NULL));

View File

@ -1981,7 +1981,6 @@
./lib/isc/string.c C 1999,2000,2001,2003,2004,2005,2006,2007,2011,2012,2014,2015,2016,2018,2019,2020,2021 ./lib/isc/string.c C 1999,2000,2001,2003,2004,2005,2006,2007,2011,2012,2014,2015,2016,2018,2019,2020,2021
./lib/isc/symtab.c C 1996,1997,1998,1999,2000,2001,2004,2005,2007,2011,2012,2013,2016,2018,2019,2020,2021 ./lib/isc/symtab.c C 1996,1997,1998,1999,2000,2001,2004,2005,2007,2011,2012,2013,2016,2018,2019,2020,2021
./lib/isc/task.c C 1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021 ./lib/isc/task.c C 1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020,2021
./lib/isc/task_p.h C 2018,2019,2020,2021
./lib/isc/taskpool.c C 1999,2000,2001,2004,2005,2007,2011,2012,2013,2016,2018,2019,2020,2021 ./lib/isc/taskpool.c C 1999,2000,2001,2004,2005,2007,2011,2012,2013,2016,2018,2019,2020,2021
./lib/isc/tests/aes_test.c C 2014,2016,2018,2019,2020,2021 ./lib/isc/tests/aes_test.c C 2014,2016,2018,2019,2020,2021
./lib/isc/tests/buffer_test.c C 2014,2015,2016,2017,2018,2019,2020,2021 ./lib/isc/tests/buffer_test.c C 2014,2015,2016,2017,2018,2019,2020,2021