1999-05-12 09:44:35 +00:00
|
|
|
/*
|
2018-02-23 09:53:12 +01:00
|
|
|
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
|
2000-08-01 01:33:37 +00:00
|
|
|
*
|
2021-06-03 08:37:05 +02:00
|
|
|
* SPDX-License-Identifier: MPL-2.0
|
|
|
|
*
|
2016-06-27 14:56:38 +10:00
|
|
|
* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
2020-09-14 16:20:40 -07:00
|
|
|
* file, you can obtain one at https://mozilla.org/MPL/2.0/.
|
2018-02-23 09:53:12 +01:00
|
|
|
*
|
|
|
|
* See the COPYRIGHT file distributed with this work for additional
|
|
|
|
* information regarding copyright ownership.
|
1999-05-12 09:44:35 +00:00
|
|
|
*/
|
|
|
|
|
2005-04-27 04:57:32 +00:00
|
|
|
/*! \file */
|
2000-06-22 22:00:42 +00:00
|
|
|
|
2020-02-12 13:59:18 +01:00
|
|
|
#include <errno.h>
|
2021-05-20 15:53:50 +02:00
|
|
|
#include <inttypes.h>
|
|
|
|
#include <pthread.h>
|
|
|
|
#include <signal.h>
|
2018-04-17 08:29:14 -07:00
|
|
|
#include <stdbool.h>
|
1999-05-12 09:44:35 +00:00
|
|
|
#include <stddef.h>
|
2001-01-17 19:48:45 +00:00
|
|
|
#include <stdlib.h>
|
2021-05-20 15:53:50 +02:00
|
|
|
#include <sys/time.h>
|
2020-02-12 13:59:18 +01:00
|
|
|
#include <sys/types.h>
|
2020-03-09 16:17:26 +01:00
|
|
|
#include <unistd.h>
|
2020-02-12 13:59:18 +01:00
|
|
|
|
1999-05-12 09:44:35 +00:00
|
|
|
#include <isc/app.h>
|
2020-02-12 13:59:18 +01:00
|
|
|
#include <isc/atomic.h>
|
2000-12-26 21:45:08 +00:00
|
|
|
#include <isc/condition.h>
|
2020-02-12 13:59:18 +01:00
|
|
|
#include <isc/event.h>
|
2009-09-01 00:22:28 +00:00
|
|
|
#include <isc/mem.h>
|
2000-12-26 21:45:08 +00:00
|
|
|
#include <isc/mutex.h>
|
2018-08-28 15:43:44 -07:00
|
|
|
#include <isc/strerr.h>
|
2000-05-08 14:38:29 +00:00
|
|
|
#include <isc/string.h>
|
2000-04-28 21:08:52 +00:00
|
|
|
#include <isc/task.h>
|
2019-05-28 23:15:38 +02:00
|
|
|
#include <isc/thread.h>
|
2020-02-12 13:59:18 +01:00
|
|
|
#include <isc/time.h>
|
1999-12-16 22:24:22 +00:00
|
|
|
#include <isc/util.h>
|
1999-05-12 09:44:35 +00:00
|
|
|
|
2009-09-01 00:22:28 +00:00
|
|
|
/*%
 * For BIND9 applications built with threads, we use a single app
 * context and let multiple taskmgr and netmgr threads do actual jobs.
 */

/* Thread currently inside an isc_app_block() region; only meaningful
 * while isc_g_appctx.blocked is true. */
static isc_thread_t blockedthread;
/* True while isc_app_run() is executing; guards against a second
 * concurrent isc_app_run() (see the REQUIRE in isc_app_run()). */
static atomic_bool is_running = 0;

/*
 * The application context of this module.
 */
#define APPCTX_MAGIC ISC_MAGIC('A', 'p', 'c', 'x')
#define VALID_APPCTX(c) ISC_MAGIC_VALID(c, APPCTX_MAGIC)

struct isc_appctx {
	unsigned int magic; /* APPCTX_MAGIC when the context is valid */
	isc_mem_t *mctx;    /* owning memory context; NULL for the
			     * static global context (isc_g_appctx) */
	isc_mutex_t lock;   /* protects on_run */
	isc_eventlist_t on_run; /* events posted (FIFO) when the app
				 * enters the running state */
	atomic_bool shutdown_requested; /* shutdown already initiated */
	atomic_bool running;		/* ctxrun() has been entered */
	atomic_bool want_shutdown;	/* main loop should exit */
	atomic_bool want_reload;	/* main loop should return
					 * ISC_R_RELOAD */
	atomic_bool blocked; /* a thread is in isc_app_block() */
	isc_mutex_t readylock;	 /* paired with 'ready' for non-global
				  * contexts that wait instead of using
				  * signals */
	isc_condition_t ready;
};

/* The single global context used by threaded BIND9 applications. */
static isc_appctx_t isc_g_appctx;
|
2000-01-22 01:39:35 +00:00
|
|
|
|
|
|
|
static void
|
2020-02-13 14:44:37 -08:00
|
|
|
handle_signal(int sig, void (*handler)(int)) {
|
1999-05-12 22:54:46 +00:00
|
|
|
struct sigaction sa;
|
|
|
|
|
2001-11-27 01:56:32 +00:00
|
|
|
memset(&sa, 0, sizeof(sa));
|
1999-05-12 22:54:46 +00:00
|
|
|
sa.sa_handler = handler;
|
|
|
|
|
2020-02-12 13:59:18 +01:00
|
|
|
if (sigfillset(&sa.sa_mask) != 0 || sigaction(sig, &sa, NULL) < 0) {
|
2019-05-13 20:58:20 +07:00
|
|
|
char strbuf[ISC_STRERRORSIZE];
|
2018-08-21 15:27:42 +02:00
|
|
|
strerror_r(errno, strbuf, sizeof(strbuf));
|
2019-05-13 20:58:20 +07:00
|
|
|
isc_error_fatal(__FILE__, __LINE__,
|
2020-02-12 13:59:18 +01:00
|
|
|
"handle_signal() %d setup: %s", sig, strbuf);
|
1999-05-12 22:54:46 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-10 13:49:57 -07:00
|
|
|
/*
 * Initialize an application context: mutexes, condition variable, the
 * on-run event list, and all state flags, then set up process-wide
 * signal dispositions and the signal mask.
 *
 * NOTE(review): this modifies process-global signal state even for
 * non-global contexts; presumably it is expected to run once, early,
 * before other threads exist (see the comment about pthread_sigmask
 * below) -- confirm against callers.
 */
isc_result_t
isc_app_ctxstart(isc_appctx_t *ctx) {
	REQUIRE(VALID_APPCTX(ctx));

	/*
	 * Start an ISC library application.
	 */

	isc_mutex_init(&ctx->lock);

	isc_mutex_init(&ctx->readylock);
	isc_condition_init(&ctx->ready);

	ISC_LIST_INIT(ctx->on_run);

	/* All state flags start cleared; ctxrun() flips 'running'. */
	atomic_init(&ctx->shutdown_requested, false);
	atomic_init(&ctx->running, false);
	atomic_init(&ctx->want_shutdown, false);
	atomic_init(&ctx->want_reload, false);
	atomic_init(&ctx->blocked, false);

	int presult;
	sigset_t sset;
	char strbuf[ISC_STRERRORSIZE];

	/*
	 * Always ignore SIGPIPE.
	 */
	handle_signal(SIGPIPE, SIG_IGN);

	/* Reset to default so sigwait() in ctxrun() can collect them. */
	handle_signal(SIGHUP, SIG_DFL);
	handle_signal(SIGTERM, SIG_DFL);
	handle_signal(SIGINT, SIG_DFL);

	/*
	 * Block SIGHUP, SIGINT, SIGTERM.
	 *
	 * If isc_app_start() is called from the main thread before any other
	 * threads have been created, then the pthread_sigmask() call below
	 * will result in all threads having SIGHUP, SIGINT and SIGTERM
	 * blocked by default, ensuring that only the thread that calls
	 * sigwait() for them will get those signals.
	 */
	if (sigemptyset(&sset) != 0 || sigaddset(&sset, SIGHUP) != 0 ||
	    sigaddset(&sset, SIGINT) != 0 || sigaddset(&sset, SIGTERM) != 0)
	{
		strerror_r(errno, strbuf, sizeof(strbuf));
		isc_error_fatal(__FILE__, __LINE__,
				"isc_app_start() sigsetops: %s", strbuf);
	}
	presult = pthread_sigmask(SIG_BLOCK, &sset, NULL);
	if (presult != 0) {
		/* pthread_sigmask() returns the error, not errno. */
		strerror_r(presult, strbuf, sizeof(strbuf));
		isc_error_fatal(__FILE__, __LINE__,
				"isc_app_start() pthread_sigmask: %s", strbuf);
	}

	return (ISC_R_SUCCESS);
}
|
|
|
|
|
|
|
|
isc_result_t
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_app_start(void) {
|
2019-05-13 20:58:20 +07:00
|
|
|
isc_g_appctx.magic = APPCTX_MAGIC;
|
2013-04-10 13:49:57 -07:00
|
|
|
isc_g_appctx.mctx = NULL;
|
|
|
|
/* The remaining members will be initialized in ctxstart() */
|
|
|
|
|
2019-05-13 20:58:20 +07:00
|
|
|
return (isc_app_ctxstart(&isc_g_appctx));
|
1999-05-12 09:44:35 +00:00
|
|
|
}
|
|
|
|
|
2013-04-10 13:49:57 -07:00
|
|
|
isc_result_t
|
2018-08-06 13:00:55 +02:00
|
|
|
isc_app_onrun(isc_mem_t *mctx, isc_task_t *task, isc_taskaction_t action,
|
2020-02-13 14:44:37 -08:00
|
|
|
void *arg) {
|
2019-05-13 20:58:20 +07:00
|
|
|
return (isc_app_ctxonrun(&isc_g_appctx, mctx, task, action, arg));
|
2013-05-09 08:41:24 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
isc_result_t
|
2019-05-13 20:58:20 +07:00
|
|
|
isc_app_ctxonrun(isc_appctx_t *ctx, isc_mem_t *mctx, isc_task_t *task,
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_taskaction_t action, void *arg) {
|
1999-07-14 02:03:44 +00:00
|
|
|
isc_event_t *event;
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_task_t *cloned_task = NULL;
|
1999-07-14 02:03:44 +00:00
|
|
|
|
2019-05-13 20:58:20 +07:00
|
|
|
if (atomic_load_acquire(&ctx->running)) {
|
|
|
|
return (ISC_R_ALREADYRUNNING);
|
1999-07-14 02:03:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note that we store the task to which we're going to send the event
|
|
|
|
* in the event's "sender" field.
|
|
|
|
*/
|
|
|
|
isc_task_attach(task, &cloned_task);
|
|
|
|
event = isc_event_allocate(mctx, cloned_task, ISC_APPEVENT_SHUTDOWN,
|
2001-11-27 01:56:32 +00:00
|
|
|
action, arg, sizeof(*event));
|
2000-08-01 01:33:37 +00:00
|
|
|
|
2019-05-13 20:58:20 +07:00
|
|
|
LOCK(&ctx->lock);
|
2019-05-28 23:15:38 +02:00
|
|
|
ISC_LINK_INIT(event, ev_link);
|
2013-05-09 08:41:24 +10:00
|
|
|
ISC_LIST_APPEND(ctx->on_run, event, ev_link);
|
|
|
|
UNLOCK(&ctx->lock);
|
1999-07-14 02:03:44 +00:00
|
|
|
|
2019-05-13 20:58:20 +07:00
|
|
|
return (ISC_R_SUCCESS);
|
1999-07-14 02:03:44 +00:00
|
|
|
}
|
|
|
|
|
2013-04-10 13:49:57 -07:00
|
|
|
/*
 * Run the application: post queued on-run events, then block until
 * shutdown or reload is requested.  Returns ISC_R_SUCCESS on shutdown
 * and ISC_R_RELOAD when a reload (SIGHUP / isc_app_ctxsuspend()) is
 * requested.  The global context waits in sigwait(); other contexts
 * wait on the 'ready' condition variable.
 */
isc_result_t
isc_app_ctxrun(isc_appctx_t *ctx) {
	isc_event_t *event, *next_event;
	isc_task_t *task;

	REQUIRE(VALID_APPCTX(ctx));

	/* First caller to flip running=false->true posts the events;
	 * a re-entered run (e.g. after ISC_R_RELOAD) skips this. */
	if (atomic_compare_exchange_strong_acq_rel(&ctx->running,
						   &(bool){ false }, true))
	{
		/*
		 * Post any on-run events (in FIFO order).
		 */
		LOCK(&ctx->lock);
		for (event = ISC_LIST_HEAD(ctx->on_run); event != NULL;
		     event = next_event) {
			next_event = ISC_LIST_NEXT(event, ev_link);
			ISC_LIST_UNLINK(ctx->on_run, event, ev_link);
			/* Destination task was stashed in ev_sender by
			 * isc_app_ctxonrun(). */
			task = event->ev_sender;
			event->ev_sender = NULL;
			isc_task_sendanddetach(&task, &event);
		}
		UNLOCK(&ctx->lock);
	}

	/*
	 * There is no danger if isc_app_shutdown() is called before we
	 * wait for signals. Signals are blocked, so any such signal will
	 * simply be made pending and we will get it when we call
	 * sigwait().
	 */
	while (!atomic_load_acquire(&ctx->want_shutdown)) {
		if (ctx == &isc_g_appctx) {
			sigset_t sset;
			int sig;
			/*
			 * Wait for SIGHUP, SIGINT, or SIGTERM.
			 */
			if (sigemptyset(&sset) != 0 ||
			    sigaddset(&sset, SIGHUP) != 0 ||
			    sigaddset(&sset, SIGINT) != 0 ||
			    sigaddset(&sset, SIGTERM) != 0)
			{
				char strbuf[ISC_STRERRORSIZE];
				strerror_r(errno, strbuf, sizeof(strbuf));
				isc_error_fatal(__FILE__, __LINE__,
						"isc_app_run() sigsetops: %s",
						strbuf);
			}

			if (sigwait(&sset, &sig) == 0) {
				switch (sig) {
				case SIGINT:
				case SIGTERM:
					atomic_store_release(
						&ctx->want_shutdown, true);
					break;
				case SIGHUP:
					atomic_store_release(&ctx->want_reload,
							     true);
					break;
				default:
					/* sigwait() can only return a
					 * signal from 'sset'. */
					UNREACHABLE();
				}
			}
		} else {
			/*
			 * Tools using multiple contexts don't
			 * rely on a signal, just wait until woken
			 * up.
			 */
			if (atomic_load_acquire(&ctx->want_shutdown)) {
				break;
			}
			if (!atomic_load_acquire(&ctx->want_reload)) {
				/* NOTE(review): flags are not re-checked
				 * under readylock before WAIT, and SIGNAL
				 * is raised without holding readylock --
				 * looks tolerant of a missed wakeup only
				 * because the loop re-tests; confirm. */
				LOCK(&ctx->readylock);
				WAIT(&ctx->ready, &ctx->readylock);
				UNLOCK(&ctx->readylock);
			}
		}
		/* Consume a pending reload request exactly once. */
		if (atomic_compare_exchange_strong_acq_rel(
			    &ctx->want_reload, &(bool){ true }, false))
		{
			return (ISC_R_RELOAD);
		}

		/* A shutdown that arrives while a thread is blocked in
		 * isc_app_block() cannot be delivered cleanly; bail. */
		if (atomic_load_acquire(&ctx->want_shutdown) &&
		    atomic_load_acquire(&ctx->blocked))
		{
			exit(1);
		}
	}

	return (ISC_R_SUCCESS);
}
|
|
|
|
|
2013-04-10 13:49:57 -07:00
|
|
|
isc_result_t
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_app_run(void) {
|
2018-08-06 13:00:55 +02:00
|
|
|
isc_result_t result;
|
2018-10-18 23:59:18 -07:00
|
|
|
|
2020-03-30 13:47:58 -07:00
|
|
|
REQUIRE(atomic_compare_exchange_strong_acq_rel(&is_running,
|
|
|
|
&(bool){ false }, true));
|
2019-05-13 20:58:20 +07:00
|
|
|
result = isc_app_ctxrun(&isc_g_appctx);
|
|
|
|
atomic_store_release(&is_running, false);
|
2018-10-18 23:59:18 -07:00
|
|
|
|
|
|
|
return (result);
|
2018-08-06 13:00:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2018-08-07 16:46:53 +02:00
|
|
|
isc_app_isrunning(void) {
|
2019-05-13 20:58:20 +07:00
|
|
|
return (atomic_load_acquire(&is_running));
|
2009-09-01 00:22:28 +00:00
|
|
|
}
|
|
|
|
|
2019-05-13 20:58:20 +07:00
|
|
|
/*
 * Request application shutdown.  Idempotent: only the first call acts.
 * For the global context the request is delivered by sending SIGTERM
 * to the process (collected by sigwait() in ctxrun()); other contexts
 * set want_shutdown and signal the condition variable.
 */
void
isc_app_ctxshutdown(isc_appctx_t *ctx) {
	REQUIRE(VALID_APPCTX(ctx));

	REQUIRE(atomic_load_acquire(&ctx->running));

	/* If ctx->shutdown_requested == true, we are already shutting
	 * down and we want to just bail out.
	 */
	if (atomic_compare_exchange_strong_acq_rel(&ctx->shutdown_requested,
						   &(bool){ false }, true))
	{
		if (ctx != &isc_g_appctx) {
			/* Tool using multiple contexts */
			atomic_store_release(&ctx->want_shutdown, true);
			SIGNAL(&ctx->ready);
		} else {
			/* Normal single BIND9 context */
			if (kill(getpid(), SIGTERM) < 0) {
				char strbuf[ISC_STRERRORSIZE];
				strerror_r(errno, strbuf, sizeof(strbuf));
				isc_error_fatal(__FILE__, __LINE__,
						"isc_app_shutdown() "
						"kill: %s",
						strbuf);
			}
		}
	}
}
|
|
|
|
|
2019-05-13 20:58:20 +07:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_app_shutdown(void) {
|
2019-05-13 20:58:20 +07:00
|
|
|
isc_app_ctxshutdown(&isc_g_appctx);
|
2009-09-01 00:22:28 +00:00
|
|
|
}
|
|
|
|
|
2019-05-13 20:58:20 +07:00
|
|
|
/*
 * Request a reload ("suspend"): makes ctxrun() return ISC_R_RELOAD.
 * Delivered via SIGHUP for the global context, via want_reload plus a
 * condition-variable signal for other contexts.  Suppressed if a
 * shutdown has already been requested.
 */
void
isc_app_ctxsuspend(isc_appctx_t *ctx) {
	REQUIRE(VALID_APPCTX(ctx));

	/* NOTE(review): plain atomic_load here, unlike the
	 * atomic_load_acquire used elsewhere -- seq_cst, so stronger,
	 * but inconsistent; confirm intentional. */
	REQUIRE(atomic_load(&ctx->running));

	/*
	 * Don't send the reload signal if we're shutting down.
	 */
	if (!atomic_load_acquire(&ctx->shutdown_requested)) {
		if (ctx != &isc_g_appctx) {
			/* Tool using multiple contexts */
			atomic_store_release(&ctx->want_reload, true);
			SIGNAL(&ctx->ready);
		} else {
			/* Normal single BIND9 context */
			if (kill(getpid(), SIGHUP) < 0) {
				char strbuf[ISC_STRERRORSIZE];
				strerror_r(errno, strbuf, sizeof(strbuf));
				isc_error_fatal(__FILE__, __LINE__,
						"isc_app_reload() "
						"kill: %s",
						strbuf);
			}
		}
	}
}
|
|
|
|
|
2019-05-13 20:58:20 +07:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_app_reload(void) {
|
2019-05-29 13:37:10 +02:00
|
|
|
isc_app_ctxsuspend(&isc_g_appctx);
|
1999-05-12 09:44:35 +00:00
|
|
|
}
|
2000-09-01 21:31:54 +00:00
|
|
|
|
2013-04-10 13:49:57 -07:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_app_ctxfinish(isc_appctx_t *ctx) {
|
2009-09-01 00:22:28 +00:00
|
|
|
REQUIRE(VALID_APPCTX(ctx));
|
|
|
|
|
2018-11-19 10:31:09 +00:00
|
|
|
isc_mutex_destroy(&ctx->lock);
|
2019-05-13 20:58:20 +07:00
|
|
|
isc_mutex_destroy(&ctx->readylock);
|
|
|
|
isc_condition_destroy(&ctx->ready);
|
2009-09-01 00:22:28 +00:00
|
|
|
}
|
|
|
|
|
2013-04-10 13:49:57 -07:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_app_finish(void) {
|
2019-05-13 20:58:20 +07:00
|
|
|
isc_app_ctxfinish(&isc_g_appctx);
|
2009-09-01 00:22:28 +00:00
|
|
|
}
|
|
|
|
|
2013-04-10 13:49:57 -07:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_app_block(void) {
|
2019-05-28 23:15:38 +02:00
|
|
|
REQUIRE(atomic_load_acquire(&isc_g_appctx.running));
|
2020-02-12 09:17:55 +01:00
|
|
|
REQUIRE(atomic_compare_exchange_strong_acq_rel(&isc_g_appctx.blocked,
|
|
|
|
&(bool){ false }, true));
|
2000-09-01 21:31:54 +00:00
|
|
|
|
2019-05-28 23:15:38 +02:00
|
|
|
sigset_t sset;
|
2000-09-01 21:31:54 +00:00
|
|
|
blockedthread = pthread_self();
|
|
|
|
RUNTIME_CHECK(sigemptyset(&sset) == 0 &&
|
|
|
|
sigaddset(&sset, SIGINT) == 0 &&
|
|
|
|
sigaddset(&sset, SIGTERM) == 0);
|
|
|
|
RUNTIME_CHECK(pthread_sigmask(SIG_UNBLOCK, &sset, NULL) == 0);
|
|
|
|
}
|
|
|
|
|
2013-04-10 13:49:57 -07:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_app_unblock(void) {
|
2019-05-13 20:58:20 +07:00
|
|
|
REQUIRE(atomic_load_acquire(&isc_g_appctx.running));
|
2020-02-12 09:17:55 +01:00
|
|
|
REQUIRE(atomic_compare_exchange_strong_acq_rel(&isc_g_appctx.blocked,
|
|
|
|
&(bool){ true }, false));
|
2000-09-01 21:31:54 +00:00
|
|
|
|
|
|
|
REQUIRE(blockedthread == pthread_self());
|
|
|
|
|
2019-05-28 23:15:38 +02:00
|
|
|
sigset_t sset;
|
2000-09-01 21:31:54 +00:00
|
|
|
RUNTIME_CHECK(sigemptyset(&sset) == 0 &&
|
2008-01-18 23:46:58 +00:00
|
|
|
sigaddset(&sset, SIGINT) == 0 &&
|
2000-09-01 21:31:54 +00:00
|
|
|
sigaddset(&sset, SIGTERM) == 0);
|
|
|
|
RUNTIME_CHECK(pthread_sigmask(SIG_BLOCK, &sset, NULL) == 0);
|
|
|
|
}
|
2009-09-01 00:22:28 +00:00
|
|
|
|
2013-04-10 13:49:57 -07:00
|
|
|
isc_result_t
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_appctx_create(isc_mem_t *mctx, isc_appctx_t **ctxp) {
|
2019-05-13 20:58:20 +07:00
|
|
|
isc_appctx_t *ctx;
|
2009-09-01 00:22:28 +00:00
|
|
|
|
|
|
|
REQUIRE(mctx != NULL);
|
|
|
|
REQUIRE(ctxp != NULL && *ctxp == NULL);
|
|
|
|
|
|
|
|
ctx = isc_mem_get(mctx, sizeof(*ctx));
|
Refactor taskmgr to run on top of netmgr
This commit changes the taskmgr to run the individual tasks on the
netmgr internal workers. While an effort has been put into keeping the
taskmgr interface intact, couple of changes have been made:
* The taskmgr has no concept of universal privileged mode - rather the
tasks are either privileged or unprivileged (normal). The privileged
tasks are run as a first thing when the netmgr is unpaused. There
are now four different queues in in the netmgr:
1. priority queue - netievent on the priority queue are run even when
the taskmgr enter exclusive mode and netmgr is paused. This is
needed to properly start listening on the interfaces, free
resources and resume.
2. privileged task queue - only privileged tasks are queued here and
this is the first queue that gets processed when network manager
is unpaused using isc_nm_resume(). All netmgr workers need to
clean the privileged task queue before they all proceed normal
operation. Both task queues are processed when the workers are
finished.
3. task queue - only (traditional) task are scheduled here and this
queue along with privileged task queues are process when the
netmgr workers are finishing. This is needed to process the task
shutdown events.
4. normal queue - this is the queue with netmgr events, e.g. reading,
sending, callbacks and pretty much everything is processed here.
* The isc_taskmgr_create() now requires initialized netmgr (isc_nm_t)
object.
* The isc_nm_destroy() function now waits for indefinite time, but it
will print out the active objects when in tracing mode
(-DNETMGR_TRACE=1 and -DNETMGR_TRACE_VERBOSE=1), the netmgr has been
made a little bit more asynchronous and it might take longer time to
shutdown all the active networking connections.
* Previously, the isc_nm_stoplistening() was a synchronous operation.
This has been changed and the isc_nm_stoplistening() just schedules
the child sockets to stop listening and exits. This was needed to
prevent a deadlock as the the (traditional) tasks are now executed on
the netmgr threads.
* The socket selection logic in isc__nm_udp_send() was flawed, but
fortunatelly, it was broken, so we never hit the problem where we
created uvreq_t on a socket from nmhandle_t, but then a different
socket could be picked up and then we were trying to run the send
callback on a socket that had different threadid than currently
running.
2021-04-09 11:31:19 +02:00
|
|
|
*ctx = (isc_appctx_t){ .magic = 0 };
|
2009-09-01 00:22:28 +00:00
|
|
|
|
|
|
|
isc_mem_attach(mctx, &ctx->mctx);
|
Refactor taskmgr to run on top of netmgr
This commit changes the taskmgr to run the individual tasks on the
netmgr internal workers. While an effort has been put into keeping the
taskmgr interface intact, couple of changes have been made:
* The taskmgr has no concept of universal privileged mode - rather the
tasks are either privileged or unprivileged (normal). The privileged
tasks are run as a first thing when the netmgr is unpaused. There
are now four different queues in in the netmgr:
1. priority queue - netievent on the priority queue are run even when
the taskmgr enter exclusive mode and netmgr is paused. This is
needed to properly start listening on the interfaces, free
resources and resume.
2. privileged task queue - only privileged tasks are queued here and
this is the first queue that gets processed when network manager
is unpaused using isc_nm_resume(). All netmgr workers need to
clean the privileged task queue before they all proceed normal
operation. Both task queues are processed when the workers are
finished.
3. task queue - only (traditional) task are scheduled here and this
queue along with privileged task queues are process when the
netmgr workers are finishing. This is needed to process the task
shutdown events.
4. normal queue - this is the queue with netmgr events, e.g. reading,
sending, callbacks and pretty much everything is processed here.
* The isc_taskmgr_create() now requires initialized netmgr (isc_nm_t)
object.
* The isc_nm_destroy() function now waits for indefinite time, but it
will print out the active objects when in tracing mode
(-DNETMGR_TRACE=1 and -DNETMGR_TRACE_VERBOSE=1), the netmgr has been
made a little bit more asynchronous and it might take longer time to
shutdown all the active networking connections.
* Previously, the isc_nm_stoplistening() was a synchronous operation.
This has been changed and the isc_nm_stoplistening() just schedules
the child sockets to stop listening and exits. This was needed to
prevent a deadlock as the the (traditional) tasks are now executed on
the netmgr threads.
* The socket selection logic in isc__nm_udp_send() was flawed, but
fortunatelly, it was broken, so we never hit the problem where we
created uvreq_t on a socket from nmhandle_t, but then a different
socket could be picked up and then we were trying to run the send
callback on a socket that had different threadid than currently
running.
2021-04-09 11:31:19 +02:00
|
|
|
ctx->magic = APPCTX_MAGIC;
|
2009-09-01 00:22:28 +00:00
|
|
|
|
2019-05-13 20:58:20 +07:00
|
|
|
*ctxp = ctx;
|
2009-09-01 00:22:28 +00:00
|
|
|
|
|
|
|
return (ISC_R_SUCCESS);
|
|
|
|
}
|
|
|
|
|
2013-04-10 13:49:57 -07:00
|
|
|
void
|
2020-02-13 14:44:37 -08:00
|
|
|
isc_appctx_destroy(isc_appctx_t **ctxp) {
|
2019-05-13 20:58:20 +07:00
|
|
|
isc_appctx_t *ctx;
|
2009-09-01 00:22:28 +00:00
|
|
|
|
|
|
|
REQUIRE(ctxp != NULL);
|
2019-05-13 20:58:20 +07:00
|
|
|
ctx = *ctxp;
|
2009-09-01 00:22:28 +00:00
|
|
|
*ctxp = NULL;
|
2019-05-28 23:15:38 +02:00
|
|
|
REQUIRE(VALID_APPCTX(ctx));
|
2009-09-01 00:22:28 +00:00
|
|
|
|
2019-05-13 20:58:20 +07:00
|
|
|
ctx->magic = 0;
|
2009-09-01 00:22:28 +00:00
|
|
|
|
2019-05-13 20:58:20 +07:00
|
|
|
isc_mem_putanddetach(&ctx->mctx, ctx, sizeof(*ctx));
|
2009-09-01 00:22:28 +00:00
|
|
|
}
|