/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

/*! \file */

/*
 * XXXRTH  Need to document the states a task can be in, and the rules
 * for changing states.
 */

#include <stdbool.h>

#include <isc/app.h>
#include <isc/atomic.h>
#include <isc/condition.h>
#include <isc/event.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/once.h>
#include <isc/platform.h>
#include <isc/print.h>
#include <isc/string.h>
#include <isc/random.h>
#include <isc/refcount.h>
#include <isc/task.h>
#include <isc/thread.h>
#include <isc/time.h>
#include <isc/util.h>

#ifdef HAVE_LIBXML2
#include <libxml/xmlwriter.h>
#define ISC_XMLCHAR (const xmlChar *)
#endif /* HAVE_LIBXML2 */

#ifdef HAVE_JSON_C
#include <json_object.h>
#endif /* HAVE_JSON_C */

#ifdef OPENSSL_LEAKS
#include <openssl/err.h>
#endif

/*
 * The task manager is built around an 'as little locking as possible'
 * concept.  Each thread has its own queue of tasks to run: a task in the
 * running state stays on the runner it is currently on, while a task in
 * the idle state can be woken up on a specific runner with
 * isc_task_sendto - that helps with data locality on the CPU.
 *
 * To spread the load evenly, some tasks (from task pools) are bound to
 * specific queues using isc_task_create_bound.  This way load balancing
 * between CPUs/queues happens on the higher layer.
 */
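
/*
 * Illustrative sketch (not part of the original file; assumes 'taskmgr'
 * and 'event' already exist): an unbound task is assigned a worker queue
 * when the first event reaches it, and isc_task_sendto() may name a
 * preferred queue:
 *
 *	isc_task_t *task = NULL;
 *	RUNTIME_CHECK(isc_task_create(taskmgr, 0, &task) == ISC_R_SUCCESS);
 *	isc_task_sendto(task, &event, 2);	- prefer worker queue 2
 *
 * A task created with isc_task_create_bound(taskmgr, 0, &task, 2) stays
 * pinned to queue 2 for its whole lifetime.
 */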
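
/*
 * Debug tracing: when ISC_TASK_TRACE is defined, the XTRACE/XTTRACE/
 * XTHREADTRACE macros below log the task pointer, thread id and a
 * message to stderr; otherwise they expand to nothing.
 */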
#ifdef ISC_TASK_TRACE
#define XTRACE(m)		fprintf(stderr, "task %p thread %lu: %s\n", \
					task, isc_thread_self(), (m))
#define XTTRACE(t, m)		fprintf(stderr, "task %p thread %lu: %s\n", \
					(t), isc_thread_self(), (m))
#define XTHREADTRACE(m)		fprintf(stderr, "thread %lu: %s\n", \
					isc_thread_self(), (m))
#else
#define XTRACE(m)
#define XTTRACE(t, m)
#define XTHREADTRACE(m)
#endif

/***
 *** Types.
 ***/

typedef enum {
	task_state_idle,	/* not doing anything, events queue empty */
	task_state_ready,	/* waiting in worker's queue */
	task_state_paused,	/* not running, paused */
	task_state_pausing,	/* running, waiting to be paused */
	task_state_running,	/* actively processing events */
	task_state_done		/* shutting down, no events or references */
} task_state_t;

#if defined(HAVE_LIBXML2) || defined(HAVE_JSON_C)
static const char *statenames[] = {
	"idle", "ready", "paused", "pausing", "running", "done",
};
#endif

#define TASK_MAGIC		ISC_MAGIC('T', 'A', 'S', 'K')
#define VALID_TASK(t)		ISC_MAGIC_VALID(t, TASK_MAGIC)

typedef struct isc__task isc__task_t;
typedef struct isc__taskmgr isc__taskmgr_t;
typedef struct isc__taskqueue isc__taskqueue_t;

struct isc__task {
	/* Not locked. */
	isc_task_t			common;
	isc__taskmgr_t *		manager;
	isc_mutex_t			lock;
	/* Locked by task lock. */
	task_state_t			state;
	isc_refcount_t			references;
	isc_eventlist_t			events;
	isc_eventlist_t			on_shutdown;
	unsigned int			nevents;
	unsigned int			quantum;
	isc_stdtime_t			now;
	isc_time_t			tnow;
	char				name[16];
	void *				tag;
	unsigned int			threadid;
	bool				bound;
	/* Protected by atomics */
	atomic_uint_fast32_t		flags;
	/* Locked by task manager lock. */
	LINK(isc__task_t)		link;
	LINK(isc__task_t)		ready_link;
	LINK(isc__task_t)		ready_priority_link;
};

#define TASK_F_SHUTTINGDOWN	0x01
#define TASK_F_PRIVILEGED	0x02
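
/*
 * The flags word is manipulated only with atomic operations, so these
 * tests and updates are safe without holding the task lock.
 */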
#define TASK_SHUTTINGDOWN(t) \
	((atomic_load_acquire(&(t)->flags) & TASK_F_SHUTTINGDOWN) != 0)
#define TASK_PRIVILEGED(t) \
	((atomic_load_acquire(&(t)->flags) & TASK_F_PRIVILEGED) != 0)

#define TASK_FLAG_SET(t, f) \
	atomic_fetch_or_release(&(t)->flags, (f))
#define TASK_FLAG_CLR(t, f) \
	atomic_fetch_and_release(&(t)->flags, ~(f))

#define TASK_MANAGER_MAGIC	ISC_MAGIC('T', 'S', 'K', 'M')
#define VALID_MANAGER(m)	ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)

typedef ISC_LIST(isc__task_t) isc__tasklist_t;
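
/*
 * Each worker thread owns one of these queues.  Tasks that are ready to
 * run are kept on ready_tasks (and, when privileged, also on
 * ready_priority_tasks), and the worker sleeps on work_available.
 */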
struct isc__taskqueue {
	/* Everything locked by lock */
	isc_mutex_t		lock;
	isc__tasklist_t		ready_tasks;
	isc__tasklist_t		ready_priority_tasks;
	isc_condition_t		work_available;
	isc_thread_t		thread;
	unsigned int		threadid;
	isc__taskmgr_t		*manager;
};

struct isc__taskmgr {
	/* Not locked. */
	isc_taskmgr_t			common;
	isc_mem_t *			mctx;
	isc_mutex_t			lock;
	isc_mutex_t			halt_lock;
	isc_condition_t			halt_cond;
	unsigned int			workers;
	atomic_uint_fast32_t		tasks_running;
	atomic_uint_fast32_t		tasks_ready;
	atomic_uint_fast32_t		curq;
	atomic_uint_fast32_t		tasks_count;
	isc__taskqueue_t		*queues;
	isc_nm_t			*nm;

	/* Locked by task manager lock. */
	unsigned int			default_quantum;
	LIST(isc__task_t)		tasks;
	atomic_uint_fast32_t		mode;
	atomic_bool			pause_req;
	atomic_bool			exclusive_req;
	atomic_bool			exiting;

	/* Locked by halt_lock */
	unsigned int			halted;

	/*
	 * Multiple threads can read/write 'excl' at the same time, so we need
	 * to protect the access.  We can't use 'lock' since isc_task_detach()
	 * will try to acquire it.
	 */
	isc_mutex_t			excl_lock;
	isc__task_t			*excl;
};

void
isc__taskmgr_pause(isc_taskmgr_t *manager0);
void
isc__taskmgr_resume(isc_taskmgr_t *manager0);

#define DEFAULT_DEFAULT_QUANTUM		25
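
/*
 * A manager is finished when it has been told to exit and its last task
 * has been destroyed.
 */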
#define FINISHED(m)	(atomic_load_relaxed(&((m)->exiting)) == true && \
			 atomic_load(&(m)->tasks_count) == 0)

/*%
 * The following are intended for internal use (indicated by "isc__"
 * prefix) but are not declared as static, allowing direct access from
 * unit tests etc.
 */

bool
isc_task_purgeevent(isc_task_t *task0, isc_event_t *event);
void
isc_taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0);
isc_result_t
isc_taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp);

static inline bool
empty_readyq(isc__taskmgr_t *manager, int c);

static inline isc__task_t *
pop_readyq(isc__taskmgr_t *manager, int c);

static inline void
push_readyq(isc__taskmgr_t *manager, isc__task_t *task, int c);

static inline void
wake_all_queues(isc__taskmgr_t *manager);

/***
 *** Tasks.
 ***/

static inline void
wake_all_queues(isc__taskmgr_t *manager) {
	for (unsigned int i = 0; i < manager->workers; i++) {
		LOCK(&manager->queues[i].lock);
		BROADCAST(&manager->queues[i].work_available);
		UNLOCK(&manager->queues[i].lock);
	}
}
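
/*
 * task_finished() drops a task that has reached task_state_done: it is
 * unlinked from the manager's task list, idle workers are woken if the
 * manager is now finished, and the task's memory is freed.
 */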
static void
task_finished(isc__task_t *task) {
	isc__taskmgr_t *manager = task->manager;
	REQUIRE(EMPTY(task->events));
	REQUIRE(task->nevents == 0);
	REQUIRE(EMPTY(task->on_shutdown));
	REQUIRE(task->state == task_state_done);

	XTRACE("task_finished");

	isc_refcount_destroy(&task->references);

	LOCK(&manager->lock);
	UNLINK(manager->tasks, task, link);
	atomic_fetch_sub(&manager->tasks_count, 1);
	UNLOCK(&manager->lock);
	if (FINISHED(manager)) {
		/*
		 * All tasks have completed and the
		 * task manager is exiting.  Wake up
		 * any idle worker threads so they
		 * can exit.
		 */
		wake_all_queues(manager);
	}
	isc_mutex_destroy(&task->lock);
	task->common.impmagic = 0;
	task->common.magic = 0;
	isc_mem_put(manager->mctx, task, sizeof(*task));
}

isc_result_t
isc_task_create(isc_taskmgr_t *manager0, unsigned int quantum,
		isc_task_t **taskp)
{
	return (isc_task_create_bound(manager0, quantum, taskp, -1));
}

isc_result_t
isc_task_create_bound(isc_taskmgr_t *manager0, unsigned int quantum,
		      isc_task_t **taskp, int threadid)
{
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
	isc__task_t *task;
	bool exiting;

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(taskp != NULL && *taskp == NULL);

	task = isc_mem_get(manager->mctx, sizeof(*task));
	XTRACE("isc_task_create");
	task->manager = manager;

	if (threadid == -1) {
		/*
		 * The task is not pinned to a queue; its threadid will be
		 * chosen when the first event is sent to it - either
		 * randomly or as specified by isc_task_sendto.
		 */
		task->bound = false;
		task->threadid = 0;
	} else {
		/*
		 * The task is pinned to a queue; it will always be run
		 * by a specific thread.
		 */
		task->bound = true;
		task->threadid = threadid % manager->workers;
	}

	isc_mutex_init(&task->lock);
	task->state = task_state_idle;

	isc_refcount_init(&task->references, 1);
	INIT_LIST(task->events);
	INIT_LIST(task->on_shutdown);
	task->nevents = 0;
	task->quantum = (quantum > 0) ? quantum : manager->default_quantum;
	atomic_init(&task->flags, 0);
	task->now = 0;
	isc_time_settoepoch(&task->tnow);
	memset(task->name, 0, sizeof(task->name));
	task->tag = NULL;
	INIT_LINK(task, link);
	INIT_LINK(task, ready_link);
	INIT_LINK(task, ready_priority_link);

	exiting = false;
	LOCK(&manager->lock);
	if (!atomic_load_relaxed(&manager->exiting)) {
		APPEND(manager->tasks, task, link);
		atomic_fetch_add(&manager->tasks_count, 1);
	} else {
		exiting = true;
	}
	UNLOCK(&manager->lock);

	if (exiting) {
		isc_mutex_destroy(&task->lock);
		isc_mem_put(manager->mctx, task, sizeof(*task));
		return (ISC_R_SHUTTINGDOWN);
	}

	task->common.magic = ISCAPI_TASK_MAGIC;
	task->common.impmagic = TASK_MAGIC;
	*taskp = (isc_task_t *)task;

	return (ISC_R_SUCCESS);
}

void
isc_task_attach(isc_task_t *source0, isc_task_t **targetp) {
	isc__task_t *source = (isc__task_t *)source0;

	/*
	 * Attach *targetp to source.
	 */

	REQUIRE(VALID_TASK(source));
	REQUIRE(targetp != NULL && *targetp == NULL);

	XTTRACE(source, "isc_task_attach");

	isc_refcount_increment(&source->references);

	*targetp = (isc_task_t *)source;
}

static inline bool
task_shutdown(isc__task_t *task) {
	bool was_idle = false;
	isc_event_t *event, *prev;

	/*
	 * Caller must be holding the task's lock.
	 */

	XTRACE("task_shutdown");

	if (!TASK_SHUTTINGDOWN(task)) {
		XTRACE("shutting down");
		TASK_FLAG_SET(task, TASK_F_SHUTTINGDOWN);
		if (task->state == task_state_idle) {
			INSIST(EMPTY(task->events));
			task->state = task_state_ready;
			was_idle = true;
		}
		INSIST(task->state == task_state_ready ||
		       task->state == task_state_paused ||
		       task->state == task_state_pausing ||
		       task->state == task_state_running);

		/*
		 * Note that we post shutdown events LIFO.
		 */
		for (event = TAIL(task->on_shutdown);
		     event != NULL;
		     event = prev) {
			prev = PREV(event, ev_link);
			DEQUEUE(task->on_shutdown, event, ev_link);
			ENQUEUE(task->events, event, ev_link);
			task->nevents++;
		}
	}

	return (was_idle);
}

/*
 * Moves a task onto the appropriate run queue.
 *
 * Caller must NOT hold manager lock.
 */
static inline void
task_ready(isc__task_t *task) {
	isc__taskmgr_t *manager = task->manager;
	bool has_privilege = isc_task_privilege((isc_task_t *) task);

	REQUIRE(VALID_MANAGER(manager));
	REQUIRE(task->state == task_state_ready);

	XTRACE("task_ready");
	LOCK(&manager->queues[task->threadid].lock);
	push_readyq(manager, task, task->threadid);
	if (atomic_load(&manager->mode) == isc_taskmgrmode_normal ||
	    has_privilege)
	{
		SIGNAL(&manager->queues[task->threadid].work_available);
	}
	UNLOCK(&manager->queues[task->threadid].lock);
}

static inline bool
task_detach(isc__task_t *task) {

	/*
	 * Caller must be holding the task lock.
	 */

	XTRACE("detach");

	if (isc_refcount_decrement(&task->references) == 1 &&
	    task->state == task_state_idle)
	{
		INSIST(EMPTY(task->events));
		/*
		 * There are no references to this task, and no
		 * pending events.  We could try to optimize and
		 * either initiate shutdown or clean up the task,
		 * depending on its state, but it's easier to just
		 * make the task ready and allow run() or the event
		 * loop to deal with shutting down and termination.
		 */
		task->state = task_state_ready;
		return (true);
	}

	return (false);
}

void
isc_task_detach(isc_task_t **taskp) {
	isc__task_t *task;
	bool was_idle;

	/*
	 * Detach *taskp from its task.
	 */

	REQUIRE(taskp != NULL);
	task = (isc__task_t *)*taskp;
	REQUIRE(VALID_TASK(task));

	XTRACE("isc_task_detach");

	LOCK(&task->lock);
	was_idle = task_detach(task);
	UNLOCK(&task->lock);

	if (was_idle)
		task_ready(task);

	*taskp = NULL;
}

static inline bool
task_send(isc__task_t *task, isc_event_t **eventp, int c) {
	bool was_idle = false;
	isc_event_t *event;

	/*
	 * Caller must be holding the task lock.
	 */

	REQUIRE(eventp != NULL);
	event = *eventp;
	*eventp = NULL;
	REQUIRE(event != NULL);
	REQUIRE(event->ev_type > 0);
	REQUIRE(task->state != task_state_done);
	REQUIRE(!ISC_LINK_LINKED(event, ev_ratelink));

	XTRACE("task_send");

	if (task->state == task_state_idle) {
		was_idle = true;
		task->threadid = c;
		INSIST(EMPTY(task->events));
		task->state = task_state_ready;
	}
	INSIST(task->state == task_state_ready ||
	       task->state == task_state_running ||
	       task->state == task_state_paused ||
	       task->state == task_state_pausing);
	ENQUEUE(task->events, event, ev_link);
	task->nevents++;

	return (was_idle);
}

void
isc_task_send(isc_task_t *task0, isc_event_t **eventp) {
	isc_task_sendto(task0, eventp, -1);
}

void
isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
	isc_task_sendtoanddetach(taskp, eventp, -1);
}

void
isc_task_sendto(isc_task_t *task0, isc_event_t **eventp, int c) {
	isc__task_t *task = (isc__task_t *)task0;
	bool was_idle;

	/*
	 * Send '*event' to 'task'.
	 */

	REQUIRE(VALID_TASK(task));
	XTRACE("isc_task_send");

	/*
	 * We're trying hard to hold locks for as short a time as possible.
	 * We're also trying to hold as few locks as possible.  This is why
	 * some processing is deferred until after the lock is released.
	 */
	LOCK(&task->lock);
	/* If the task is bound, ignore the provided cpu. */
	if (task->bound) {
		c = task->threadid;
	} else if (c < 0) {
		c = atomic_fetch_add_explicit(&task->manager->curq, 1,
					      memory_order_relaxed);
	}
	c %= task->manager->workers;
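	/*
	 * For example, with 4 workers, successive unbound sends with c < 0
	 * pick c = 0, 1, 2, 3, 0, ... as curq is incremented; the value
	 * only takes effect when the task is idle and gets requeued
	 * (see task_send()).
	 */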
	was_idle = task_send(task, eventp, c);
	UNLOCK(&task->lock);

	if (was_idle) {
		/*
		 * We need to add this task to the ready queue.
		 *
		 * We've waited until now to do it because making a task
		 * ready requires locking the manager.  If we tried to do
		 * this while holding the task lock, we could deadlock.
		 *
		 * We've changed the state to ready, so no one else will
		 * be trying to add this task to the ready queue.  The
		 * only way to leave the ready state is by executing the
		 * task.  It thus doesn't matter if events are added,
		 * removed, or a shutdown is started in the interval
		 * between the time we released the task lock, and the time
		 * we add the task to the ready queue.
		 */
		task_ready(task);
	}
}

void
isc_task_sendtoanddetach(isc_task_t **taskp, isc_event_t **eventp, int c) {
	bool idle1, idle2;
	isc__task_t *task;

	/*
	 * Send '*event' to '*taskp' and then detach '*taskp' from its
	 * task.
	 */

	REQUIRE(taskp != NULL);
	task = (isc__task_t *)*taskp;
	REQUIRE(VALID_TASK(task));
	XTRACE("isc_task_sendanddetach");

	LOCK(&task->lock);
	if (task->bound) {
		c = task->threadid;
	} else if (c < 0) {
		c = atomic_fetch_add_explicit(&task->manager->curq, 1,
					      memory_order_relaxed);
	}
	c %= task->manager->workers;
	idle1 = task_send(task, eventp, c);
	idle2 = task_detach(task);
	UNLOCK(&task->lock);

	/*
	 * If idle1, then idle2 shouldn't be true as well since we're holding
	 * the task lock, and thus the task cannot switch from ready back to
	 * idle.
	 */
	INSIST(!(idle1 && idle2));

	if (idle1 || idle2)
		task_ready(task);

	*taskp = NULL;
}

#define PURGE_OK(event)	(((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)

static unsigned int
dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
	       isc_eventtype_t last, void *tag,
	       isc_eventlist_t *events, bool purging)
{
	isc_event_t *event, *next_event;
	unsigned int count = 0;

	REQUIRE(VALID_TASK(task));
	REQUIRE(last >= first);

	XTRACE("dequeue_events");

	/*
	 * Events matching 'sender', whose type is >= first and <= last, and
	 * whose tag is 'tag' will be dequeued.  If 'purging', matching events
	 * which are marked as unpurgable will not be dequeued.
	 *
	 * sender == NULL means "any sender", and tag == NULL means "any tag".
	 */

	LOCK(&task->lock);

	for (event = HEAD(task->events); event != NULL; event = next_event) {
		next_event = NEXT(event, ev_link);
		if (event->ev_type >= first && event->ev_type <= last &&
		    (sender == NULL || event->ev_sender == sender) &&
		    (tag == NULL || event->ev_tag == tag) &&
		    (!purging || PURGE_OK(event))) {
			DEQUEUE(task->events, event, ev_link);
			task->nevents--;
			ENQUEUE(*events, event, ev_link);
			count++;
		}
	}

	UNLOCK(&task->lock);

	return (count);
}

unsigned int
isc_task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
		    isc_eventtype_t last, void *tag)
{
	isc__task_t *task = (isc__task_t *)task0;
	unsigned int count;
	isc_eventlist_t events;
	isc_event_t *event, *next_event;
	REQUIRE(VALID_TASK(task));

	/*
	 * Purge events from a task's event queue.
	 */

	XTRACE("isc_task_purgerange");

	ISC_LIST_INIT(events);

	count = dequeue_events(task, sender, first, last, tag, &events,
			       true);

	for (event = HEAD(events); event != NULL; event = next_event) {
		next_event = NEXT(event, ev_link);
		ISC_LIST_UNLINK(events, event, ev_link);
		isc_event_free(&event);
	}

	/*
	 * Note that purging never changes the state of the task.
	 */

	return (count);
}

unsigned int
isc_task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
	       void *tag)
{
	/*
	 * Purge events from a task's event queue.
	 */
	REQUIRE(VALID_TASK(task));

	XTRACE("isc_task_purge");

	return (isc_task_purgerange(task, sender, type, type, tag));
}

bool
isc_task_purgeevent(isc_task_t *task0, isc_event_t *event) {
	isc__task_t *task = (isc__task_t *)task0;
	isc_event_t *curr_event, *next_event;

	/*
	 * Purge 'event' from a task's event queue.
	 *
	 * XXXRTH:  WARNING:  This method may be removed before beta.
	 */

	REQUIRE(VALID_TASK(task));

	/*
	 * If 'event' is on the task's event queue, it will be purged,
	 * unless it is marked as unpurgeable.  'event' does not have to be
	 * on the task's event queue; in fact, it can even be an invalid
	 * pointer.  Purging only occurs if the event is actually on the task's
	 * event queue.
	 *
	 * Purging never changes the state of the task.
	 */

	LOCK(&task->lock);
	for (curr_event = HEAD(task->events);
	     curr_event != NULL;
	     curr_event = next_event) {
		next_event = NEXT(curr_event, ev_link);
		if (curr_event == event && PURGE_OK(event)) {
			DEQUEUE(task->events, curr_event, ev_link);
			task->nevents--;
			break;
		}
	}
	UNLOCK(&task->lock);

	if (curr_event == NULL)
		return (false);

	isc_event_free(&curr_event);

	return (true);
}

unsigned int
isc_task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
		     isc_eventtype_t last, void *tag,
		     isc_eventlist_t *events)
{
	/*
	 * Remove events from a task's event queue.
	 */
	REQUIRE(VALID_TASK(task));

	XTRACE("isc_task_unsendrange");

	return (dequeue_events((isc__task_t *)task, sender, first,
			       last, tag, events, false));
}

unsigned int
isc_task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
		void *tag, isc_eventlist_t *events)
{
	/*
	 * Remove events from a task's event queue.
	 */

	XTRACE("isc_task_unsend");

	return (dequeue_events((isc__task_t *)task, sender, type,
			       type, tag, events, false));
}

isc_result_t
isc_task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
		    void *arg)
{
	isc__task_t *task = (isc__task_t *)task0;
	bool disallowed = false;
	isc_result_t result = ISC_R_SUCCESS;
	isc_event_t *event;

	/*
	 * Send a shutdown event with action 'action' and argument 'arg' when
	 * 'task' is shutdown.
	 */

	REQUIRE(VALID_TASK(task));
	REQUIRE(action != NULL);

	event = isc_event_allocate(task->manager->mctx,
				   NULL,
				   ISC_TASKEVENT_SHUTDOWN,
				   action,
				   arg,
				   sizeof(*event));

	if (TASK_SHUTTINGDOWN(task)) {
		disallowed = true;
		result = ISC_R_SHUTTINGDOWN;
	} else {
		LOCK(&task->lock);
		ENQUEUE(task->on_shutdown, event, ev_link);
		UNLOCK(&task->lock);
	}

	if (disallowed)
		isc_mem_put(task->manager->mctx, event, sizeof(*event));

	return (result);
}

void
isc_task_shutdown(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;
	bool was_idle;

	/*
	 * Shutdown 'task'.
	 */

	REQUIRE(VALID_TASK(task));

	LOCK(&task->lock);
	was_idle = task_shutdown(task);
	UNLOCK(&task->lock);

	if (was_idle)
		task_ready(task);
}

void
isc_task_destroy(isc_task_t **taskp) {

	/*
	 * Destroy '*taskp'.
	 */

	REQUIRE(taskp != NULL);

	isc_task_shutdown(*taskp);
	isc_task_detach(taskp);
}

void
isc_task_setname(isc_task_t *task0, const char *name, void *tag) {
	isc__task_t *task = (isc__task_t *)task0;

	/*
	 * Name 'task'.
	 */

	REQUIRE(VALID_TASK(task));

	LOCK(&task->lock);
	strlcpy(task->name, name, sizeof(task->name));
	task->tag = tag;
	UNLOCK(&task->lock);
}

const char *
isc_task_getname(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;

	REQUIRE(VALID_TASK(task));

	return (task->name);
}

void *
isc_task_gettag(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;

	REQUIRE(VALID_TASK(task));

	return (task->tag);
}

void
isc_task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
	isc__task_t *task = (isc__task_t *)task0;

	REQUIRE(VALID_TASK(task));
	REQUIRE(t != NULL);

	LOCK(&task->lock);
	*t = task->now;
	UNLOCK(&task->lock);
}

void
isc_task_getcurrenttimex(isc_task_t *task0, isc_time_t *t) {
	isc__task_t *task = (isc__task_t *)task0;

	REQUIRE(VALID_TASK(task));
	REQUIRE(t != NULL);

	LOCK(&task->lock);
	*t = task->tnow;
	UNLOCK(&task->lock);
}

/***
 *** Task Manager.
 ***/

/*
 * Return true if the current ready list for the manager, which is
 * either ready_tasks or the ready_priority_tasks, depending on whether
 * the manager is currently in normal or privileged execution mode,
 * is empty.
 *
 * Caller must hold the task manager lock.
 */
static inline bool
empty_readyq(isc__taskmgr_t *manager, int c) {
	isc__tasklist_t queue;

	if (atomic_load_relaxed(&manager->mode) == isc_taskmgrmode_normal) {
		queue = manager->queues[c].ready_tasks;
	} else {
		queue = manager->queues[c].ready_priority_tasks;
	}
	return (EMPTY(queue));
}

/*
 * Dequeue and return a pointer to the first task on the current ready
 * list for the manager.
 * If the task is privileged, dequeue it from the other ready list
 * as well.
 *
 * Caller must hold the task manager lock.
 */
static inline isc__task_t *
pop_readyq(isc__taskmgr_t *manager, int c) {
	isc__task_t *task;

	if (atomic_load_relaxed(&manager->mode) == isc_taskmgrmode_normal) {
		task = HEAD(manager->queues[c].ready_tasks);
	} else {
		task = HEAD(manager->queues[c].ready_priority_tasks);
	}

	if (task != NULL) {
		DEQUEUE(manager->queues[c].ready_tasks, task, ready_link);
		if (ISC_LINK_LINKED(task, ready_priority_link)) {
			DEQUEUE(manager->queues[c].ready_priority_tasks, task,
				ready_priority_link);
		}
	}

	return (task);
}

/*
 * Push 'task' onto the ready_tasks queue.  If 'task' has the privilege
 * flag set, then also push it onto the ready_priority_tasks queue.
 *
 * Caller must hold the task manager lock.
 */
static inline void
push_readyq(isc__taskmgr_t *manager, isc__task_t *task, int c) {
	ENQUEUE(manager->queues[c].ready_tasks, task, ready_link);
	if (TASK_PRIVILEGED(task)) {
		ENQUEUE(manager->queues[c].ready_priority_tasks, task,
			ready_priority_link);
	}
	atomic_fetch_add_explicit(&manager->tasks_ready, 1,
				  memory_order_acquire);
}
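
/*
 * dispatch() below is the heart of each worker thread: it repeatedly
 * takes a task off this thread's ready queue, runs its events (counting
 * them against the task's quantum), and then decides whether the task is
 * done, idle, or needs to be requeued.  It also honours pause and
 * exclusive-mode requests by parking the thread on halt_cond.
 */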
static void
dispatch(isc__taskmgr_t *manager, unsigned int threadid) {
	isc__task_t *task;

	REQUIRE(VALID_MANAGER(manager));

	/* Wait for everything to initialize */
	LOCK(&manager->lock);
	UNLOCK(&manager->lock);

	/*
	 * Again we're trying to hold the lock for as short a time as possible
	 * and to do as little locking and unlocking as possible.
	 *
	 * In both while loops, the appropriate lock must be held before the
	 * while body starts.  Code which acquired the lock at the top of
	 * the loop would be more readable, but would result in a lot of
	 * extra locking.  Compare:
	 *
	 * Straightforward:
	 *
	 *	LOCK();
	 *	...
	 *	UNLOCK();
	 *	while (expression) {
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *
	 *		Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *		UNLOCK();
	 *	}
	 *
	 * Note how if the loop continues we unlock and then immediately lock.
	 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
	 * unlocks.  Also note that the lock is not held when the while
	 * condition is tested, which may or may not be important, depending
	 * on the expression.
	 *
	 * As written:
	 *
	 *	LOCK();
	 *	while (expression) {
	 *		...
	 *		UNLOCK();
	 *
	 *		Unlocked part here...
	 *
	 *		LOCK();
	 *		...
	 *	}
	 *	UNLOCK();
	 *
	 * For N iterations of the loop, this code does N+1 locks and N+1
	 * unlocks.  The while expression is always protected by the lock.
	 */
	LOCK(&manager->queues[threadid].lock);

	while (!FINISHED(manager)) {
		/*
		 * For reasons similar to those given in the comment in
		 * isc_task_send() above, it is safe for us to dequeue
		 * the task while only holding the manager lock, and then
		 * change the task to running state while only holding the
		 * task lock.
		 *
		 * If a pause has been requested, don't do any work
		 * until it's been released.
		 */
		while ((empty_readyq(manager, threadid) &&
			!atomic_load_relaxed(&manager->pause_req) &&
			!atomic_load_relaxed(&manager->exclusive_req)) &&
		       !FINISHED(manager))
		{
			XTHREADTRACE("wait");
			XTHREADTRACE(atomic_load_relaxed(&manager->pause_req)
				     ? "paused"
				     : "notpaused");
			XTHREADTRACE(atomic_load_relaxed(&manager->exclusive_req)
				     ? "excreq"
				     : "notexcreq");
			WAIT(&manager->queues[threadid].work_available,
			     &manager->queues[threadid].lock);
			XTHREADTRACE("awake");
		}
		XTHREADTRACE("working");

		if (atomic_load_relaxed(&manager->pause_req) ||
		    atomic_load_relaxed(&manager->exclusive_req)) {
			UNLOCK(&manager->queues[threadid].lock);
			XTHREADTRACE("halting");

			/*
			 * Switching to exclusive mode is done as a
			 * two-phase lock: the check of whether we have to
			 * switch is done without any locks on pause_req and
			 * exclusive_req to save time - the worst thing that
			 * can happen is that we launch one more task and the
			 * exclusive task is postponed a bit.
			 *
			 * Broadcasting on halt_cond seems suboptimal, but
			 * exclusive tasks are rare enough that we don't
			 * care.
			 */
			LOCK(&manager->halt_lock);
			manager->halted++;
			BROADCAST(&manager->halt_cond);
			while (atomic_load_relaxed(&manager->pause_req) ||
			       atomic_load_relaxed(&manager->exclusive_req))
			{
				WAIT(&manager->halt_cond, &manager->halt_lock);
			}
			manager->halted--;
			SIGNAL(&manager->halt_cond);
			UNLOCK(&manager->halt_lock);

			LOCK(&manager->queues[threadid].lock);
			/* Restart the loop after the halt. */
			continue;
		}

		task = pop_readyq(manager, threadid);
		if (task != NULL) {
			unsigned int dispatch_count = 0;
			bool done = false;
			bool requeue = false;
			bool finished = false;
			isc_event_t *event;

			INSIST(VALID_TASK(task));

			/*
			 * Note we only unlock the queue lock if we actually
			 * have a task to do.  We must reacquire the queue
			 * lock before exiting the 'if (task != NULL)' block.
			 */
			UNLOCK(&manager->queues[threadid].lock);
			RUNTIME_CHECK(
				atomic_fetch_sub_explicit(&manager->tasks_ready,
							  1, memory_order_release) > 0);
			atomic_fetch_add_explicit(&manager->tasks_running, 1,
						  memory_order_acquire);

			LOCK(&task->lock);
			INSIST(task->state == task_state_ready);
			task->state = task_state_running;
			XTRACE("running");
			XTRACE(task->name);
			TIME_NOW(&task->tnow);
			task->now = isc_time_seconds(&task->tnow);
|
1999-07-10 01:00:05 +00:00
|
|
|
do {
|
|
|
|
if (!EMPTY(task->events)) {
|
|
|
|
event = HEAD(task->events);
|
2000-04-17 19:22:44 +00:00
|
|
|
DEQUEUE(task->events, event, ev_link);
|
2012-05-14 10:06:05 -07:00
|
|
|
task->nevents--;
|
1998-08-17 22:05:58 +00:00
|
|
|
|
1999-07-10 01:00:05 +00:00
|
|
|
/*
|
|
|
|
* Execute the event action.
|
|
|
|
*/
|
2018-11-23 21:35:01 +01:00
|
|
|
XTRACE("execute action");
|
2018-10-11 13:39:04 +00:00
|
|
|
XTRACE(task->name);
|
2000-04-17 19:22:44 +00:00
|
|
|
if (event->ev_action != NULL) {
|
1999-07-10 01:00:05 +00:00
|
|
|
UNLOCK(&task->lock);
|
2009-09-01 00:22:28 +00:00
|
|
|
(event->ev_action)(
|
|
|
|
(isc_task_t *)task,
|
|
|
|
event);
|
1999-07-10 01:00:05 +00:00
|
|
|
LOCK(&task->lock);
|
|
|
|
}
|
2019-11-05 15:23:33 -08:00
|
|
|
XTRACE("execution complete");
|
1999-07-10 01:00:05 +00:00
|
|
|
dispatch_count++;
|
1999-04-01 01:09:28 +00:00
|
|
|
}
|
1999-07-10 01:00:05 +00:00
|
|
|
|
2019-11-05 15:23:33 -08:00
|
|
|
if (isc_refcount_current(
|
|
|
|
&task->references) == 0 &&
|
1999-09-23 21:30:26 +00:00
|
|
|
EMPTY(task->events) &&
|
2019-11-05 15:23:33 -08:00
|
|
|
!TASK_SHUTTINGDOWN(task))
|
|
|
|
{
|
2018-04-17 08:29:14 -07:00
|
|
|
bool was_idle;
|
2000-08-01 01:33:37 +00:00
|
|
|
|
1999-09-23 21:30:26 +00:00
|
|
|
/*
|
|
|
|
* There are no references and no
|
|
|
|
* pending events for this task,
|
|
|
|
* which means it will not become
|
|
|
|
* runnable again via an external
|
|
|
|
* action (such as sending an event
|
|
|
|
* or detaching).
|
|
|
|
*
|
|
|
|
* We initiate shutdown to prevent
|
|
|
|
* it from becoming a zombie.
|
|
|
|
*
|
|
|
|
* We do this here instead of in
|
|
|
|
* the "if EMPTY(task->events)" block
|
|
|
|
* below because:
|
|
|
|
*
|
|
|
|
* If we post no shutdown events,
|
|
|
|
* we want the task to finish.
|
|
|
|
*
|
|
|
|
* If we did post shutdown events,
|
|
|
|
* will still want the task's
|
|
|
|
* quantum to be applied.
|
|
|
|
*/
|
|
|
|
was_idle = task_shutdown(task);
|
|
|
|
INSIST(!was_idle);
|
1999-07-10 01:00:05 +00:00
|
|
|
}
|
|
|
|
|
1998-12-16 02:02:10 +00:00
|
|
|
if (EMPTY(task->events)) {
|
1998-08-17 23:15:50 +00:00
|
|
|
/*
|
1998-12-16 02:02:10 +00:00
|
|
|
* Nothing else to do for this task
|
1999-09-23 21:30:26 +00:00
|
|
|
* right now.
|
1998-08-17 23:15:50 +00:00
|
|
|
*/
|
2018-11-23 21:35:01 +01:00
|
|
|
XTRACE("empty");
|
2019-11-05 15:23:33 -08:00
|
|
|
if (isc_refcount_current(
|
|
|
|
&task->references) == 0 &&
|
|
|
|
TASK_SHUTTINGDOWN(task))
|
|
|
|
{
|
1999-09-23 21:30:26 +00:00
|
|
|
/*
|
|
|
|
* The task is done.
|
|
|
|
*/
|
2018-11-23 21:35:01 +01:00
|
|
|
XTRACE("done");
|
2018-04-17 08:29:14 -07:00
|
|
|
finished = true;
|
1999-05-10 23:00:30 +00:00
|
|
|
task->state = task_state_done;
|
2019-11-05 15:23:33 -08:00
|
|
|
} else {
|
2019-11-13 11:26:34 +01:00
|
|
|
if (task->state ==
|
|
|
|
task_state_running) {
|
|
|
|
task->state = task_state_idle;
|
Fix a race in taskmgr between worker and task pausing/unpausing.
To reproduce the race - create a task, send two events to it, first one
must take some time. Then, from the outside, pause(), unpause() and detach()
the task.
When the long-running event is processed by the task it is in
task_state_running state. When we called pause() the state changed to
task_state_paused, on unpause we checked that there are events in the task
queue, changed the state to task_state_ready and enqueued the task on the
workers readyq. We then detach the task.
The dispatch() is done with processing the event, it processes the second
event in the queue, and then shuts down the task and frees it (as it's not
referenced anymore). Dispatcher then takes the, already freed, task from
the queue where it was wrongly put, causing an use-after free and,
subsequently, either an assertion failure or a segmentation fault.
The probability of this happening is very slim, yet it might happen under a
very high load, more probably on a recursive resolver than on an
authoritative.
The fix introduces a new 'task_state_pausing' state - to which tasks
are moved if they're being paused while still running. They are moved
to task_state_paused state when dispatcher is done with them, and
if we unpause a task in paused state it's moved back to task_state_running
and not requeued.
2020-01-20 11:39:14 +01:00
|
|
|
} else if (task->state ==
|
|
|
|
task_state_pausing) {
|
|
|
|
task->state = task_state_paused;
|
2019-11-13 11:26:34 +01:00
|
|
|
}
|
2019-11-05 15:23:33 -08:00
|
|
|
}
|
2018-04-17 08:29:14 -07:00
|
|
|
done = true;
|
1998-08-17 22:05:58 +00:00
|
|
|
} else if (dispatch_count >= task->quantum) {
|
|
|
|
/*
|
|
|
|
* Our quantum has expired, but
|
|
|
|
* there is more work to be done.
|
|
|
|
* We'll requeue it to the ready
|
|
|
|
* queue later.
|
|
|
|
*
|
|
|
|
* We don't check quantum until
|
|
|
|
* dispatching at least one event,
|
|
|
|
* so the minimum quantum is one.
|
|
|
|
*/
|
2018-11-23 21:35:01 +01:00
|
|
|
XTRACE("quantum");
|
2019-11-13 11:26:34 +01:00
|
|
|
if (task->state == task_state_running) {
|
|
|
|
/*
|
|
|
|
* We requeue only if it's
|
|
|
|
* not paused.
|
|
|
|
*/
|
|
|
|
task->state = task_state_ready;
|
|
|
|
requeue = true;
|
Fix a race in taskmgr between worker and task pausing/unpausing.
To reproduce the race - create a task, send two events to it, first one
must take some time. Then, from the outside, pause(), unpause() and detach()
the task.
When the long-running event is processed by the task it is in
task_state_running state. When we called pause() the state changed to
task_state_paused, on unpause we checked that there are events in the task
queue, changed the state to task_state_ready and enqueued the task on the
workers readyq. We then detach the task.
The dispatch() is done with processing the event, it processes the second
event in the queue, and then shuts down the task and frees it (as it's not
referenced anymore). Dispatcher then takes the, already freed, task from
the queue where it was wrongly put, causing an use-after free and,
subsequently, either an assertion failure or a segmentation fault.
The probability of this happening is very slim, yet it might happen under a
very high load, more probably on a recursive resolver than on an
authoritative.
The fix introduces a new 'task_state_pausing' state - to which tasks
are moved if they're being paused while still running. They are moved
to task_state_paused state when dispatcher is done with them, and
if we unpause a task in paused state it's moved back to task_state_running
and not requeued.
2020-01-20 11:39:14 +01:00
|
|
|
} else if (task->state ==
|
|
|
|
task_state_pausing) {
|
|
|
|
task->state = task_state_paused;
|
2019-11-13 11:26:34 +01:00
|
|
|
}
|
2018-04-17 08:29:14 -07:00
|
|
|
done = true;
|
1998-08-17 22:05:58 +00:00
|
|
|
}
|
1999-07-10 01:00:05 +00:00
|
|
|
} while (!done);
|
1998-08-17 22:05:58 +00:00
|
|
|
UNLOCK(&task->lock);
|
|
|
|
|
1999-07-10 01:00:05 +00:00
|
|
|
if (finished)
|
|
|
|
task_finished(task);
|
1998-08-17 22:05:58 +00:00
|
|
|
|
2018-11-08 19:34:51 -08:00
|
|
|
RUNTIME_CHECK(
|
|
|
|
atomic_fetch_sub_explicit(&manager->tasks_running,
|
2018-10-25 06:27:24 +00:00
|
|
|
1, memory_order_release) > 0);
|
2018-10-22 09:37:17 +00:00
|
|
|
LOCK(&manager->queues[threadid].lock);
|
1998-08-17 22:05:58 +00:00
|
|
|
if (requeue) {
|
|
|
|
/*
|
|
|
|
* We know we're awake, so we don't have
|
|
|
|
* to wakeup any sleeping threads if the
|
|
|
|
* ready queue is empty before we requeue.
|
|
|
|
*
|
|
|
|
* A possible optimization if the queue is
|
|
|
|
* empty is to 'goto' the 'if (task != NULL)'
|
|
|
|
* block, avoiding the ENQUEUE of the task
|
|
|
|
* and the subsequent immediate DEQUEUE
|
|
|
|
* (since it is the only executable task).
|
|
|
|
* We don't do this because then we'd be
|
|
|
|
* skipping the exit_requested check. The
|
|
|
|
* cost of ENQUEUE is low anyway, especially
|
|
|
|
* when you consider that we'd have to do
|
|
|
|
* an extra EMPTY check to see if we could
|
|
|
|
* do the optimization. If the ready queue
|
|
|
|
* were usually nonempty, the 'optimization'
|
|
|
|
* might even hurt rather than help.
|
|
|
|
*/
|
2018-10-22 09:37:17 +00:00
|
|
|
push_readyq(manager, task, threadid);
|
1998-08-17 22:05:58 +00:00
|
|
|
}
|
|
|
|
}
|
2011-09-02 21:15:39 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are in privileged execution mode and there are no
|
|
|
|
* tasks remaining on the current ready queue, then
|
|
|
|
* we're stuck. Automatically drop privileges at that
|
|
|
|
* point and continue with the regular ready queue.
|
|
|
|
*/
|
2019-07-08 17:30:06 +02:00
|
|
|
if (atomic_load_relaxed(&manager->mode) !=
|
|
|
|
isc_taskmgrmode_normal &&
|
2018-10-25 15:01:25 +00:00
|
|
|
atomic_load_explicit(&manager->tasks_running,
|
|
|
|
memory_order_acquire) == 0)
|
2018-10-25 06:27:24 +00:00
|
|
|
{
|
2018-10-25 12:41:59 +00:00
|
|
|
UNLOCK(&manager->queues[threadid].lock);
|
|
|
|
LOCK(&manager->lock);
|
|
|
|
/*
|
|
|
|
* Check once again, under lock. Mode can only
|
|
|
|
* change from privileged to normal anyway, and
|
|
|
|
* if we enter this loop twice at the same time
|
|
|
|
* we'll end up in a deadlock over queue locks.
|
|
|
|
*
|
|
|
|
*/
|
2019-07-08 17:30:06 +02:00
|
|
|
if (atomic_load(&manager->mode) !=
|
|
|
|
isc_taskmgrmode_normal &&
|
2018-10-25 15:01:25 +00:00
|
|
|
atomic_load_explicit(&manager->tasks_running,
|
|
|
|
memory_order_acquire) == 0)
|
2018-10-25 12:41:59 +00:00
|
|
|
{
|
|
|
|
bool empty = true;
|
2018-10-25 15:01:25 +00:00
|
|
|
unsigned int i;
|
2018-11-08 19:34:51 -08:00
|
|
|
for (i = 0; i < manager->workers && empty; i++)
|
2018-10-25 15:01:25 +00:00
|
|
|
{
|
2018-10-22 10:57:05 +00:00
|
|
|
LOCK(&manager->queues[i].lock);
|
2018-10-25 12:41:59 +00:00
|
|
|
empty &= empty_readyq(manager, i);
|
2018-10-22 10:57:05 +00:00
|
|
|
UNLOCK(&manager->queues[i].lock);
|
|
|
|
}
|
2018-10-25 12:41:59 +00:00
|
|
|
if (empty) {
|
2019-01-18 11:47:43 +01:00
|
|
|
atomic_store(&manager->mode,
|
|
|
|
isc_taskmgrmode_normal);
|
2018-10-25 15:01:25 +00:00
|
|
|
wake_all_queues(manager);
|
2018-10-22 10:57:05 +00:00
|
|
|
}
|
2018-10-11 13:39:04 +00:00
|
|
|
}
|
2018-10-25 12:41:59 +00:00
|
|
|
UNLOCK(&manager->lock);
|
|
|
|
LOCK(&manager->queues[threadid].lock);
|
2011-09-02 21:15:39 +00:00
|
|
|
}
|
1998-08-17 22:05:58 +00:00
|
|
|
}
|
2018-10-22 09:37:17 +00:00
|
|
|
UNLOCK(&manager->queues[threadid].lock);
|
2018-10-18 18:16:25 +00:00
|
|
|
/*
|
|
|
|
* There might be other dispatchers waiting on empty tasks,
|
|
|
|
* wake them up.
|
|
|
|
*/
|
2018-10-25 15:01:25 +00:00
|
|
|
wake_all_queues(manager);
|
2000-08-29 22:30:14 +00:00
|
|
|
}

static isc_threadresult_t
#ifdef _WIN32
WINAPI
#endif
run(void *queuep) {
	isc__taskqueue_t *tq = queuep;
	isc__taskmgr_t *manager = tq->manager;
	int threadid = tq->threadid;
	isc_thread_setaffinity(threadid);

	XTHREADTRACE("starting");

	dispatch(manager, threadid);

	XTHREADTRACE("exiting");

#ifdef OPENSSL_LEAKS
	ERR_remove_state(0);
#endif

	return ((isc_threadresult_t)0);
}

static void
manager_free(isc__taskmgr_t *manager) {
	for (unsigned int i = 0; i < manager->workers; i++) {
		isc_mutex_destroy(&manager->queues[i].lock);
	}
	isc_mutex_destroy(&manager->lock);
	isc_mutex_destroy(&manager->halt_lock);
	isc_mem_put(manager->mctx, manager->queues,
		    manager->workers * sizeof(isc__taskqueue_t));
	manager->common.impmagic = 0;
	manager->common.magic = 0;
	isc_mem_putanddetach(&manager->mctx, manager, sizeof(*manager));
}

isc_result_t
isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
		   unsigned int default_quantum,
		   isc_nm_t *nm, isc_taskmgr_t **managerp)
{
	unsigned int i;
	isc__taskmgr_t *manager;

	/*
	 * Create a new task manager.
	 */

	REQUIRE(workers > 0);
	REQUIRE(managerp != NULL && *managerp == NULL);

	manager = isc_mem_get(mctx, sizeof(*manager));
	*manager = (isc__taskmgr_t) {
		.common.impmagic = TASK_MANAGER_MAGIC,
		.common.magic = ISCAPI_TASKMGR_MAGIC
	};

	atomic_store(&manager->mode, isc_taskmgrmode_normal);
	isc_mutex_init(&manager->lock);
	isc_mutex_init(&manager->excl_lock);

	isc_mutex_init(&manager->halt_lock);
	isc_condition_init(&manager->halt_cond);

	manager->workers = workers;

	if (default_quantum == 0) {
		default_quantum = DEFAULT_DEFAULT_QUANTUM;
	}
	manager->default_quantum = default_quantum;

	if (nm != NULL) {
		isc_nm_attach(nm, &manager->nm);
	}

	INIT_LIST(manager->tasks);
	atomic_store(&manager->tasks_count, 0);
	manager->queues = isc_mem_get(mctx,
				      workers * sizeof(isc__taskqueue_t));
	RUNTIME_CHECK(manager->queues != NULL);

	atomic_init(&manager->tasks_running, 0);
	atomic_init(&manager->tasks_ready, 0);
	atomic_init(&manager->curq, 0);
	atomic_init(&manager->exiting, false);
	atomic_store_relaxed(&manager->exclusive_req, false);
	atomic_store_relaxed(&manager->pause_req, false);

	isc_mem_attach(mctx, &manager->mctx);

	LOCK(&manager->lock);
	/*
	 * Start workers.
	 */
	for (i = 0; i < workers; i++) {
		INIT_LIST(manager->queues[i].ready_tasks);
		INIT_LIST(manager->queues[i].ready_priority_tasks);
		isc_mutex_init(&manager->queues[i].lock);
		isc_condition_init(&manager->queues[i].work_available);

		manager->queues[i].manager = manager;
		manager->queues[i].threadid = i;
		isc_thread_create(run, &manager->queues[i],
				  &manager->queues[i].thread);

		char name[21];
		snprintf(name, sizeof(name), "isc-worker%04u", i);
		isc_thread_setname(manager->queues[i].thread, name);
	}
	UNLOCK(&manager->lock);

	isc_thread_setconcurrency(workers);

	*managerp = (isc_taskmgr_t *)manager;

	return (ISC_R_SUCCESS);
}
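
/*
 * Usage sketch (hypothetical caller, not part of this file): it assumes
 * an already-attached memory context 'mctx'; NULL is passed for the
 * netmgr since none is needed here, and 0 for default_quantum selects
 * DEFAULT_DEFAULT_QUANTUM.
 *
 *	isc_taskmgr_t *taskmgr = NULL;
 *	isc_task_t *task = NULL;
 *
 *	RUNTIME_CHECK(isc_taskmgr_create(mctx, 4, 0, NULL, &taskmgr)
 *		      == ISC_R_SUCCESS);
 *	RUNTIME_CHECK(isc_task_create(taskmgr, 0, &task) == ISC_R_SUCCESS);
 *
 *	(send events to 'task' with isc_task_send(), then clean up:)
 *
 *	isc_task_detach(&task);
 *	isc_taskmgr_destroy(&taskmgr);
 */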

void
isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
	isc__taskmgr_t *manager;
	isc__task_t *task;
	unsigned int i;
	bool exiting;

	/*
	 * Destroy '*managerp'.
	 */

	REQUIRE(managerp != NULL);
	manager = (isc__taskmgr_t *)*managerp;
	REQUIRE(VALID_MANAGER(manager));

	XTHREADTRACE("isc_taskmgr_destroy");
	/*
	 * Only one non-worker thread may ever call this routine.
	 * If a worker thread wants to initiate shutdown of the
	 * task manager, it should ask some non-worker thread to call
	 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
	 * that the startup thread is sleeping on.
	 */

	/*
	 * Detach the exclusive task before acquiring the manager lock.
	 */
	LOCK(&manager->excl_lock);
	if (manager->excl != NULL)
		isc_task_detach((isc_task_t **) &manager->excl);
	UNLOCK(&manager->excl_lock);

	/*
	 * Unlike elsewhere, we're going to hold this lock a long time.
	 * We need to do so, because otherwise the list of tasks could
	 * change while we were traversing it.
	 *
	 * This is also the only function where we will hold both the
	 * task manager lock and a task lock at the same time.
	 */

	LOCK(&manager->lock);

	/*
	 * Make sure we only get called once.
	 */
	exiting = false;

	INSIST(!!atomic_compare_exchange_strong(&manager->exiting,
						&exiting, true));

	/*
	 * If privileged mode was on, turn it off.
	 */
	atomic_store(&manager->mode, isc_taskmgrmode_normal);

	/*
	 * Post shutdown event(s) to every task (if they haven't already been
	 * posted).  To make things easier post idle tasks to worker 0.
	 */
	LOCK(&manager->queues[0].lock);
	for (task = HEAD(manager->tasks);
	     task != NULL;
	     task = NEXT(task, link)) {
		LOCK(&task->lock);
		if (task_shutdown(task)) {
			task->threadid = 0;
			push_readyq(manager, task, 0);
		}
		UNLOCK(&task->lock);
	}
	UNLOCK(&manager->queues[0].lock);

	/*
	 * Wake up any sleeping workers.  This ensures we get work done if
	 * there's work left to do, and if there are already no tasks left
	 * it will cause the workers to see manager->exiting.
	 */
	wake_all_queues(manager);
	UNLOCK(&manager->lock);

	/*
	 * Wait for all the worker threads to exit.
	 */
	for (i = 0; i < manager->workers; i++) {
		isc_thread_join(manager->queues[i].thread, NULL);
	}

	/*
	 * Detach from the network manager if it was set.
	 */
	if (manager->nm != NULL) {
		isc_nm_detach(&manager->nm);
	}

	manager_free(manager);

	*managerp = NULL;
}

void
isc_taskmgr_setprivilegedmode(isc_taskmgr_t *manager0) {
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;

	atomic_store(&manager->mode, isc_taskmgrmode_privileged);
}

isc_taskmgrmode_t
isc_taskmgr_mode(isc_taskmgr_t *manager0) {
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
	return (atomic_load(&manager->mode));
}

void
isc__taskmgr_pause(isc_taskmgr_t *manager0) {
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;

	LOCK(&manager->halt_lock);
	while (atomic_load_relaxed(&manager->exclusive_req) ||
	       atomic_load_relaxed(&manager->pause_req)) {
		UNLOCK(&manager->halt_lock);
		/* This is ugly but pause is used EXCLUSIVELY in tests */
		isc_thread_yield();
		LOCK(&manager->halt_lock);
	}

	atomic_store_relaxed(&manager->pause_req, true);
	while (manager->halted < manager->workers) {
		wake_all_queues(manager);
		WAIT(&manager->halt_cond, &manager->halt_lock);
	}
	UNLOCK(&manager->halt_lock);
}

void
isc__taskmgr_resume(isc_taskmgr_t *manager0) {
	isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;

	LOCK(&manager->halt_lock);
	if (atomic_load(&manager->pause_req)) {
		atomic_store(&manager->pause_req, false);
		while (manager->halted > 0) {
			BROADCAST(&manager->halt_cond);
			WAIT(&manager->halt_cond, &manager->halt_lock);
		}
	}
	UNLOCK(&manager->halt_lock);
}
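
/*
 * Usage sketch for the pause/resume pair above (hypothetical test code,
 * not part of this file): between the two calls every worker is parked
 * on halt_cond, so state normally owned by the workers can be examined
 * without racing against event execution.
 *
 *	isc__taskmgr_pause(taskmgr);
 *	(inspect or adjust shared state here)
 *	isc__taskmgr_resume(taskmgr);
 */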

void
isc_taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0) {
	isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;
	isc__task_t *task = (isc__task_t *) task0;

	REQUIRE(VALID_MANAGER(mgr));
	REQUIRE(VALID_TASK(task));
	LOCK(&mgr->excl_lock);
	if (mgr->excl != NULL)
		isc_task_detach((isc_task_t **) &mgr->excl);
	isc_task_attach(task0, (isc_task_t **) &mgr->excl);
	UNLOCK(&mgr->excl_lock);
}

isc_result_t
isc_taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp) {
	isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;
	isc_result_t result = ISC_R_SUCCESS;

	REQUIRE(VALID_MANAGER(mgr));
	REQUIRE(taskp != NULL && *taskp == NULL);

	LOCK(&mgr->excl_lock);
	if (mgr->excl != NULL)
		isc_task_attach((isc_task_t *) mgr->excl, taskp);
	else
		result = ISC_R_NOTFOUND;
	UNLOCK(&mgr->excl_lock);

	return (result);
}
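
/*
 * Usage sketch (hypothetical startup code, not part of this file): one
 * task is designated as the exclusive task right after the manager is
 * created; isc_task_beginexclusive() below only accepts that task.
 *
 *	isc_task_t *excl = NULL;
 *
 *	RUNTIME_CHECK(isc_task_create(taskmgr, 0, &excl) == ISC_R_SUCCESS);
 *	isc_taskmgr_setexcltask(taskmgr, excl);
 */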

isc_result_t
isc_task_beginexclusive(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;
	isc__taskmgr_t *manager;

	REQUIRE(VALID_TASK(task));

	manager = task->manager;

	REQUIRE(task->state == task_state_running);

	LOCK(&manager->excl_lock);
	REQUIRE(task == task->manager->excl ||
		(atomic_load_relaxed(&task->manager->exiting) &&
		 task->manager->excl == NULL));
	UNLOCK(&manager->excl_lock);

	if (atomic_load_relaxed(&manager->exclusive_req) ||
	    atomic_load_relaxed(&manager->pause_req)) {
		return (ISC_R_LOCKBUSY);
	}

	LOCK(&manager->halt_lock);
	INSIST(!atomic_load_relaxed(&manager->exclusive_req) &&
	       !atomic_load_relaxed(&manager->pause_req));
	atomic_store_relaxed(&manager->exclusive_req, true);
	while (manager->halted + 1 < manager->workers) {
		wake_all_queues(manager);
		WAIT(&manager->halt_cond, &manager->halt_lock);
	}
	UNLOCK(&manager->halt_lock);
	if (manager->nm != NULL) {
		isc_nm_pause(manager->nm);
	}
	return (ISC_R_SUCCESS);
}

void
isc_task_endexclusive(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;
	isc__taskmgr_t *manager;

	REQUIRE(VALID_TASK(task));
	REQUIRE(task->state == task_state_running);
	manager = task->manager;

	if (manager->nm != NULL) {
		isc_nm_resume(manager->nm);
	}
	LOCK(&manager->halt_lock);
	REQUIRE(atomic_load_relaxed(&manager->exclusive_req) == true);
	atomic_store_relaxed(&manager->exclusive_req, false);
	while (manager->halted > 0) {
		BROADCAST(&manager->halt_cond);
		WAIT(&manager->halt_cond, &manager->halt_lock);
	}
	UNLOCK(&manager->halt_lock);
}
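
/*
 * Usage sketch of exclusive mode (hypothetical event action, not part
 * of this file), running on the task registered with
 * isc_taskmgr_setexcltask().  isc_task_beginexclusive() can fail with
 * ISC_R_LOCKBUSY while a pause or another exclusive request is in
 * progress, so the result must be checked.
 *
 *	static void
 *	reconfigure_action(isc_task_t *task, isc_event_t *event) {
 *		if (isc_task_beginexclusive(task) == ISC_R_SUCCESS) {
 *			(modify data shared with the other workers)
 *			isc_task_endexclusive(task);
 *		}
 *		isc_event_free(&event);
 *	}
 */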

void
isc_task_pause(isc_task_t *task0) {
	REQUIRE(ISCAPI_TASK_VALID(task0));
	isc__task_t *task = (isc__task_t *)task0;
	isc__taskmgr_t *manager = task->manager;
	bool running = false;

	LOCK(&task->lock);
	INSIST(task->state == task_state_idle ||
	       task->state == task_state_ready ||
	       task->state == task_state_running);
	if (task->state == task_state_running) {
		running = true;
		task->state = task_state_pausing;
	} else {
		task->state = task_state_paused;
	}
	UNLOCK(&task->lock);

	if (running) {
		return;
	}

	LOCK(&manager->queues[task->threadid].lock);
	if (ISC_LINK_LINKED(task, ready_link)) {
		DEQUEUE(manager->queues[task->threadid].ready_tasks,
			task, ready_link);
	}
	UNLOCK(&manager->queues[task->threadid].lock);
}

void
isc_task_unpause(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;
	bool was_idle = false;

	REQUIRE(ISCAPI_TASK_VALID(task0));

	LOCK(&task->lock);
	INSIST(task->state == task_state_paused ||
	       task->state == task_state_pausing);
	/* If the task was pausing we can't reschedule it */
	if (task->state == task_state_pausing) {
		task->state = task_state_running;
	} else {
		task->state = task_state_idle;
	}
	if (task->state == task_state_idle && !EMPTY(task->events)) {
		task->state = task_state_ready;
		was_idle = true;
	}
	UNLOCK(&task->lock);

	if (was_idle) {
		task_ready(task);
	}
}
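
/*
 * Usage sketch (hypothetical caller, not part of this file): events sent
 * while a task is paused are only queued; they are dispatched after the
 * matching unpause.  If the task is running when it is paused, the
 * dispatcher finishes the current event, parks the task as
 * task_state_paused, and unpausing later makes it ready again.
 *
 *	isc_task_pause(task);
 *	isc_task_send(task, &event);
 *	isc_task_unpause(task);
 */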

void
isc_task_setprivilege(isc_task_t *task0, bool priv) {
	REQUIRE(ISCAPI_TASK_VALID(task0));
	isc__task_t *task = (isc__task_t *)task0;
	isc__taskmgr_t *manager = task->manager;
	uint_fast32_t oldflags, newflags;

	oldflags = atomic_load_acquire(&task->flags);
	do {
		if (priv) {
			newflags = oldflags | TASK_F_PRIVILEGED;
		} else {
			newflags = oldflags & ~TASK_F_PRIVILEGED;
		}
		if (newflags == oldflags) {
			return;
		}
	} while (!atomic_compare_exchange_weak_acq_rel(&task->flags,
						       &oldflags, newflags));

	LOCK(&manager->queues[task->threadid].lock);
	if (priv && ISC_LINK_LINKED(task, ready_link))
		ENQUEUE(manager->queues[task->threadid].ready_priority_tasks,
			task, ready_priority_link);
	else if (!priv && ISC_LINK_LINKED(task, ready_priority_link))
		DEQUEUE(manager->queues[task->threadid].ready_priority_tasks,
			task, ready_priority_link);
	UNLOCK(&manager->queues[task->threadid].lock);
}

bool
isc_task_privilege(isc_task_t *task0) {
	isc__task_t *task = (isc__task_t *)task0;
	REQUIRE(VALID_TASK(task));

	return (TASK_PRIVILEGED(task));
}
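
/*
 * Usage sketch of privileged execution (hypothetical setup code, not
 * part of this file): tasks flagged here are dispatched from the
 * ready_priority_tasks queues while the manager is in
 * isc_taskmgrmode_privileged; dispatch() drops back to normal mode on
 * its own once the ready queues drain.
 *
 *	isc_task_setprivilege(task, true);
 *	isc_taskmgr_setprivilegedmode(taskmgr);
 *	(queued privileged work now runs ahead of ordinary tasks)
 */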

bool
isc_task_exiting(isc_task_t *t) {
	isc__task_t *task = (isc__task_t *)t;
	REQUIRE(VALID_TASK(task));

	return (TASK_SHUTTINGDOWN(task));
}


#ifdef HAVE_LIBXML2
#define TRY0(a) do { xmlrc = (a); if (xmlrc < 0) goto error; } while(0)
int
isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, void *writer0) {
	isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
	isc__task_t *task = NULL;
	int xmlrc;
	xmlTextWriterPtr writer = (xmlTextWriterPtr)writer0;

	LOCK(&mgr->lock);

	/*
	 * Write out the thread-model, and some details about each depending
	 * on which type is enabled.
	 */
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model"));
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type"));
	TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded"));
	TRY0(xmlTextWriterEndElement(writer)); /* type */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->workers));
	TRY0(xmlTextWriterEndElement(writer)); /* worker-threads */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d",
					    mgr->default_quantum));
	TRY0(xmlTextWriterEndElement(writer)); /* default-quantum */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-count"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d",
		(int) atomic_load_relaxed(&mgr->tasks_count)));
	TRY0(xmlTextWriterEndElement(writer)); /* tasks-count */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d",
		(int) atomic_load_relaxed(&mgr->tasks_running)));
	TRY0(xmlTextWriterEndElement(writer)); /* tasks-running */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-ready"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d",
		(int) atomic_load_relaxed(&mgr->tasks_ready)));
	TRY0(xmlTextWriterEndElement(writer)); /* tasks-ready */

	TRY0(xmlTextWriterEndElement(writer)); /* thread-model */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks"));
	task = ISC_LIST_HEAD(mgr->tasks);
	while (task != NULL) {
		LOCK(&task->lock);
		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "task"));

		if (task->name[0] != 0) {
			TRY0(xmlTextWriterStartElement(writer,
						       ISC_XMLCHAR "name"));
			TRY0(xmlTextWriterWriteFormatString(writer, "%s",
							    task->name));
			TRY0(xmlTextWriterEndElement(writer)); /* name */
		}

		TRY0(xmlTextWriterStartElement(writer,
					       ISC_XMLCHAR "references"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%" PRIuFAST32,
			isc_refcount_current(&task->references)));
		TRY0(xmlTextWriterEndElement(writer)); /* references */

		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "id"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%p", task));
		TRY0(xmlTextWriterEndElement(writer)); /* id */

		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "state"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%s",
						    statenames[task->state]));
		TRY0(xmlTextWriterEndElement(writer)); /* state */

		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%d",
						    task->quantum));
		TRY0(xmlTextWriterEndElement(writer)); /* quantum */

		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "events"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%d",
						    task->nevents));
		TRY0(xmlTextWriterEndElement(writer)); /* events */

		TRY0(xmlTextWriterEndElement(writer));

		UNLOCK(&task->lock);
		task = ISC_LIST_NEXT(task, link);
	}
	TRY0(xmlTextWriterEndElement(writer)); /* tasks */

 error:
	if (task != NULL)
		UNLOCK(&task->lock);
	UNLOCK(&mgr->lock);

	return (xmlrc);
}
#endif /* HAVE_LIBXML2 */
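
/*
 * Rendering sketch (hypothetical statistics code, not part of this
 * file), assuming libxml2 is available: the caller owns the writer and
 * the surrounding document; this function only emits the <thread-model>
 * and <tasks> elements.
 *
 *	xmlBufferPtr buf = xmlBufferCreate();
 *	xmlTextWriterPtr writer = xmlNewTextWriterMemory(buf, 0);
 *
 *	xmlTextWriterStartDocument(writer, NULL, "UTF-8", NULL);
 *	isc_taskmgr_renderxml(taskmgr, writer);
 *	xmlTextWriterEndDocument(writer);
 *
 *	(buf->content now holds the rendered XML)
 *
 *	xmlFreeTextWriter(writer);
 *	xmlBufferFree(buf);
 */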

#ifdef HAVE_JSON_C
#define CHECKMEM(m) do { \
	if (m == NULL) { \
		result = ISC_R_NOMEMORY;\
		goto error;\
	} \
} while(0)

isc_result_t
isc_taskmgr_renderjson(isc_taskmgr_t *mgr0, void *tasks0) {
	isc_result_t result = ISC_R_SUCCESS;
	isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
	isc__task_t *task = NULL;
	json_object *obj = NULL, *array = NULL, *taskobj = NULL;
	json_object *tasks = (json_object *)tasks0;

	LOCK(&mgr->lock);

	/*
	 * Write out the thread-model, and some details about each depending
	 * on which type is enabled.
	 */
	obj = json_object_new_string("threaded");
	CHECKMEM(obj);
	json_object_object_add(tasks, "thread-model", obj);

	obj = json_object_new_int(mgr->workers);
	CHECKMEM(obj);
	json_object_object_add(tasks, "worker-threads", obj);

	obj = json_object_new_int(mgr->default_quantum);
	CHECKMEM(obj);
	json_object_object_add(tasks, "default-quantum", obj);

	obj = json_object_new_int(atomic_load_relaxed(&mgr->tasks_count));
	CHECKMEM(obj);
	json_object_object_add(tasks, "tasks-count", obj);

	obj = json_object_new_int(atomic_load_relaxed(&mgr->tasks_running));
	CHECKMEM(obj);
	json_object_object_add(tasks, "tasks-running", obj);

	obj = json_object_new_int(atomic_load_relaxed(&mgr->tasks_ready));
	CHECKMEM(obj);
	json_object_object_add(tasks, "tasks-ready", obj);

	array = json_object_new_array();
	CHECKMEM(array);

	for (task = ISC_LIST_HEAD(mgr->tasks);
	     task != NULL;
	     task = ISC_LIST_NEXT(task, link))
	{
		char buf[255];

		LOCK(&task->lock);

		taskobj = json_object_new_object();
		CHECKMEM(taskobj);
		json_object_array_add(array, taskobj);

		snprintf(buf, sizeof(buf), "%p", task);
		obj = json_object_new_string(buf);
		CHECKMEM(obj);
		json_object_object_add(taskobj, "id", obj);

		if (task->name[0] != 0) {
			obj = json_object_new_string(task->name);
			CHECKMEM(obj);
			json_object_object_add(taskobj, "name", obj);
		}

		obj = json_object_new_int(isc_refcount_current(&task->references));
		CHECKMEM(obj);
		json_object_object_add(taskobj, "references", obj);

		obj = json_object_new_string(statenames[task->state]);
		CHECKMEM(obj);
		json_object_object_add(taskobj, "state", obj);

		obj = json_object_new_int(task->quantum);
		CHECKMEM(obj);
		json_object_object_add(taskobj, "quantum", obj);

		obj = json_object_new_int(task->nevents);
		CHECKMEM(obj);
		json_object_object_add(taskobj, "events", obj);

		UNLOCK(&task->lock);
	}

	json_object_object_add(tasks, "tasks", array);
	array = NULL;
	result = ISC_R_SUCCESS;

 error:
	if (array != NULL)
		json_object_put(array);

	if (task != NULL)
		UNLOCK(&task->lock);
	UNLOCK(&mgr->lock);

	return (result);
}
#endif
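
/*
 * Rendering sketch (hypothetical caller, not part of this file),
 * assuming json-c is available: the caller supplies the target object
 * and remains responsible for releasing it.
 *
 *	json_object *tasks = json_object_new_object();
 *
 *	if (isc_taskmgr_renderjson(taskmgr, tasks) == ISC_R_SUCCESS) {
 *		printf("%s\n", json_object_to_json_string(tasks));
 *	}
 *	json_object_put(tasks);
 */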

isc_result_t
isc_taskmgr_createinctx(isc_mem_t *mctx,
			unsigned int workers, unsigned int default_quantum,
			isc_taskmgr_t **managerp)
{
	isc_result_t result;

	result = isc_taskmgr_create(mctx, workers, default_quantum, NULL,
				    managerp);

	return (result);
}
|