2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-29 05:28:00 +00:00

Remove taskmgr->excl_lock, fix the locking for taskmgr->exiting

While doing code review, it was found that the taskmgr->exiting is set
under taskmgr->lock, but accessed under taskmgr->excl_lock in the
isc_task_beginexclusive().

Additionally, before the change that moved running the tasks to the
netmgr, the task_ready() subroutine of isc_task_detach() would lock
mgr->lock, requiring mgr->excl to be protected by mgr->excl_lock
to prevent deadlock in the code.  After !4918 has been merged, this is
no longer true, and we can remove taskmgr->excl_lock and use
taskmgr->lock in its stead.

Solve both issues by removing taskmgr->excl_lock and exclusively using
taskmgr->lock to protect both taskmgr->excl and taskmgr->exiting, which
now doesn't need to be an atomic_bool, because it is always accessed
from within the locked section.
This commit is contained in:
Ondřej Surý 2022-01-05 13:06:37 +01:00
parent f9d90159b8
commit e705f213ca

View File

@ -139,14 +139,7 @@ struct isc_taskmgr {
LIST(isc_task_t) tasks;
atomic_uint_fast32_t mode;
atomic_bool exclusive_req;
atomic_bool exiting;
/*
* Multiple threads can read/write 'excl' at the same time, so we need
* to protect the access. We can't use 'lock' since isc_task_detach()
* will try to acquire it.
*/
isc_mutex_t excl_lock;
bool exiting;
isc_task_t *excl;
};
@ -252,13 +245,11 @@ isc_task_create_bound(isc_taskmgr_t *manager, unsigned int quantum,
INIT_LINK(task, link);
task->magic = TASK_MAGIC;
exiting = false;
LOCK(&manager->lock);
if (!atomic_load_relaxed(&manager->exiting)) {
exiting = manager->exiting;
if (!exiting) {
APPEND(manager->tasks, task, link);
atomic_fetch_add(&manager->tasks_count, 1);
} else {
exiting = true;
}
UNLOCK(&manager->lock);
@ -913,7 +904,6 @@ manager_free(isc_taskmgr_t *manager) {
isc_nm_detach(&manager->netmgr);
isc_mutex_destroy(&manager->lock);
isc_mutex_destroy(&manager->excl_lock);
manager->magic = 0;
isc_mem_putanddetach(&manager->mctx, manager, sizeof(*manager));
}
@ -957,7 +947,6 @@ isc__taskmgr_create(isc_mem_t *mctx, unsigned int default_quantum, isc_nm_t *nm,
*manager = (isc_taskmgr_t){ .magic = TASK_MANAGER_MAGIC };
isc_mutex_init(&manager->lock);
isc_mutex_init(&manager->excl_lock);
if (default_quantum == 0) {
default_quantum = DEFAULT_DEFAULT_QUANTUM;
@ -969,7 +958,6 @@ isc__taskmgr_create(isc_mem_t *mctx, unsigned int default_quantum, isc_nm_t *nm,
}
INIT_LIST(manager->tasks);
atomic_init(&manager->exiting, false);
atomic_init(&manager->mode, isc_taskmgrmode_normal);
atomic_init(&manager->exclusive_req, false);
atomic_init(&manager->tasks_count, 0);
@ -998,15 +986,6 @@ isc__taskmgr_shutdown(isc_taskmgr_t *manager) {
* that the startup thread is sleeping on.
*/
/*
* Detach the exclusive task before acquiring the manager lock
*/
LOCK(&manager->excl_lock);
if (manager->excl != NULL) {
isc_task_detach((isc_task_t **)&manager->excl);
}
UNLOCK(&manager->excl_lock);
/*
* Unlike elsewhere, we're going to hold this lock a long time.
* We need to do so, because otherwise the list of tasks could
@ -1015,14 +994,16 @@ isc__taskmgr_shutdown(isc_taskmgr_t *manager) {
* This is also the only function where we will hold both the
* task manager lock and a task lock at the same time.
*/
LOCK(&manager->lock);
if (manager->excl != NULL) {
isc_task_detach((isc_task_t **)&manager->excl);
}
/*
* Make sure we only get called once.
*/
INSIST(atomic_compare_exchange_strong(&manager->exiting,
&(bool){ false }, true));
INSIST(manager->exiting == false);
manager->exiting = true;
/*
* Post shutdown event(s) to every task (if they haven't already been
@ -1074,12 +1055,12 @@ isc_taskmgr_setexcltask(isc_taskmgr_t *mgr, isc_task_t *task) {
REQUIRE(task->threadid == 0);
UNLOCK(&task->lock);
LOCK(&mgr->excl_lock);
LOCK(&mgr->lock);
if (mgr->excl != NULL) {
isc_task_detach(&mgr->excl);
}
isc_task_attach(task, &mgr->excl);
UNLOCK(&mgr->excl_lock);
UNLOCK(&mgr->lock);
}
isc_result_t
@ -1089,16 +1070,16 @@ isc_taskmgr_excltask(isc_taskmgr_t *mgr, isc_task_t **taskp) {
REQUIRE(VALID_MANAGER(mgr));
REQUIRE(taskp != NULL && *taskp == NULL);
LOCK(&mgr->excl_lock);
LOCK(&mgr->lock);
if (mgr->excl != NULL) {
isc_task_attach(mgr->excl, taskp);
result = ISC_R_SUCCESS;
} else if (atomic_load_relaxed(&mgr->exiting)) {
} else if (mgr->exiting) {
result = ISC_R_SHUTTINGDOWN;
} else {
result = ISC_R_NOTFOUND;
}
UNLOCK(&mgr->excl_lock);
UNLOCK(&mgr->lock);
return (result);
}
@ -1113,11 +1094,10 @@ isc_task_beginexclusive(isc_task_t *task) {
REQUIRE(task->state == task_state_running);
LOCK(&manager->excl_lock);
REQUIRE(task == task->manager->excl ||
(atomic_load_relaxed(&task->manager->exiting) &&
task->manager->excl == NULL));
UNLOCK(&manager->excl_lock);
LOCK(&manager->lock);
REQUIRE(task == manager->excl ||
(manager->exiting && manager->excl == NULL));
UNLOCK(&manager->lock);
if (!atomic_compare_exchange_strong(&manager->exclusive_req,
&(bool){ false }, true))