/*
 * Source: bind/lib/isc/tests/task_test.c
 * (mirror of https://gitlab.isc.org/isc-projects/bind9)
 */

/*
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, you can obtain one at https://mozilla.org/MPL/2.0/.
*
* See the COPYRIGHT file distributed with this work for additional
* information regarding copyright ownership.
*/
2018-10-24 13:12:55 -07:00
#if HAVE_CMOCKA
/*
 * Include <sched.h> before <cmocka.h> where necessary for musl libc:
 * with UNIT_TESTING defined, <cmocka.h> replaces malloc(), calloc(),
 * realloc() and free() with tracking versions, so the system prototypes
 * must already be visible.  musl's <sched.h> (pulled in via <pthread.h>)
 * also declares calloc() and free(), hence it must come first here.
 */
2019-07-30 21:08:40 +02:00
#include <inttypes.h>
#include <sched.h> /* IWYU pragma: keep */
#include <setjmp.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
2018-10-24 13:12:55 -07:00
#include <string.h>
/* <sched.h> must precede <cmocka.h> for musl libc (calloc/free prototypes). */
2019-07-30 21:08:40 +02:00
#include <unistd.h>
2018-10-24 13:12:55 -07:00
#define UNIT_TESTING
#include <cmocka.h>
#include <isc/atomic.h>
#include <isc/cmocka.h>
2018-10-24 13:12:55 -07:00
#include <isc/commandline.h>
#include <isc/condition.h>
#include <isc/mem.h>
#include <isc/platform.h>
2018-03-09 16:55:21 -08:00
#include <isc/print.h>
#include <isc/task.h>
#include <isc/time.h>
#include <isc/timer.h>
#include <isc/util.h>
#include "../task_p.h"
#include "isctest.h"
/* Set to true (or use -v option) for verbose output */
2018-10-24 13:12:55 -07:00
static bool verbose = false;
2020-02-13 14:44:37 -08:00
static isc_mutex_t lock;
2018-10-24 13:12:55 -07:00
static isc_condition_t cv;
2019-07-12 16:44:51 +02:00
atomic_int_fast32_t counter;
2020-02-13 14:44:37 -08:00
static int active[10];
static atomic_bool done, done2;
2018-10-24 13:12:55 -07:00
static int
2020-02-13 14:44:37 -08:00
_setup(void **state) {
2018-10-24 13:12:55 -07:00
isc_result_t result;
UNUSED(state);
2018-11-16 15:33:22 +01:00
isc_mutex_init(&lock);
2018-10-24 13:12:55 -07:00
2018-11-15 17:20:36 +01:00
isc_condition_init(&cv);
2018-10-24 13:12:55 -07:00
result = isc_test_begin(NULL, true, 0);
assert_int_equal(result, ISC_R_SUCCESS);
return (0);
}
static int
2020-02-13 14:44:37 -08:00
_setup2(void **state) {
2018-10-24 13:12:55 -07:00
isc_result_t result;
UNUSED(state);
2018-11-16 15:33:22 +01:00
isc_mutex_init(&lock);
2018-10-24 13:12:55 -07:00
2018-11-15 17:20:36 +01:00
isc_condition_init(&cv);
2018-10-24 13:12:55 -07:00
/* Two worker threads */
result = isc_test_begin(NULL, true, 2);
assert_int_equal(result, ISC_R_SUCCESS);
return (0);
}
static int
2020-02-13 14:44:37 -08:00
_setup4(void **state) {
2018-10-24 13:12:55 -07:00
isc_result_t result;
UNUSED(state);
2018-11-16 15:33:22 +01:00
isc_mutex_init(&lock);
2018-10-24 13:12:55 -07:00
2018-11-15 17:20:36 +01:00
isc_condition_init(&cv);
2018-10-24 13:12:55 -07:00
/* Four worker threads */
result = isc_test_begin(NULL, true, 4);
assert_int_equal(result, ISC_R_SUCCESS);
return (0);
}
static int
2020-02-13 14:44:37 -08:00
_teardown(void **state) {
2018-10-24 13:12:55 -07:00
UNUSED(state);
isc_test_end();
isc_condition_destroy(&cv);
return (0);
}
static void
2020-02-13 14:44:37 -08:00
set(isc_task_t *task, isc_event_t *event) {
atomic_int_fast32_t *value = (atomic_int_fast32_t *)event->ev_arg;
UNUSED(task);
isc_event_free(&event);
atomic_store(value, atomic_fetch_add(&counter, 1));
}
static void
2020-02-13 14:44:37 -08:00
set_and_drop(isc_task_t *task, isc_event_t *event) {
atomic_int_fast32_t *value = (atomic_int_fast32_t *)event->ev_arg;
UNUSED(task);
isc_event_free(&event);
LOCK(&lock);
atomic_store(value, (int)isc_taskmgr_mode(taskmgr));
atomic_fetch_add(&counter, 1);
UNLOCK(&lock);
}
/* Create a task */
2018-10-24 13:12:55 -07:00
static void
2020-02-13 14:44:37 -08:00
create_task(void **state) {
isc_result_t result;
2020-02-13 14:44:37 -08:00
isc_task_t *task = NULL;
2018-10-24 13:12:55 -07:00
UNUSED(state);
result = isc_task_create(taskmgr, 0, &task);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
isc_task_destroy(&task);
2018-10-24 13:12:55 -07:00
assert_null(task);
}
/* Process events */
2018-10-24 13:12:55 -07:00
static void
2020-02-13 14:44:37 -08:00
all_events(void **state) {
isc_result_t result;
isc_task_t *task = NULL;
isc_event_t *event = NULL;
2019-07-12 16:44:51 +02:00
atomic_int_fast32_t a, b;
2020-02-13 14:44:37 -08:00
int i = 0;
2018-10-24 13:12:55 -07:00
UNUSED(state);
atomic_init(&counter, 1);
2019-07-12 16:44:51 +02:00
atomic_init(&a, 0);
atomic_init(&b, 0);
result = isc_task_create(taskmgr, 0, &task);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
/* First event */
event = isc_event_allocate(test_mctx, task, ISC_TASKEVENT_TEST, set, &a,
sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&a), 0);
isc_task_send(task, &event);
event = isc_event_allocate(test_mctx, task, ISC_TASKEVENT_TEST, set, &b,
sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&b), 0);
isc_task_send(task, &event);
while ((atomic_load(&a) == 0 || atomic_load(&b) == 0) && i++ < 5000) {
isc_test_nap(1000);
}
assert_int_not_equal(atomic_load(&a), 0);
assert_int_not_equal(atomic_load(&b), 0);
isc_task_destroy(&task);
2018-10-24 13:12:55 -07:00
assert_null(task);
}
/* Privileged events */
2018-10-24 13:12:55 -07:00
static void
2020-02-13 14:44:37 -08:00
privileged_events(void **state) {
isc_result_t result;
isc_task_t *task1 = NULL, *task2 = NULL;
isc_event_t *event = NULL;
2019-07-12 16:44:51 +02:00
atomic_int_fast32_t a, b, c, d, e;
2020-02-13 14:44:37 -08:00
int i = 0;
2018-10-24 13:12:55 -07:00
UNUSED(state);
atomic_init(&counter, 1);
2019-07-12 16:44:51 +02:00
atomic_init(&a, 0);
atomic_init(&b, 0);
atomic_init(&c, 0);
atomic_init(&d, 0);
atomic_init(&e, 0);
/*
* Pause the task manager so we can fill up the work queue
* without things happening while we do it.
*/
isc__taskmgr_pause(taskmgr);
result = isc_task_create(taskmgr, 0, &task1);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
isc_task_setname(task1, "privileged", NULL);
2018-10-24 13:12:55 -07:00
assert_false(isc_task_privilege(task1));
isc_task_setprivilege(task1, true);
2018-10-24 13:12:55 -07:00
assert_true(isc_task_privilege(task1));
result = isc_task_create(taskmgr, 0, &task2);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
isc_task_setname(task2, "normal", NULL);
2018-10-24 13:12:55 -07:00
assert_false(isc_task_privilege(task2));
/* First event: privileged */
event = isc_event_allocate(test_mctx, task1, ISC_TASKEVENT_TEST, set,
&a, sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&a), 0);
isc_task_send(task1, &event);
/* Second event: not privileged */
event = isc_event_allocate(test_mctx, task2, ISC_TASKEVENT_TEST, set,
&b, sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&b), 0);
isc_task_send(task2, &event);
/* Third event: privileged */
event = isc_event_allocate(test_mctx, task1, ISC_TASKEVENT_TEST, set,
&c, sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&c), 0);
isc_task_send(task1, &event);
/* Fourth event: privileged */
event = isc_event_allocate(test_mctx, task1, ISC_TASKEVENT_TEST, set,
&d, sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&d), 0);
isc_task_send(task1, &event);
/* Fifth event: not privileged */
event = isc_event_allocate(test_mctx, task2, ISC_TASKEVENT_TEST, set,
&e, sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&e), 0);
isc_task_send(task2, &event);
2018-10-24 13:12:55 -07:00
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_normal);
isc_taskmgr_setprivilegedmode(taskmgr);
2018-10-24 13:12:55 -07:00
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_privileged);
isc__taskmgr_resume(taskmgr);
/* We're waiting for *all* variables to be set */
while ((atomic_load(&a) == 0 || atomic_load(&b) == 0 ||
atomic_load(&c) == 0 || atomic_load(&d) == 0 ||
atomic_load(&e) == 0) &&
2020-02-13 14:44:37 -08:00
i++ < 5000)
{
isc_test_nap(1000);
}
/*
* We can't guarantee what order the events fire, but
* we do know the privileged tasks that set a, c, and d
* would have fired first.
*/
assert_true(atomic_load(&a) <= 3);
assert_true(atomic_load(&c) <= 3);
assert_true(atomic_load(&d) <= 3);
/* ...and the non-privileged tasks that set b and e, last */
assert_true(atomic_load(&b) >= 4);
assert_true(atomic_load(&e) >= 4);
assert_int_equal(atomic_load(&counter), 6);
isc_task_setprivilege(task1, false);
2018-10-24 13:12:55 -07:00
assert_false(isc_task_privilege(task1));
2018-10-24 13:12:55 -07:00
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_normal);
isc_task_destroy(&task1);
2018-10-24 13:12:55 -07:00
assert_null(task1);
isc_task_destroy(&task2);
2018-10-24 13:12:55 -07:00
assert_null(task2);
}
/*
* Edge case: this tests that the task manager behaves as expected when
* we explicitly set it into normal mode *while* running privileged.
*/
2018-10-24 13:12:55 -07:00
static void
2020-02-13 14:44:37 -08:00
privilege_drop(void **state) {
isc_result_t result;
isc_task_t *task1 = NULL, *task2 = NULL;
isc_event_t *event = NULL;
atomic_int_fast32_t a, b, c, d, e; /* non valid states */
2020-02-13 14:44:37 -08:00
int i = 0;
2018-10-24 13:12:55 -07:00
UNUSED(state);
atomic_init(&counter, 1);
2019-07-12 16:44:51 +02:00
atomic_init(&a, -1);
atomic_init(&b, -1);
atomic_init(&c, -1);
atomic_init(&d, -1);
atomic_init(&e, -1);
/*
* Pause the task manager so we can fill up the work queue
* without things happening while we do it.
*/
isc__taskmgr_pause(taskmgr);
result = isc_task_create(taskmgr, 0, &task1);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
isc_task_setname(task1, "privileged", NULL);
2018-10-24 13:12:55 -07:00
assert_false(isc_task_privilege(task1));
isc_task_setprivilege(task1, true);
2018-10-24 13:12:55 -07:00
assert_true(isc_task_privilege(task1));
result = isc_task_create(taskmgr, 0, &task2);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
isc_task_setname(task2, "normal", NULL);
2018-10-24 13:12:55 -07:00
assert_false(isc_task_privilege(task2));
/* First event: privileged */
event = isc_event_allocate(test_mctx, task1, ISC_TASKEVENT_TEST,
set_and_drop, &a, sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&a), -1);
isc_task_send(task1, &event);
/* Second event: not privileged */
event = isc_event_allocate(test_mctx, task2, ISC_TASKEVENT_TEST,
set_and_drop, &b, sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&b), -1);
isc_task_send(task2, &event);
/* Third event: privileged */
event = isc_event_allocate(test_mctx, task1, ISC_TASKEVENT_TEST,
set_and_drop, &c, sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&c), -1);
isc_task_send(task1, &event);
/* Fourth event: privileged */
event = isc_event_allocate(test_mctx, task1, ISC_TASKEVENT_TEST,
set_and_drop, &d, sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&d), -1);
isc_task_send(task1, &event);
/* Fifth event: not privileged */
event = isc_event_allocate(test_mctx, task2, ISC_TASKEVENT_TEST,
set_and_drop, &e, sizeof(isc_event_t));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
assert_int_equal(atomic_load(&e), -1);
isc_task_send(task2, &event);
2018-10-24 13:12:55 -07:00
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_normal);
isc_taskmgr_setprivilegedmode(taskmgr);
2018-10-24 13:12:55 -07:00
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_privileged);
isc__taskmgr_resume(taskmgr);
/* We're waiting for all variables to be set. */
while ((atomic_load(&a) == -1 || atomic_load(&b) == -1 ||
atomic_load(&c) == -1 || atomic_load(&d) == -1 ||
atomic_load(&e) == -1) &&
2020-02-13 14:44:37 -08:00
i++ < 5000)
{
isc_test_nap(1000);
}
/*
* We need to check that all privilege mode events were fired
* in privileged mode, and non privileged in non-privileged.
*/
assert_true(atomic_load(&a) == isc_taskmgrmode_privileged ||
atomic_load(&c) == isc_taskmgrmode_privileged ||
atomic_load(&d) == isc_taskmgrmode_privileged);
/* ...and neither of the non-privileged tasks did... */
assert_true(atomic_load(&b) == isc_taskmgrmode_normal ||
atomic_load(&e) == isc_taskmgrmode_normal);
/* ...but all five of them did run. */
assert_int_equal(atomic_load(&counter), 6);
2018-10-24 13:12:55 -07:00
assert_int_equal(isc_taskmgr_mode(taskmgr), isc_taskmgrmode_normal);
isc_task_destroy(&task1);
2018-10-24 13:12:55 -07:00
assert_null(task1);
isc_task_destroy(&task2);
2018-10-24 13:12:55 -07:00
assert_null(task2);
}
static void
2020-02-13 14:44:37 -08:00
sleep_cb(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
int p = *(int *)event->ev_arg;
if (p == 1) {
/*
* Signal the main thread that we're running, so that
* it can trigger the race.
*/
LOCK(&lock);
atomic_store(&done2, true);
SIGNAL(&cv);
UNLOCK(&lock);
/*
* Wait for the operations in the main thread to be finished.
*/
LOCK(&lock);
while (!atomic_load(&done)) {
WAIT(&cv, &lock);
}
UNLOCK(&lock);
} else {
/*
* Wait for the operations in the main thread to be finished.
*/
LOCK(&lock);
atomic_store(&done2, true);
SIGNAL(&cv);
UNLOCK(&lock);
}
isc_event_free(&event);
}
static void
2020-02-13 14:44:37 -08:00
pause_unpause(void **state) {
isc_result_t result;
2020-02-13 14:44:37 -08:00
isc_task_t *task = NULL;
isc_event_t *event1, *event2 = NULL;
UNUSED(state);
atomic_store(&done, false);
atomic_store(&done2, false);
result = isc_task_create(taskmgr, 0, &task);
assert_int_equal(result, ISC_R_SUCCESS);
event1 = isc_event_allocate(test_mctx, task, ISC_TASKEVENT_TEST,
sleep_cb, &(int){ 1 }, sizeof(isc_event_t));
assert_non_null(event1);
event2 = isc_event_allocate(test_mctx, task, ISC_TASKEVENT_TEST,
sleep_cb, &(int){ 2 }, sizeof(isc_event_t));
assert_non_null(event2);
isc_task_send(task, &event1);
isc_task_send(task, &event2);
/* Wait for event1 to be running */
LOCK(&lock);
while (!atomic_load(&done2)) {
WAIT(&cv, &lock);
}
UNLOCK(&lock);
/* Pause-unpause-detach is what causes the race */
isc_task_pause(task);
isc_task_unpause(task);
isc_task_detach(&task);
/* Signal event1 to finish */
LOCK(&lock);
atomic_store(&done2, false);
atomic_store(&done, true);
SIGNAL(&cv);
UNLOCK(&lock);
/* Wait for event2 to finish */
LOCK(&lock);
while (!atomic_load(&done2)) {
WAIT(&cv, &lock);
}
UNLOCK(&lock);
}
/*
* Basic task functions:
*/
static void
2020-02-13 14:44:37 -08:00
basic_cb(isc_task_t *task, isc_event_t *event) {
2018-10-24 13:12:55 -07:00
int i, j;
UNUSED(task);
j = 0;
for (i = 0; i < 1000000; i++) {
j += 100;
}
2019-08-08 13:52:44 +10:00
UNUSED(j);
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# task %s\n", (char *)event->ev_arg);
}
isc_event_free(&event);
}
static void
2020-02-13 14:44:37 -08:00
basic_shutdown(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# shutdown %s\n", (char *)event->ev_arg);
}
isc_event_free(&event);
}
static void
2020-02-13 14:44:37 -08:00
basic_tick(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# %s\n", (char *)event->ev_arg);
}
isc_event_free(&event);
}
static char one[] = "1";
static char two[] = "2";
static char three[] = "3";
static char four[] = "4";
static char tick[] = "tick";
static char tock[] = "tock";
2018-10-24 13:12:55 -07:00
static void
2020-02-13 14:44:37 -08:00
basic(void **state) {
isc_result_t result;
isc_task_t *task1 = NULL;
isc_task_t *task2 = NULL;
isc_task_t *task3 = NULL;
isc_task_t *task4 = NULL;
isc_event_t *event = NULL;
isc_timer_t *ti1 = NULL;
isc_timer_t *ti2 = NULL;
isc_time_t absolute;
isc_interval_t interval;
char *testarray[] = { one, one, one, one, one, one, one, one,
one, two, three, four, two, three, four, NULL };
2020-02-13 14:44:37 -08:00
int i;
2018-10-24 13:12:55 -07:00
UNUSED(state);
result = isc_task_create(taskmgr, 0, &task1);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_task_create(taskmgr, 0, &task2);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_task_create(taskmgr, 0, &task3);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_task_create(taskmgr, 0, &task4);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_task_onshutdown(task1, basic_shutdown, one);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_task_onshutdown(task2, basic_shutdown, two);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_task_onshutdown(task3, basic_shutdown, three);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_task_onshutdown(task4, basic_shutdown, four);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
isc_time_settoepoch(&absolute);
isc_interval_set(&interval, 1, 0);
result = isc_timer_create(timermgr, isc_timertype_ticker, &absolute,
&interval, task1, basic_tick, tick, &ti1);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
ti2 = NULL;
isc_time_settoepoch(&absolute);
isc_interval_set(&interval, 1, 0);
result = isc_timer_create(timermgr, isc_timertype_ticker, &absolute,
&interval, task2, basic_tick, tock, &ti2);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
#ifndef WIN32
sleep(2);
#else /* ifndef WIN32 */
Sleep(2000);
#endif /* ifndef WIN32 */
for (i = 0; testarray[i] != NULL; i++) {
/*
* Note: (void *)1 is used as a sender here, since some
* compilers don't like casting a function pointer to a
* (void *).
*
* In a real use, it is more likely the sender would be a
* structure (socket, timer, task, etc) but this is just a
* test program.
*/
event = isc_event_allocate(test_mctx, (void *)1, 1, basic_cb,
testarray[i], sizeof(*event));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
isc_task_send(task1, &event);
}
(void)isc_task_purge(task3, NULL, 0, 0);
isc_task_detach(&task1);
isc_task_detach(&task2);
isc_task_detach(&task3);
isc_task_detach(&task4);
#ifndef WIN32
sleep(10);
#else /* ifndef WIN32 */
Sleep(10000);
#endif /* ifndef WIN32 */
isc_timer_detach(&ti1);
isc_timer_detach(&ti2);
}
/*
* Exclusive mode test:
* When one task enters exclusive mode, all other active
* tasks complete first.
*/
2018-10-24 13:12:55 -07:00
/*
 * Burn CPU deterministically: accumulate 0..n-1, resetting the running
 * total to zero whenever it exceeds 1000000, and return the result.
 */
static int
spin(int n) {
	int i, total = 0;

	for (i = 0; i < n; i++) {
		total += i;
		if (total > 1000000) {
			total = 0;
		}
	}
	return (total);
}
static void
2020-02-13 14:44:37 -08:00
exclusive_cb(isc_task_t *task, isc_event_t *event) {
int taskno = *(int *)(event->ev_arg);
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# task enter %d\n", taskno);
}
/* task chosen from the middle of the range */
if (taskno == 6) {
isc_result_t result;
2020-02-13 14:44:37 -08:00
int i;
result = isc_task_beginexclusive(task);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
for (i = 0; i < 10; i++) {
2018-10-24 13:12:55 -07:00
assert_int_equal(active[i], 0);
}
isc_task_endexclusive(task);
atomic_store(&done, true);
} else {
active[taskno]++;
(void)spin(10000000);
active[taskno]--;
}
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# task exit %d\n", taskno);
}
if (atomic_load(&done)) {
isc_mem_put(event->ev_destroy_arg, event->ev_arg, sizeof(int));
isc_event_free(&event);
} else {
isc_task_send(task, &event);
}
}
2018-10-24 13:12:55 -07:00
static void
2020-02-13 14:44:37 -08:00
task_exclusive(void **state) {
isc_task_t *tasks[10];
isc_result_t result;
2020-02-13 14:44:37 -08:00
int i;
2018-10-24 13:12:55 -07:00
UNUSED(state);
for (i = 0; i < 10; i++) {
isc_event_t *event = NULL;
2020-02-13 14:44:37 -08:00
int *v;
tasks[i] = NULL;
result = isc_task_create(taskmgr, 0, &tasks[i]);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
/* task chosen from the middle of the range */
if (i == 6) {
isc_taskmgr_setexcltask(taskmgr, tasks[6]);
}
v = isc_mem_get(test_mctx, sizeof *v);
2018-10-24 13:12:55 -07:00
assert_non_null(v);
*v = i;
event = isc_event_allocate(test_mctx, NULL, 1, exclusive_cb, v,
sizeof(*event));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
isc_task_send(tasks[i], &event);
}
for (i = 0; i < 10; i++) {
isc_task_detach(&tasks[i]);
}
}
/*
* Max tasks test:
* The task system can create and execute many tasks. Tests with 10000.
*/
static void
2020-02-13 14:44:37 -08:00
maxtask_shutdown(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
if (event->ev_arg != NULL) {
isc_task_destroy((isc_task_t **)&event->ev_arg);
} else {
LOCK(&lock);
atomic_store(&done, true);
SIGNAL(&cv);
UNLOCK(&lock);
}
2018-10-24 13:12:55 -07:00
isc_event_free(&event);
}
static void
2020-02-13 14:44:37 -08:00
maxtask_cb(isc_task_t *task, isc_event_t *event) {
isc_result_t result;
if (event->ev_arg != NULL) {
isc_task_t *newtask = NULL;
event->ev_arg = (void *)(((uintptr_t)event->ev_arg) - 1);
/*
* Create a new task and forward the message.
*/
result = isc_task_create(taskmgr, 0, &newtask);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_task_onshutdown(newtask, maxtask_shutdown,
2018-10-24 13:12:55 -07:00
(void *)task);
assert_int_equal(result, ISC_R_SUCCESS);
isc_task_send(newtask, &event);
} else if (task != NULL) {
isc_task_destroy(&task);
2018-10-24 13:12:55 -07:00
isc_event_free(&event);
}
}
2018-10-24 13:12:55 -07:00
static void
2020-02-13 14:44:37 -08:00
manytasks(void **state) {
isc_mem_t *mctx = NULL;
isc_result_t result;
isc_event_t *event = NULL;
2020-02-13 14:44:37 -08:00
uintptr_t ntasks = 10000;
2018-10-24 13:12:55 -07:00
UNUSED(state);
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# Testing with %lu tasks\n",
(unsigned long)ntasks);
}
isc_mutex_init(&lock);
2018-11-15 17:20:36 +01:00
isc_condition_init(&cv);
2018-10-24 13:12:55 -07:00
isc_mem_debugging = ISC_MEM_DEBUGRECORD;
isc_mem_create(&mctx);
result = isc_taskmgr_create(mctx, 4, 0, NULL, &taskmgr);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
atomic_init(&done, false);
event = isc_event_allocate(mctx, (void *)1, 1, maxtask_cb,
(void *)ntasks, sizeof(*event));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
LOCK(&lock);
maxtask_cb(NULL, event);
while (!atomic_load(&done)) {
WAIT(&cv, &lock);
}
UNLOCK(&lock);
2018-10-24 13:12:55 -07:00
isc_taskmgr_destroy(&taskmgr);
isc_mem_destroy(&mctx);
isc_condition_destroy(&cv);
isc_mutex_destroy(&lock);
2018-10-24 13:12:55 -07:00
}
/*
* Shutdown test:
* When isc_task_shutdown() is called, shutdown events are posted
* in LIFO order.
*/
2020-02-13 14:44:37 -08:00
static int nevents = 0;
static int nsdevents = 0;
static int senders[4];
2019-07-12 16:44:51 +02:00
atomic_bool ready, all_done;
static void
2020-02-13 14:44:37 -08:00
sd_sde1(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
2018-10-24 13:12:55 -07:00
assert_int_equal(nevents, 256);
assert_int_equal(nsdevents, 1);
++nsdevents;
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# shutdown 1\n");
}
isc_event_free(&event);
2018-10-24 13:12:55 -07:00
atomic_store(&all_done, true);
}
static void
2020-02-13 14:44:37 -08:00
sd_sde2(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
2018-10-24 13:12:55 -07:00
assert_int_equal(nevents, 256);
assert_int_equal(nsdevents, 0);
++nsdevents;
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# shutdown 2\n");
}
isc_event_free(&event);
}
static void
2020-02-13 14:44:37 -08:00
sd_event1(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
LOCK(&lock);
2019-07-12 16:44:51 +02:00
while (!atomic_load(&ready)) {
WAIT(&cv, &lock);
}
2018-10-24 13:12:55 -07:00
UNLOCK(&lock);
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# event 1\n");
}
isc_event_free(&event);
}
static void
2020-02-13 14:44:37 -08:00
sd_event2(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
++nevents;
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# event 2\n");
}
isc_event_free(&event);
}
2018-10-24 13:12:55 -07:00
static void
2020-02-13 14:44:37 -08:00
shutdown(void **state) {
isc_result_t result;
isc_eventtype_t event_type;
2020-02-13 14:44:37 -08:00
isc_event_t *event = NULL;
isc_task_t *task = NULL;
int i;
2018-10-24 13:12:55 -07:00
UNUSED(state);
2018-10-24 13:12:55 -07:00
nevents = nsdevents = 0;
event_type = 3;
2019-07-12 16:44:51 +02:00
atomic_init(&ready, false);
atomic_init(&all_done, false);
LOCK(&lock);
result = isc_task_create(taskmgr, 0, &task);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
/*
* This event causes the task to wait on cv.
*/
event = isc_event_allocate(test_mctx, &senders[1], event_type,
sd_event1, NULL, sizeof(*event));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
isc_task_send(task, &event);
/*
* Now we fill up the task's event queue with some events.
*/
for (i = 0; i < 256; ++i) {
event = isc_event_allocate(test_mctx, &senders[1], event_type,
sd_event2, NULL, sizeof(*event));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
isc_task_send(task, &event);
}
/*
* Now we register two shutdown events.
*/
result = isc_task_onshutdown(task, sd_sde1, NULL);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_task_onshutdown(task, sd_sde2, NULL);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
isc_task_shutdown(task);
2018-10-24 13:12:55 -07:00
isc_task_detach(&task);
/*
* Now we free the task by signaling cv.
*/
2019-07-12 16:44:51 +02:00
atomic_store(&ready, true);
SIGNAL(&cv);
UNLOCK(&lock);
while (!atomic_load(&all_done)) {
2018-10-24 13:12:55 -07:00
isc_test_nap(1000);
}
2018-10-24 13:12:55 -07:00
assert_int_equal(nsdevents, 2);
}
/*
* Post-shutdown test:
* After isc_task_shutdown() has been called, any call to
* isc_task_onshutdown() will return ISC_R_SHUTTINGDOWN.
*/
static void
2020-02-13 14:44:37 -08:00
psd_event1(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
LOCK(&lock);
while (!atomic_load(&done)) {
WAIT(&cv, &lock);
}
UNLOCK(&lock);
isc_event_free(&event);
}
static void
2020-02-13 14:44:37 -08:00
psd_sde(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
isc_event_free(&event);
}
2018-10-24 13:12:55 -07:00
static void
2020-02-13 14:44:37 -08:00
post_shutdown(void **state) {
isc_result_t result;
isc_eventtype_t event_type;
2020-02-13 14:44:37 -08:00
isc_event_t *event;
isc_task_t *task;
2018-10-24 13:12:55 -07:00
UNUSED(state);
atomic_init(&done, false);
event_type = 4;
2018-11-15 17:20:36 +01:00
isc_condition_init(&cv);
LOCK(&lock);
task = NULL;
result = isc_task_create(taskmgr, 0, &task);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SUCCESS);
/*
* This event causes the task to wait on cv.
*/
event = isc_event_allocate(test_mctx, &senders[1], event_type,
psd_event1, NULL, sizeof(*event));
2018-10-24 13:12:55 -07:00
assert_non_null(event);
isc_task_send(task, &event);
isc_task_shutdown(task);
result = isc_task_onshutdown(task, psd_sde, NULL);
2018-10-24 13:12:55 -07:00
assert_int_equal(result, ISC_R_SHUTTINGDOWN);
/*
* Release the task.
*/
atomic_store(&done, true);
SIGNAL(&cv);
UNLOCK(&lock);
isc_task_detach(&task);
}
/*
 * Helper for the purge tests below:
 */

/* Dimensions of the sender/type/tag event matrix built by test_purge(). */
#define SENDERCNT 3
#define TYPECNT 4
#define TAGCNT 5
/* Total number of events queued per test_purge() run. */
#define NEVENTS (SENDERCNT * TYPECNT * TAGCNT)

/*
 * Parameters for the purge call under test; the purge()/purgerange()
 * drivers set these before each call to test_purge().
 */
static bool testrange;	  /* true: isc_task_purgerange, false: isc_task_purge */
static void *purge_sender;		 /* sender to match; NULL matches any */
static isc_eventtype_t purge_type_first; /* first event type to purge */
static isc_eventtype_t purge_type_last;	 /* last event type to purge */
static void *purge_tag;			 /* tag to match; NULL matches any */
static int eventcnt; /* number of events actually delivered (not purged) */

atomic_bool started;
static void
2020-02-13 14:44:37 -08:00
pg_event1(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
LOCK(&lock);
while (!atomic_load(&started)) {
WAIT(&cv, &lock);
}
UNLOCK(&lock);
isc_event_free(&event);
}
static void
2020-02-13 14:44:37 -08:00
pg_event2(isc_task_t *task, isc_event_t *event) {
bool sender_match = false;
bool type_match = false;
bool tag_match = false;
UNUSED(task);
if ((purge_sender == NULL) || (purge_sender == event->ev_sender)) {
sender_match = true;
}
if (testrange) {
if ((purge_type_first <= event->ev_type) &&
(event->ev_type <= purge_type_last)) {
type_match = true;
}
} else {
if (purge_type_first == event->ev_type) {
type_match = true;
}
}
if ((purge_tag == NULL) || (purge_tag == event->ev_tag)) {
tag_match = true;
}
if (sender_match && type_match && tag_match) {
if ((event->ev_attributes & ISC_EVENTATTR_NOPURGE) != 0) {
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# event %p,%d,%p "
"matched but was not "
"purgeable\n",
event->ev_sender,
(int)event->ev_type,
event->ev_tag);
}
++eventcnt;
2018-10-24 13:12:55 -07:00
} else if (verbose) {
print_message("# event %p,%d,%p not purged\n",
event->ev_sender, (int)event->ev_type,
event->ev_tag);
}
} else {
++eventcnt;
}
isc_event_free(&event);
}
static void
2020-02-13 14:44:37 -08:00
pg_sde(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
LOCK(&lock);
atomic_store(&done, true);
SIGNAL(&cv);
UNLOCK(&lock);
isc_event_free(&event);
}
/*
 * Shared harness for the purge tests: queue NEVENTS events with a
 * matrix of senders, types and tags onto a blocked task, run either
 * isc_task_purge() (testrange == false) or isc_task_purgerange()
 * (testrange == true) with the current purge_* parameters, and verify
 * that exactly 'exp_purged' events were purged and that every
 * remaining event was delivered.
 *
 * 'sender', 'type' and 'tag' are the base values from which the
 * matrix of event attributes is generated.
 */
static void
test_purge(int sender, int type, int tag, int exp_purged) {
	isc_result_t result;
	isc_task_t *task = NULL;
	isc_event_t *eventtab[NEVENTS];
	isc_event_t *event = NULL;
	isc_interval_t interval;
	isc_time_t now;
	int sender_cnt, type_cnt, tag_cnt, event_cnt, i;
	int purged = 0;

	atomic_init(&started, false);
	atomic_init(&done, false);
	eventcnt = 0;

	isc_condition_init(&cv);

	result = isc_task_create(taskmgr, 0, &task);
	assert_int_equal(result, ISC_R_SUCCESS);

	result = isc_task_onshutdown(task, pg_sde, NULL);
	assert_int_equal(result, ISC_R_SUCCESS);

	/*
	 * Block the task on cv.
	 */
	event = isc_event_allocate(test_mctx, (void *)1, 9999, pg_event1, NULL,
				   sizeof(*event));
	assert_non_null(event);
	isc_task_send(task, &event);

	/*
	 * Fill the task's queue with some messages with varying
	 * sender, type, tag, and purgeable attribute values.
	 */
	event_cnt = 0;
	for (sender_cnt = 0; sender_cnt < SENDERCNT; ++sender_cnt) {
		for (type_cnt = 0; type_cnt < TYPECNT; ++type_cnt) {
			for (tag_cnt = 0; tag_cnt < TAGCNT; ++tag_cnt) {
				eventtab[event_cnt] = isc_event_allocate(
					test_mctx,
					&senders[sender + sender_cnt],
					(isc_eventtype_t)(type + type_cnt),
					pg_event2, NULL, sizeof(*event));
				assert_non_null(eventtab[event_cnt]);

				eventtab[event_cnt]->ev_tag =
					(void *)((uintptr_t)tag + tag_cnt);

				/*
				 * Mark events as non-purgeable if
				 * sender, type and tag are all
				 * odd-numbered. (There should be 4
				 * of these out of 60 events total.)
				 */
				if (((sender_cnt % 2) != 0) &&
				    ((type_cnt % 2) != 0) &&
				    ((tag_cnt % 2) != 0)) {
					eventtab[event_cnt]->ev_attributes |=
						ISC_EVENTATTR_NOPURGE;
				}
				++event_cnt;
			}
		}
	}

	for (i = 0; i < event_cnt; ++i) {
		isc_task_send(task, &eventtab[i]);
	}

	if (testrange) {
		/*
		 * We're testing isc_task_purgerange.
		 */
		purged = isc_task_purgerange(
			task, purge_sender, (isc_eventtype_t)purge_type_first,
			(isc_eventtype_t)purge_type_last, purge_tag);
		assert_int_equal(purged, exp_purged);
	} else {
		/*
		 * We're testing isc_task_purge.
		 */
		if (verbose) {
			print_message("# purge events %p,%u,%p\n", purge_sender,
				      purge_type_first, purge_tag);
		}
		purged = isc_task_purge(task, purge_sender,
					(isc_eventtype_t)purge_type_first,
					purge_tag);
		if (verbose) {
			print_message("# purged %d expected %d\n", purged,
				      exp_purged);
		}
		assert_int_equal(purged, exp_purged);
	}

	/*
	 * Unblock the task, allowing event processing.
	 */
	LOCK(&lock);
	atomic_store(&started, true);
	SIGNAL(&cv);
	isc_task_shutdown(task);
	isc_interval_set(&interval, 5, 0);

	/*
	 * Wait for shutdown processing to complete.
	 */
	while (!atomic_load(&done)) {
		result = isc_time_nowplusinterval(&now, &interval);
		assert_int_equal(result, ISC_R_SUCCESS);
		WAITUNTIL(&cv, &lock, &now);
	}
	UNLOCK(&lock);
	isc_task_detach(&task);

	/* Every event that was not purged must have reached pg_event2. */
	assert_int_equal(eventcnt, event_cnt - exp_purged);
}
/*
* Purge test:
* A call to isc_task_purge(task, sender, type, tag) purges all events of
* type 'type' and with tag 'tag' not marked as unpurgeable from sender
* from the task's " queue and returns the number of events purged.
*/
2018-10-24 13:12:55 -07:00
static void
2020-02-13 14:44:37 -08:00
purge(void **state) {
2018-10-24 13:12:55 -07:00
UNUSED(state);
/* Try purging on a specific sender. */
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# testing purge on 2,4,8 expecting 1\n");
}
purge_sender = &senders[2];
purge_type_first = 4;
purge_type_last = 4;
purge_tag = (void *)8;
testrange = false;
test_purge(1, 4, 7, 1);
/* Try purging on all senders. */
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# testing purge on 0,4,8 expecting 3\n");
}
purge_sender = NULL;
purge_type_first = 4;
purge_type_last = 4;
purge_tag = (void *)8;
testrange = false;
test_purge(1, 4, 7, 3);
/* Try purging on all senders, specified type, all tags. */
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# testing purge on 0,4,0 expecting 15\n");
}
purge_sender = NULL;
purge_type_first = 4;
purge_type_last = 4;
purge_tag = NULL;
testrange = false;
test_purge(1, 4, 7, 15);
/* Try purging on a specified tag, no such type. */
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# testing purge on 0,99,8 expecting 0\n");
}
purge_sender = NULL;
purge_type_first = 99;
purge_type_last = 99;
purge_tag = (void *)8;
testrange = false;
test_purge(1, 4, 7, 0);
2018-10-24 13:12:55 -07:00
/* Try purging on specified sender, type, all tags. */
if (verbose) {
print_message("# testing purge on 3,5,0 expecting 5\n");
}
purge_sender = &senders[3];
purge_type_first = 5;
purge_type_last = 5;
purge_tag = NULL;
testrange = false;
test_purge(1, 4, 7, 5);
}
/*
* Purge range test:
* A call to isc_event_purgerange(task, sender, first, last, tag) purges
* all events not marked unpurgeable from sender 'sender' and of type within
* the range 'first' to 'last' inclusive from the task's event queue and
* returns the number of tasks purged.
*/
2018-10-24 13:12:55 -07:00
static void
2020-02-13 14:44:37 -08:00
purgerange(void **state) {
2018-10-24 13:12:55 -07:00
UNUSED(state);
/* Now let's try some ranges. */
2018-10-24 13:12:55 -07:00
/* testing purgerange on 2,4-5,8 expecting 1 */
purge_sender = &senders[2];
purge_type_first = 4;
purge_type_last = 5;
purge_tag = (void *)8;
testrange = true;
test_purge(1, 4, 7, 1);
/* Try purging on all senders. */
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# testing purge on 0,4-5,8 expecting 5\n");
}
purge_sender = NULL;
purge_type_first = 4;
purge_type_last = 5;
purge_tag = (void *)8;
testrange = true;
test_purge(1, 4, 7, 5);
/* Try purging on all senders, specified type, all tags. */
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# testing purge on 0,5-6,0 expecting 28\n");
}
purge_sender = NULL;
purge_type_first = 5;
purge_type_last = 6;
purge_tag = NULL;
testrange = true;
test_purge(1, 4, 7, 28);
/* Try purging on a specified tag, no such type. */
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# testing purge on 0,99-101,8 expecting 0\n");
}
purge_sender = NULL;
purge_type_first = 99;
purge_type_last = 101;
purge_tag = (void *)8;
testrange = true;
test_purge(1, 4, 7, 0);
/* Try purging on specified sender, type, all tags. */
2018-10-24 13:12:55 -07:00
if (verbose) {
print_message("# testing purge on 3,5-6,0 expecting 10\n");
}
purge_sender = &senders[3];
purge_type_first = 5;
purge_type_last = 6;
purge_tag = NULL;
testrange = true;
test_purge(1, 4, 7, 10);
}
/*
* Helpers for purge event tests
*/
static void
2020-02-13 14:44:37 -08:00
pge_event1(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
LOCK(&lock);
while (!atomic_load(&started)) {
WAIT(&cv, &lock);
}
UNLOCK(&lock);
isc_event_free(&event);
}
static void
2020-02-13 14:44:37 -08:00
pge_event2(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
++eventcnt;
isc_event_free(&event);
}
static void
2020-02-13 14:44:37 -08:00
pge_sde(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
LOCK(&lock);
atomic_store(&done, true);
SIGNAL(&cv);
UNLOCK(&lock);
isc_event_free(&event);
}
/*
 * Shared harness for the purgeevent tests: queue two events on a
 * blocked task and call isc_task_purgeevent() on the second one.  If
 * 'purgeable' is true the purge must succeed and the event must never
 * reach pge_event2(); otherwise the purge must fail and the event must
 * be delivered exactly once.
 */
static void
try_purgeevent(bool purgeable) {
	isc_result_t result;
	isc_task_t *task = NULL;
	bool purged;
	isc_event_t *event1 = NULL;
	isc_event_t *event2 = NULL;
	isc_event_t *event2_clone = NULL;
	isc_time_t now;
	isc_interval_t interval;

	atomic_init(&started, false);
	atomic_init(&done, false);
	eventcnt = 0;

	isc_condition_init(&cv);

	result = isc_task_create(taskmgr, 0, &task);
	assert_int_equal(result, ISC_R_SUCCESS);

	result = isc_task_onshutdown(task, pge_sde, NULL);
	assert_int_equal(result, ISC_R_SUCCESS);

	/*
	 * Block the task on cv.
	 */
	event1 = isc_event_allocate(test_mctx, (void *)1, (isc_eventtype_t)1,
				    pge_event1, NULL, sizeof(*event1));
	assert_non_null(event1);
	isc_task_send(task, &event1);

	event2 = isc_event_allocate(test_mctx, (void *)1, (isc_eventtype_t)1,
				    pge_event2, NULL, sizeof(*event2));
	assert_non_null(event2);

	/*
	 * event2 is handed off to the task below; keep a copy of the
	 * pointer so we can still name the event in the purge call.
	 */
	event2_clone = event2;
	if (purgeable) {
		event2->ev_attributes &= ~ISC_EVENTATTR_NOPURGE;
	} else {
		event2->ev_attributes |= ISC_EVENTATTR_NOPURGE;
	}

	isc_task_send(task, &event2);

	/* The purge must succeed exactly when the event is purgeable. */
	purged = isc_task_purgeevent(task, event2_clone);
	assert_int_equal(purgeable, purged);

	/*
	 * Unblock the task, allowing event processing.
	 */
	LOCK(&lock);
	atomic_store(&started, true);
	SIGNAL(&cv);
	isc_task_shutdown(task);
	isc_interval_set(&interval, 5, 0);

	/*
	 * Wait for shutdown processing to complete.
	 */
	while (!atomic_load(&done)) {
		result = isc_time_nowplusinterval(&now, &interval);
		assert_int_equal(result, ISC_R_SUCCESS);
		WAITUNTIL(&cv, &lock, &now);
	}
	UNLOCK(&lock);
	isc_task_detach(&task);

	/* A purged event must never reach its handler. */
	assert_int_equal(eventcnt, (purgeable ? 0 : 1));
}
/*
 * Purge event test:
 * When the event is marked as purgeable, a call to
 * isc_task_purgeevent(task, event) purges the event 'event' from the
 * task's queue and returns true.
 */
static void
purgeevent(void **state) {
	UNUSED(state);

	/* Run the shared harness with a purgeable event. */
	try_purgeevent(true);
}
/*
 * Purge event not purgeable test:
 * When the event is not marked as purgeable, a call to
 * isc_task_purgeevent(task, event) does not purge the event
 * 'event' from the task's queue and returns false.
 */
static void
purgeevent_notpurge(void **state) {
	UNUSED(state);

	/* Run the shared harness with an unpurgeable event. */
	try_purgeevent(false);
}
2018-10-24 13:12:55 -07:00
int
2020-02-13 14:44:37 -08:00
main(int argc, char **argv) {
2018-10-24 13:12:55 -07:00
const struct CMUnitTest tests[] = {
cmocka_unit_test(manytasks),
cmocka_unit_test_setup_teardown(all_events, _setup, _teardown),
cmocka_unit_test_setup_teardown(basic, _setup2, _teardown),
2020-09-11 13:37:56 +10:00
cmocka_unit_test_setup_teardown(create_task, _setup, _teardown),
cmocka_unit_test_setup_teardown(pause_unpause, _setup,
_teardown),
2020-09-11 13:37:56 +10:00
cmocka_unit_test_setup_teardown(post_shutdown, _setup2,
_teardown),
2020-09-11 13:37:56 +10:00
cmocka_unit_test_setup_teardown(privilege_drop, _setup,
_teardown),
2020-09-11 13:37:56 +10:00
cmocka_unit_test_setup_teardown(privileged_events, _setup,
_teardown),
2018-10-24 13:12:55 -07:00
cmocka_unit_test_setup_teardown(purge, _setup2, _teardown),
cmocka_unit_test_setup_teardown(purgeevent, _setup2, _teardown),
cmocka_unit_test_setup_teardown(purgeevent_notpurge, _setup,
_teardown),
2020-09-11 13:37:56 +10:00
cmocka_unit_test_setup_teardown(purgerange, _setup, _teardown),
cmocka_unit_test_setup_teardown(shutdown, _setup4, _teardown),
cmocka_unit_test_setup_teardown(task_exclusive, _setup4,
_teardown),
2018-10-24 13:12:55 -07:00
};
struct CMUnitTest selected[sizeof(tests) / sizeof(tests[0])];
size_t i;
2018-10-24 13:12:55 -07:00
int c;
memset(selected, 0, sizeof(selected));
while ((c = isc_commandline_parse(argc, argv, "lt:v")) != -1) {
2018-10-24 13:12:55 -07:00
switch (c) {
case 'l':
for (i = 0; i < (sizeof(tests) / sizeof(tests[0])); i++)
{
if (tests[i].name != NULL) {
fprintf(stdout, "%s\n", tests[i].name);
}
}
return (0);
case 't':
if (!cmocka_add_test_byname(
tests, isc_commandline_argument, selected))
{
fprintf(stderr, "unknown test '%s'\n",
isc_commandline_argument);
exit(1);
}
break;
2018-10-24 13:12:55 -07:00
case 'v':
verbose = true;
break;
default:
break;
}
}
if (selected[0].name != NULL) {
return (cmocka_run_group_tests(selected, NULL, NULL));
} else {
return (cmocka_run_group_tests(tests, NULL, NULL));
}
}
2018-10-24 13:12:55 -07:00
#else /* HAVE_CMOCKA */
#include <stdio.h>
int
2020-02-13 14:44:37 -08:00
main(void) {
2018-10-24 13:12:55 -07:00
printf("1..0 # Skipped: cmocka not available\n");
return (SKIPPED_TEST_EXIT_CODE);
2018-10-24 13:12:55 -07:00
}
#endif /* if HAVE_CMOCKA */