/*
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
*
* SPDX-License-Identifier: MPL-2.0
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, you can obtain one at https://mozilla.org/MPL/2.0/.
*
* See the COPYRIGHT file distributed with this work for additional
* information regarding copyright ownership.
 */

#if HAVE_CMOCKA

#include <inttypes.h>
#include <sched.h> /* IWYU pragma: keep */
#include <setjmp.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define UNIT_TESTING
#include <cmocka.h>

#include <isc/atomic.h>
#include <isc/commandline.h>
#include <isc/condition.h>
#include <isc/mem.h>
#include <isc/print.h>
#include <isc/task.h>
#include <isc/time.h>
#include <isc/timer.h>
#include <isc/util.h>

#include "../timer.c"

#include "isctest.h"

/* Set to true (or use -v option) for verbose output */
static bool verbose = false;
#define FUDGE_SECONDS 0 /* in absence of clock_getres() */
#define FUDGE_NANOSECONDS 500000000 /* in absence of clock_getres() */
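/*
 * State shared between the test driver and the timer callbacks: lasttime
 * is protected by lasttime_mx; the counters are atomics; the rest is set
 * up before the timer is started.
 */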
static isc_timer_t *timer = NULL;
static isc_condition_t cv;
static isc_mutex_t mx;
static isc_time_t endtime;
static isc_mutex_t lasttime_mx;
static isc_time_t lasttime;
static int seconds;
static int nanoseconds;
static atomic_int_fast32_t eventcnt;
static atomic_uint_fast32_t errcnt;

static int nevents;

static int
_setup(void **state) {
isc_result_t result;
UNUSED(state);
/* Timer tests require two worker threads */
result = isc_test_begin(NULL, true, 2);
assert_int_equal(result, ISC_R_SUCCESS);
atomic_init(&errcnt, ISC_R_SUCCESS);
return (0);
}

static int
_teardown(void **state) {
UNUSED(state);
isc_test_end();
return (0);
}

static void
test_shutdown(void) {
isc_result_t result;
/*
* Signal shutdown processing complete.
*/
result = isc_mutex_lock(&mx);
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_condition_signal(&cv);
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_mutex_unlock(&mx);
assert_int_equal(result, ISC_R_SUCCESS);
}
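/*
 * Common driver: create a task and a timer that runs 'action', wait until
 * 'nevents' callbacks have run and test_shutdown() has signalled cv, then
 * check any error a callback recorded in errcnt.
 */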
static void
setup_test(isc_timertype_t timertype, isc_interval_t *interval,
void (*action)(isc_task_t *, isc_event_t *)) {
isc_result_t result;
isc_task_t *task = NULL;
isc_time_settoepoch(&endtime);
atomic_init(&eventcnt, 0);
isc_mutex_init(&mx);
isc_mutex_init(&lasttime_mx);
isc_condition_init(&cv);
atomic_store(&errcnt, ISC_R_SUCCESS);
LOCK(&mx);
result = isc_task_create(taskmgr, 0, &task);
assert_int_equal(result, ISC_R_SUCCESS);
isc_mutex_lock(&lasttime_mx);
result = isc_time_now(&lasttime);
isc_mutex_unlock(&lasttime_mx);
assert_int_equal(result, ISC_R_SUCCESS);
isc_timer_create(timermgr, task, action, (void *)timertype, &timer);
result = isc_timer_reset(timer, timertype, interval, false);
assert_int_equal(result, ISC_R_SUCCESS);
/*
* Wait for shutdown processing to complete.
*/
while (atomic_load(&eventcnt) != nevents) {
result = isc_condition_wait(&cv, &mx);
assert_int_equal(result, ISC_R_SUCCESS);
}
UNLOCK(&mx);
assert_int_equal(atomic_load(&errcnt), ISC_R_SUCCESS);
isc_task_detach(&task);
isc_mutex_destroy(&mx);
(void)isc_condition_destroy(&cv);
}
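/*
 * Remember the first error reported by a timer callback; later errors are
 * ignored because the compare-exchange only succeeds while errcnt still
 * holds ISC_R_SUCCESS.
 */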
static void
set_global_error(isc_result_t result) {
(void)atomic_compare_exchange_strong(
&errcnt, &(uint_fast32_t){ ISC_R_SUCCESS }, result);
}
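/*
 * The checks below run in timer callbacks on worker threads, where the
 * cmocka assert macros cannot be used safely; failures are therefore
 * funnelled through set_global_error() and verified on the main thread.
 */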
static void
subthread_assert_true(bool expected, const char *file, unsigned int line) {
if (!expected) {
printf("# %s:%u subthread_assert_true\n", file, line);
set_global_error(ISC_R_UNEXPECTED);
}
}
#define subthread_assert_true(expected) \
subthread_assert_true(expected, __FILE__, __LINE__)
static void
subthread_assert_int_equal(int observed, int expected, const char *file,
unsigned int line) {
if (observed != expected) {
printf("# %s:%u subthread_assert_int_equal(%d != %d)\n", file,
line, observed, expected);
set_global_error(ISC_R_UNEXPECTED);
}
}
#define subthread_assert_int_equal(observed, expected) \
subthread_assert_int_equal(observed, expected, __FILE__, __LINE__)
static void
subthread_assert_result_equal(isc_result_t result, isc_result_t expected,
const char *file, unsigned int line) {
if (result != expected) {
printf("# %s:%u subthread_assert_result_equal(%u != %u)\n",
file, line, result, expected);
set_global_error(result);
}
}
#define subthread_assert_result_equal(observed, expected) \
subthread_assert_result_equal(observed, expected, __FILE__, __LINE__)
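/*
 * Ticker/once callback: check that the event type matches the timer type,
 * that the callback fired within FUDGE_* of lasttime plus the interval,
 * record the new lasttime, and shut the test down once 'nevents' events
 * have arrived.
 */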
static void
ticktock(isc_task_t *task, isc_event_t *event) {
isc_result_t result;
isc_time_t now;
isc_time_t base;
isc_time_t ulim;
isc_time_t llim;
isc_interval_t interval;
isc_eventtype_t expected_event_type;
UNUSED(task);
int tick = atomic_fetch_add(&eventcnt, 1);
if (verbose) {
print_message("# tick %d\n", tick);
}
expected_event_type = ISC_TIMEREVENT_ONCE;
if ((uintptr_t)event->ev_arg == isc_timertype_ticker) {
expected_event_type = ISC_TIMEREVENT_TICK;
}
if (event->ev_type != expected_event_type) {
print_error("# expected event type %u, got %u\n",
expected_event_type, event->ev_type);
}
result = isc_time_now(&now);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
isc_interval_set(&interval, seconds, nanoseconds);
isc_mutex_lock(&lasttime_mx);
result = isc_time_add(&lasttime, &interval, &base);
isc_mutex_unlock(&lasttime_mx);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
isc_interval_set(&interval, FUDGE_SECONDS, FUDGE_NANOSECONDS);
result = isc_time_add(&base, &interval, &ulim);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
result = isc_time_subtract(&base, &interval, &llim);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
subthread_assert_true(isc_time_compare(&llim, &now) <= 0);
subthread_assert_true(isc_time_compare(&ulim, &now) >= 0);
isc_interval_set(&interval, 0, 0);
isc_mutex_lock(&lasttime_mx);
result = isc_time_add(&now, &interval, &lasttime);
isc_mutex_unlock(&lasttime_mx);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
isc_event_free(&event);
if (atomic_load(&eventcnt) == nevents) {
result = isc_time_now(&endtime);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
isc_timer_destroy(&timer);
test_shutdown();
}
}

/*
 * Individual unit tests
 */

/* timer type ticker */
static void
ticker(void **state) {
isc_interval_t interval;
UNUSED(state);
nevents = 12;
seconds = 0;
nanoseconds = 500000000;
isc_interval_set(&interval, seconds, nanoseconds);
setup_test(isc_timertype_ticker, &interval, ticktock);
}
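/*
 * Once-timer callback: a single ISC_TIMEREVENT_ONCE event is expected,
 * within FUDGE_* of the configured idle interval, after which the timer
 * is destroyed and the test shut down.
 */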
static void
test_idle(isc_task_t *task, isc_event_t *event) {
isc_result_t result;
isc_time_t now;
isc_time_t base;
isc_time_t ulim;
isc_time_t llim;
isc_interval_t interval;
UNUSED(task);
int tick = atomic_fetch_add(&eventcnt, 1);
if (verbose) {
print_message("# tick %d\n", tick);
}
result = isc_time_now(&now);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
isc_interval_set(&interval, seconds, nanoseconds);
isc_mutex_lock(&lasttime_mx);
result = isc_time_add(&lasttime, &interval, &base);
isc_mutex_unlock(&lasttime_mx);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
isc_interval_set(&interval, FUDGE_SECONDS, FUDGE_NANOSECONDS);
result = isc_time_add(&base, &interval, &ulim);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
result = isc_time_subtract(&base, &interval, &llim);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
subthread_assert_true(isc_time_compare(&llim, &now) <= 0);
subthread_assert_true(isc_time_compare(&ulim, &now) >= 0);
isc_interval_set(&interval, 0, 0);
isc_mutex_lock(&lasttime_mx);
isc_time_add(&now, &interval, &lasttime);
isc_mutex_unlock(&lasttime_mx);
subthread_assert_int_equal(event->ev_type, ISC_TIMEREVENT_ONCE);
isc_event_free(&event);
isc_timer_destroy(&timer);
test_shutdown();
}

/* timer type once idles out */
static void
once_idle(void **state) {
isc_interval_t interval;
UNUSED(state);
nevents = 1;
seconds = 1;
nanoseconds = 200000000;
isc_interval_set(&interval, seconds, nanoseconds);
setup_test(isc_timertype_once, &interval, test_idle);
}

/*
 * timer reset: the first two events are ISC_TIMEREVENT_TICKs; after the
 * second one the timer is reset to a one-shot, so the third and final
 * event must be ISC_TIMEREVENT_ONCE.
 */
static void
test_reset(isc_task_t *task, isc_event_t *event) {
isc_result_t result;
isc_time_t now;
isc_time_t base;
isc_time_t ulim;
isc_time_t llim;
isc_interval_t interval;
UNUSED(task);
int tick = atomic_fetch_add(&eventcnt, 1);
if (verbose) {
print_message("# tick %d\n", tick);
}
/*
* Check expired time.
*/
result = isc_time_now(&now);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
isc_interval_set(&interval, seconds, nanoseconds);
isc_mutex_lock(&lasttime_mx);
result = isc_time_add(&lasttime, &interval, &base);
isc_mutex_unlock(&lasttime_mx);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
isc_interval_set(&interval, FUDGE_SECONDS, FUDGE_NANOSECONDS);
result = isc_time_add(&base, &interval, &ulim);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
result = isc_time_subtract(&base, &interval, &llim);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
subthread_assert_true(isc_time_compare(&llim, &now) <= 0);
subthread_assert_true(isc_time_compare(&ulim, &now) >= 0);
isc_interval_set(&interval, 0, 0);
isc_mutex_lock(&lasttime_mx);
isc_time_add(&now, &interval, &lasttime);
isc_mutex_unlock(&lasttime_mx);
int _eventcnt = atomic_load(&eventcnt);
if (_eventcnt < 3) {
subthread_assert_int_equal(event->ev_type, ISC_TIMEREVENT_TICK);
if (_eventcnt == 2) {
isc_interval_set(&interval, seconds, nanoseconds);
result = isc_timer_reset(timer, isc_timertype_once,
&interval, false);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
}
isc_event_free(&event);
} else {
subthread_assert_int_equal(event->ev_type, ISC_TIMEREVENT_ONCE);
isc_event_free(&event);
isc_timer_destroy(&timer);
test_shutdown();
}
}

static void
reset(void **state) {
isc_interval_t interval;
UNUSED(state);
nevents = 3;
seconds = 0;
nanoseconds = 750000000;
isc_interval_set(&interval, seconds, nanoseconds);
setup_test(isc_timertype_ticker, &interval, test_reset);
}

static atomic_bool startflag;
static atomic_bool shutdownflag;
static isc_timer_t *tickertimer = NULL;
static isc_timer_t *oncetimer = NULL;
static isc_task_t *task1 = NULL;
static isc_task_t *task2 = NULL;
/*
 * task1 discards its tick events until the once timer running on task2
 * sets startflag.
 */
static void
tick_event(isc_task_t *task, isc_event_t *event) {
isc_result_t result;
isc_time_t expires;
isc_interval_t interval;
UNUSED(task);
if (!atomic_load(&startflag)) {
if (verbose) {
print_message("# tick_event %d\n", -1);
}
isc_event_free(&event);
return;
}
int tick = atomic_fetch_add(&eventcnt, 1);
if (verbose) {
print_message("# tick_event %d\n", tick);
}
/*
* On the first tick, purge all remaining tick events
* and then shut down the task.
*/
if (tick == 0) {
isc_time_settoepoch(&expires);
isc_interval_set(&interval, seconds, 0);
result = isc_timer_reset(tickertimer, isc_timertype_ticker,
&interval, true);
subthread_assert_result_equal(result, ISC_R_SUCCESS);
atomic_store(&shutdownflag, 1);
}
isc_event_free(&event);
}
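/*
 * After the ticker has been running for a couple of intervals, the once
 * timer fires and sets startflag so that tick_event starts counting.
 */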
static void
once_event(isc_task_t *task, isc_event_t *event) {
UNUSED(task);
if (verbose) {
print_message("# once_event\n");
}
/*
* Allow task1 to start processing events.
*/
atomic_store(&startflag, true);
isc_event_free(&event);
}

/*
 * timer events purged: tick events are ignored until the once timer fires;
 * the first counted tick then purges the remaining tick events by calling
 * isc_timer_reset() with purge set to true, so the test expects exactly one
 * counted event.
 */
static void
purge(void **state) {
isc_result_t result;
isc_interval_t interval;
UNUSED(state);
atomic_init(&startflag, 0);
atomic_init(&shutdownflag, 0);
atomic_init(&eventcnt, 0);
seconds = 1;
nanoseconds = 0;
result = isc_task_create(taskmgr, 0, &task1);
assert_int_equal(result, ISC_R_SUCCESS);
result = isc_task_create(taskmgr, 0, &task2);
assert_int_equal(result, ISC_R_SUCCESS);
isc_interval_set(&interval, seconds, 0);
tickertimer = NULL;
isc_timer_create(timermgr, task1, tick_event, NULL, &tickertimer);
result = isc_timer_reset(tickertimer, isc_timertype_ticker, &interval,
false);
assert_int_equal(result, ISC_R_SUCCESS);
oncetimer = NULL;
isc_interval_set(&interval, (seconds * 2) + 1, 0);
isc_timer_create(timermgr, task2, once_event, NULL, &oncetimer);
result = isc_timer_reset(oncetimer, isc_timertype_once, &interval,
false);
assert_int_equal(result, ISC_R_SUCCESS);
/*
* Wait for shutdown processing to complete.
*/
while (!atomic_load(&shutdownflag)) {
isc_test_nap(1000);
}
assert_int_equal(atomic_load(&errcnt), ISC_R_SUCCESS);
assert_int_equal(atomic_load(&eventcnt), 1);
isc_timer_destroy(&tickertimer);
isc_timer_destroy(&oncetimer);
isc_task_destroy(&task1);
isc_task_destroy(&task2);
}

int
main(int argc, char **argv) {
const struct CMUnitTest tests[] = {
cmocka_unit_test(ticker),
cmocka_unit_test(once_idle),
cmocka_unit_test(reset),
cmocka_unit_test(purge),
};
int c;
while ((c = isc_commandline_parse(argc, argv, "v")) != -1) {
switch (c) {
case 'v':
verbose = true;
break;
default:
break;
}
}
return (cmocka_run_group_tests(tests, _setup, _teardown));
}

#else /* HAVE_CMOCKA */

#include <stdio.h>

int
main(void) {
printf("1..0 # Skipped: cmocka not available\n");
return (SKIPPED_TEST_EXIT_CODE);
}

#endif /* if HAVE_CMOCKA */