2018-02-27 14:29:49 -08:00
|
|
|
/*
|
|
|
|
* Copyright (C) Internet Systems Consortium, Inc. ("ISC")
|
|
|
|
*
|
2021-06-03 08:37:05 +02:00
|
|
|
* SPDX-License-Identifier: MPL-2.0
|
|
|
|
*
|
2018-02-27 14:29:49 -08:00
|
|
|
* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
2020-09-14 16:20:40 -07:00
|
|
|
* file, you can obtain one at https://mozilla.org/MPL/2.0/.
|
2018-02-27 14:29:49 -08:00
|
|
|
*
|
|
|
|
* See the COPYRIGHT file distributed with this work for additional
|
|
|
|
* information regarding copyright ownership.
|
|
|
|
*/
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
#if HAVE_CMOCKA
|
|
|
|
|
2020-03-17 11:18:36 -07:00
|
|
|
#include <inttypes.h>
|
2020-02-12 13:59:18 +01:00
|
|
|
#include <sched.h> /* IWYU pragma: keep */
|
|
|
|
#include <setjmp.h>
|
2018-10-24 08:48:41 -07:00
|
|
|
#include <stdarg.h>
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
2018-02-27 14:29:49 -08:00
|
|
|
#include <unistd.h>
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
#define UNIT_TESTING
|
|
|
|
#include <cmocka.h>
|
|
|
|
|
2019-07-04 14:21:15 +02:00
|
|
|
#include <isc/atomic.h>
|
2018-10-24 08:48:41 -07:00
|
|
|
#include <isc/commandline.h>
|
2020-02-12 13:59:18 +01:00
|
|
|
#include <isc/condition.h>
|
2018-02-27 14:29:49 -08:00
|
|
|
#include <isc/mem.h>
|
2018-03-09 16:55:21 -08:00
|
|
|
#include <isc/print.h>
|
2018-02-27 14:29:49 -08:00
|
|
|
#include <isc/task.h>
|
|
|
|
#include <isc/time.h>
|
|
|
|
#include <isc/timer.h>
|
|
|
|
#include <isc/util.h>
|
|
|
|
|
2019-11-09 14:01:25 +01:00
|
|
|
#include "../timer.c"
|
2020-02-12 13:59:18 +01:00
|
|
|
#include "isctest.h"
|
2019-11-09 14:01:25 +01:00
|
|
|
|
2018-11-16 08:19:06 +00:00
|
|
|
/* Set to true (or use -v option) for verbose output */
static bool verbose = false;

/*
 * Slack allowed when comparing an event's expected firing time against
 * the observed time.
 */
#define FUDGE_SECONDS 0		    /* in absence of clock_getres() */
#define FUDGE_NANOSECONDS 500000000 /* in absence of clock_getres() */

/* Timer under test; created in setup_test(), destroyed by the callbacks. */
static isc_timer_t *timer = NULL;
/* Condition/mutex pair used to signal test completion (see test_shutdown). */
static isc_condition_t cv;
static isc_mutex_t mx;
/* Time at which the final expected event was observed. */
static isc_time_t endtime;
/* Guards 'lasttime', which is read and updated from timer callbacks. */
static isc_mutex_t lasttime_mx;
static isc_time_t lasttime;
/* Interval parameters for the current test case (set before setup_test). */
static int seconds;
static int nanoseconds;
/* Number of timer events observed so far. */
static atomic_int_fast32_t eventcnt;
/* First error recorded by a callback thread (ISC_R_SUCCESS if none). */
static atomic_uint_fast32_t errcnt;
/* Number of events the current test expects before it finishes. */
static int nevents;
|
2018-10-24 08:48:41 -07:00
|
|
|
|
|
|
|
/*
 * Group setup: bring up the ISC test harness and clear the global
 * error latch.  Returns 0 on success (cmocka convention).
 */
static int
_setup(void **state) {
	isc_result_t result;

	UNUSED(state);

	/* Timer tests require two worker threads */
	result = isc_test_begin(NULL, true, 2);
	assert_int_equal(result, ISC_R_SUCCESS);

	/* No callback error has been recorded yet. */
	atomic_init(&errcnt, ISC_R_SUCCESS);

	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Group teardown: dispose of the managers created by _setup().
 * Returns 0 on success (cmocka convention).
 */
static int
_teardown(void **state) {
	UNUSED(state);
	isc_test_end();
	return (0);
}
|
2018-02-27 14:29:49 -08:00
|
|
|
|
|
|
|
static void
|
2022-05-09 11:33:09 +02:00
|
|
|
test_shutdown(void) {
|
2018-02-27 14:29:49 -08:00
|
|
|
isc_result_t result;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Signal shutdown processing complete.
|
|
|
|
*/
|
|
|
|
result = isc_mutex_lock(&mx);
|
2018-10-24 08:48:41 -07:00
|
|
|
assert_int_equal(result, ISC_R_SUCCESS);
|
2018-02-27 14:29:49 -08:00
|
|
|
|
|
|
|
result = isc_condition_signal(&cv);
|
2018-10-24 08:48:41 -07:00
|
|
|
assert_int_equal(result, ISC_R_SUCCESS);
|
2018-02-27 14:29:49 -08:00
|
|
|
|
|
|
|
result = isc_mutex_unlock(&mx);
|
2018-10-24 08:48:41 -07:00
|
|
|
assert_int_equal(result, ISC_R_SUCCESS);
|
2018-02-27 14:29:49 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Common driver for the timer tests: creates a task and a timer of
 * 'timertype' firing every 'interval' with callback 'action', then
 * blocks until the callback has delivered 'nevents' events and
 * signalled completion via test_shutdown().
 *
 * 'mx' is locked before the timer is started so the completion signal
 * cannot be missed: the callback can only signal while we are waiting
 * inside isc_condition_wait().
 */
static void
setup_test(isc_timertype_t timertype, isc_interval_t *interval,
	   void (*action)(isc_task_t *, isc_event_t *)) {
	isc_result_t result;
	isc_task_t *task = NULL;
	isc_time_settoepoch(&endtime);
	atomic_init(&eventcnt, 0);

	isc_mutex_init(&mx);
	isc_mutex_init(&lasttime_mx);

	isc_condition_init(&cv);

	/* Reset the error latch for this test case. */
	atomic_store(&errcnt, ISC_R_SUCCESS);

	LOCK(&mx);

	result = isc_task_create(taskmgr, 0, &task, 0);
	assert_int_equal(result, ISC_R_SUCCESS);

	/* Record the start time; callbacks measure intervals from it. */
	isc_mutex_lock(&lasttime_mx);
	result = isc_time_now(&lasttime);
	isc_mutex_unlock(&lasttime_mx);
	assert_int_equal(result, ISC_R_SUCCESS);

	/* The timer type doubles as the callback argument (see ticktock). */
	isc_timer_create(timermgr, task, action, (void *)timertype, &timer);
	result = isc_timer_reset(timer, timertype, interval, false);
	assert_int_equal(result, ISC_R_SUCCESS);

	/*
	 * Wait for shutdown processing to complete.
	 */
	while (atomic_load(&eventcnt) != nevents) {
		result = isc_condition_wait(&cv, &mx);
		assert_int_equal(result, ISC_R_SUCCESS);
	}

	UNLOCK(&mx);

	/* Fail the test if any callback latched an error. */
	assert_int_equal(atomic_load(&errcnt), ISC_R_SUCCESS);

	isc_task_detach(&task);
	isc_mutex_destroy(&mx);
	(void)isc_condition_destroy(&cv);
	/*
	 * NOTE(review): lasttime_mx is initialized above but never
	 * destroyed here — confirm whether that is intentional.
	 */
}
|
|
|
|
|
2019-11-12 12:15:10 +01:00
|
|
|
/*
 * Latch 'result' into the global error slot, first failure wins: the
 * compare-exchange only succeeds while errcnt still holds
 * ISC_R_SUCCESS (the compound-literal expected value), so subsequent
 * errors are ignored and the original failure is preserved for the
 * main thread to report.
 */
static void
set_global_error(isc_result_t result) {
	(void)atomic_compare_exchange_strong(
		&errcnt, &(uint_fast32_t){ ISC_R_SUCCESS }, result);
}
|
|
|
|
|
|
|
|
/*
 * Assertion helper for use inside timer callbacks: instead of failing
 * immediately, print a diagnostic and latch ISC_R_UNEXPECTED into the
 * global error slot, which setup_test() checks on the main thread.
 */
static void
subthread_assert_true(bool expected, const char *file, unsigned int line) {
	if (!expected) {
		printf("# %s:%u subthread_assert_true\n", file, line);
		set_global_error(ISC_R_UNEXPECTED);
	}
}
/* Shadowing macro so each call site reports its own file and line. */
#define subthread_assert_true(expected) \
	subthread_assert_true(expected, __FILE__, __LINE__)
|
2019-11-12 12:15:10 +01:00
|
|
|
|
|
|
|
/*
 * Callback-thread equality check: on mismatch, print both values and
 * latch ISC_R_UNEXPECTED into the global error slot (checked later on
 * the main thread by setup_test()).
 */
static void
subthread_assert_int_equal(int observed, int expected, const char *file,
			   unsigned int line) {
	if (observed != expected) {
		printf("# %s:%u subthread_assert_int_equal(%d != %d)\n", file,
		       line, observed, expected);
		set_global_error(ISC_R_UNEXPECTED);
	}
}
/* Shadowing macro so each call site reports its own file and line. */
#define subthread_assert_int_equal(observed, expected) \
	subthread_assert_int_equal(observed, expected, __FILE__, __LINE__)
|
2019-11-12 12:15:10 +01:00
|
|
|
|
|
|
|
/*
 * Callback-thread isc_result_t check: on mismatch, print both values
 * and latch the observed result (not ISC_R_UNEXPECTED) as the global
 * error, preserving the actual failure code for the main thread.
 */
static void
subthread_assert_result_equal(isc_result_t result, isc_result_t expected,
			      const char *file, unsigned int line) {
	if (result != expected) {
		printf("# %s:%u subthread_assert_result_equal(%u != %u)\n",
		       file, line, result, expected);
		set_global_error(result);
	}
}
/* Shadowing macro so each call site reports its own file and line. */
#define subthread_assert_result_equal(observed, expected) \
	subthread_assert_result_equal(observed, expected, __FILE__, __LINE__)
|
2019-11-12 12:15:10 +01:00
|
|
|
|
2018-02-27 14:29:49 -08:00
|
|
|
/*
 * Callback for the ticker test: verifies that each event arrives
 * within FUDGE_{SECONDS,NANOSECONDS} of one interval after the
 * previous event, records the arrival time, and after 'nevents'
 * events destroys the timer and signals completion.
 */
static void
ticktock(isc_task_t *task, isc_event_t *event) {
	isc_result_t result;
	isc_time_t now;
	isc_time_t base;
	isc_time_t ulim;
	isc_time_t llim;
	isc_interval_t interval;
	isc_eventtype_t expected_event_type;

	UNUSED(task);

	/* 'tick' is the pre-increment count, i.e. this event's index. */
	int tick = atomic_fetch_add(&eventcnt, 1);

	if (verbose) {
		print_message("# tick %d\n", tick);
	}

	/* ev_arg carries the timer type (set in setup_test). */
	expected_event_type = ISC_TIMEREVENT_ONCE;
	if ((uintptr_t)event->ev_arg == isc_timertype_ticker) {
		expected_event_type = ISC_TIMEREVENT_TICK;
	}

	/* A mismatch is only reported, not treated as a test failure. */
	if (event->ev_type != expected_event_type) {
		print_error("# expected event type %u, got %u\n",
			    expected_event_type, event->ev_type);
	}

	result = isc_time_now(&now);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	/* base = lasttime + interval: the expected firing time. */
	isc_interval_set(&interval, seconds, nanoseconds);
	isc_mutex_lock(&lasttime_mx);
	result = isc_time_add(&lasttime, &interval, &base);
	isc_mutex_unlock(&lasttime_mx);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	/* [llim, ulim] = base +/- fudge: the acceptable window. */
	isc_interval_set(&interval, FUDGE_SECONDS, FUDGE_NANOSECONDS);
	result = isc_time_add(&base, &interval, &ulim);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	result = isc_time_subtract(&base, &interval, &llim);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	subthread_assert_true(isc_time_compare(&llim, &now) <= 0);
	subthread_assert_true(isc_time_compare(&ulim, &now) >= 0);

	/* lasttime = now (adding a zero interval copies under the lock). */
	isc_interval_set(&interval, 0, 0);
	isc_mutex_lock(&lasttime_mx);
	result = isc_time_add(&now, &interval, &lasttime);
	isc_mutex_unlock(&lasttime_mx);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	isc_event_free(&event);

	/* Final event: tear the timer down and wake the test driver. */
	if (atomic_load(&eventcnt) == nevents) {
		result = isc_time_now(&endtime);
		subthread_assert_result_equal(result, ISC_R_SUCCESS);
		isc_timer_destroy(&timer);
		test_shutdown();
	}
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Individual unit tests
|
|
|
|
*/
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
/* timer type ticker */
|
|
|
|
static void
|
2020-02-13 14:44:37 -08:00
|
|
|
ticker(void **state) {
|
2018-02-27 14:29:49 -08:00
|
|
|
isc_interval_t interval;
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
UNUSED(state);
|
2018-02-27 14:29:49 -08:00
|
|
|
|
|
|
|
nevents = 12;
|
|
|
|
seconds = 0;
|
|
|
|
nanoseconds = 500000000;
|
|
|
|
|
|
|
|
isc_interval_set(&interval, seconds, nanoseconds);
|
|
|
|
|
2022-03-11 23:08:17 +01:00
|
|
|
setup_test(isc_timertype_ticker, &interval, ticktock);
|
2018-02-27 14:29:49 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Callback for the once_idle test: a "once" timer should fire exactly
 * one ISC_TIMEREVENT_ONCE event within FUDGE_* of its interval.  The
 * timing check mirrors ticktock(); afterwards the timer is destroyed
 * and completion is signalled unconditionally (nevents is 1).
 */
static void
test_idle(isc_task_t *task, isc_event_t *event) {
	isc_result_t result;
	isc_time_t now;
	isc_time_t base;
	isc_time_t ulim;
	isc_time_t llim;
	isc_interval_t interval;

	UNUSED(task);

	/* 'tick' is the pre-increment count, i.e. this event's index. */
	int tick = atomic_fetch_add(&eventcnt, 1);

	if (verbose) {
		print_message("# tick %d\n", tick);
	}

	result = isc_time_now(&now);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	/* base = lasttime + interval: the expected firing time. */
	isc_interval_set(&interval, seconds, nanoseconds);
	isc_mutex_lock(&lasttime_mx);
	result = isc_time_add(&lasttime, &interval, &base);
	isc_mutex_unlock(&lasttime_mx);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	/* [llim, ulim] = base +/- fudge: the acceptable window. */
	isc_interval_set(&interval, FUDGE_SECONDS, FUDGE_NANOSECONDS);
	result = isc_time_add(&base, &interval, &ulim);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	result = isc_time_subtract(&base, &interval, &llim);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	subthread_assert_true(isc_time_compare(&llim, &now) <= 0);
	subthread_assert_true(isc_time_compare(&ulim, &now) >= 0);

	/*
	 * lasttime = now (zero-interval add copies under the lock).
	 * NOTE(review): unlike ticktock(), the isc_time_add() result
	 * is not checked here — confirm whether that is intentional.
	 */
	isc_interval_set(&interval, 0, 0);
	isc_mutex_lock(&lasttime_mx);
	isc_time_add(&now, &interval, &lasttime);
	isc_mutex_unlock(&lasttime_mx);

	subthread_assert_int_equal(event->ev_type, ISC_TIMEREVENT_ONCE);

	isc_event_free(&event);

	isc_timer_destroy(&timer);
	test_shutdown();
}
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
/* timer type once idles out */
|
|
|
|
static void
|
2020-02-13 14:44:37 -08:00
|
|
|
once_idle(void **state) {
|
2018-02-27 14:29:49 -08:00
|
|
|
isc_interval_t interval;
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
UNUSED(state);
|
2018-02-27 14:29:49 -08:00
|
|
|
|
|
|
|
nevents = 1;
|
|
|
|
seconds = 1;
|
|
|
|
nanoseconds = 200000000;
|
|
|
|
|
|
|
|
isc_interval_set(&interval, seconds, nanoseconds);
|
|
|
|
|
2022-03-11 23:08:17 +01:00
|
|
|
setup_test(isc_timertype_once, &interval, test_idle);
|
2018-02-27 14:29:49 -08:00
|
|
|
}
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
/* timer reset */
|
2018-02-27 14:29:49 -08:00
|
|
|
/*
 * Callback for the reset test (nevents == 3): the first two events
 * must be ISC_TIMEREVENT_TICK; on the second one the ticker timer is
 * reset to "once" mode, so the third and final event must be
 * ISC_TIMEREVENT_ONCE, after which the timer is destroyed and
 * completion is signalled.  Each event's arrival time is checked
 * against the previous one, as in ticktock().
 */
static void
test_reset(isc_task_t *task, isc_event_t *event) {
	isc_result_t result;
	isc_time_t now;
	isc_time_t base;
	isc_time_t ulim;
	isc_time_t llim;
	isc_interval_t interval;

	UNUSED(task);

	/* 'tick' is the pre-increment count, i.e. this event's index. */
	int tick = atomic_fetch_add(&eventcnt, 1);

	if (verbose) {
		print_message("# tick %d\n", tick);
	}

	/*
	 * Check expired time.
	 */

	result = isc_time_now(&now);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	/* base = lasttime + interval: the expected firing time. */
	isc_interval_set(&interval, seconds, nanoseconds);
	isc_mutex_lock(&lasttime_mx);
	result = isc_time_add(&lasttime, &interval, &base);
	isc_mutex_unlock(&lasttime_mx);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	/* [llim, ulim] = base +/- fudge: the acceptable window. */
	isc_interval_set(&interval, FUDGE_SECONDS, FUDGE_NANOSECONDS);
	result = isc_time_add(&base, &interval, &ulim);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	result = isc_time_subtract(&base, &interval, &llim);
	subthread_assert_result_equal(result, ISC_R_SUCCESS);

	subthread_assert_true(isc_time_compare(&llim, &now) <= 0);
	subthread_assert_true(isc_time_compare(&ulim, &now) >= 0);

	/* lasttime = now (zero-interval add copies under the lock). */
	isc_interval_set(&interval, 0, 0);
	isc_mutex_lock(&lasttime_mx);
	isc_time_add(&now, &interval, &lasttime);
	isc_mutex_unlock(&lasttime_mx);

	/* Post-increment count: 1 or 2 for ticks, 3 for the final event. */
	int _eventcnt = atomic_load(&eventcnt);

	if (_eventcnt < 3) {
		subthread_assert_int_equal(event->ev_type, ISC_TIMEREVENT_TICK);
		if (_eventcnt == 2) {
			/* Second tick: switch the timer to "once" mode. */
			isc_interval_set(&interval, seconds, nanoseconds);
			result = isc_timer_reset(timer, isc_timertype_once,
						 &interval, false);
			subthread_assert_result_equal(result, ISC_R_SUCCESS);
		}

		isc_event_free(&event);
	} else {
		subthread_assert_int_equal(event->ev_type, ISC_TIMEREVENT_ONCE);

		isc_event_free(&event);
		isc_timer_destroy(&timer);
		test_shutdown();
	}
}
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
static void
|
2020-02-13 14:44:37 -08:00
|
|
|
reset(void **state) {
|
2018-02-27 14:29:49 -08:00
|
|
|
isc_interval_t interval;
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
UNUSED(state);
|
2018-02-27 14:29:49 -08:00
|
|
|
|
|
|
|
nevents = 3;
|
|
|
|
seconds = 0;
|
|
|
|
nanoseconds = 750000000;
|
|
|
|
|
|
|
|
isc_interval_set(&interval, seconds, nanoseconds);
|
|
|
|
|
2022-03-11 23:08:17 +01:00
|
|
|
setup_test(isc_timertype_ticker, &interval, test_reset);
|
2018-02-27 14:29:49 -08:00
|
|
|
}
|
|
|
|
|
Refactor taskmgr to run on top of netmgr
This commit changes the taskmgr to run the individual tasks on the
netmgr internal workers. While an effort has been put into keeping the
taskmgr interface intact, couple of changes have been made:
* The taskmgr has no concept of universal privileged mode - rather the
tasks are either privileged or unprivileged (normal). The privileged
tasks are run as a first thing when the netmgr is unpaused. There
are now four different queues in in the netmgr:
1. priority queue - netievent on the priority queue are run even when
the taskmgr enter exclusive mode and netmgr is paused. This is
needed to properly start listening on the interfaces, free
resources and resume.
2. privileged task queue - only privileged tasks are queued here and
this is the first queue that gets processed when network manager
is unpaused using isc_nm_resume(). All netmgr workers need to
clean the privileged task queue before they all proceed normal
operation. Both task queues are processed when the workers are
finished.
3. task queue - only (traditional) task are scheduled here and this
queue along with privileged task queues are process when the
netmgr workers are finishing. This is needed to process the task
shutdown events.
4. normal queue - this is the queue with netmgr events, e.g. reading,
sending, callbacks and pretty much everything is processed here.
* The isc_taskmgr_create() now requires initialized netmgr (isc_nm_t)
object.
* The isc_nm_destroy() function now waits for indefinite time, but it
will print out the active objects when in tracing mode
(-DNETMGR_TRACE=1 and -DNETMGR_TRACE_VERBOSE=1), the netmgr has been
made a little bit more asynchronous and it might take longer time to
shutdown all the active networking connections.
* Previously, the isc_nm_stoplistening() was a synchronous operation.
This has been changed and the isc_nm_stoplistening() just schedules
the child sockets to stop listening and exits. This was needed to
prevent a deadlock as the the (traditional) tasks are now executed on
the netmgr threads.
* The socket selection logic in isc__nm_udp_send() was flawed, but
fortunatelly, it was broken, so we never hit the problem where we
created uvreq_t on a socket from nmhandle_t, but then a different
socket could be picked up and then we were trying to run the send
callback on a socket that had different threadid than currently
running.
2021-04-09 11:31:19 +02:00
|
|
|
/* Gate: tick_event() discards events until once_event() sets this. */
static atomic_bool startflag;
/* Raised by tick_event() once the purge test has done its work. */
static atomic_bool shutdownflag;
/* Timers and tasks used by the purge test. */
static isc_timer_t *tickertimer = NULL;
static isc_timer_t *oncetimer = NULL;
static isc_task_t *task1 = NULL;
static isc_task_t *task2 = NULL;
|
2018-02-27 14:29:49 -08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* task1 blocks on mx while events accumulate
|
|
|
|
* in its queue, until signaled by task2.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void
|
2020-02-13 14:44:37 -08:00
|
|
|
tick_event(isc_task_t *task, isc_event_t *event) {
|
|
|
|
isc_result_t result;
|
|
|
|
isc_time_t expires;
|
2018-02-27 14:29:49 -08:00
|
|
|
isc_interval_t interval;
|
|
|
|
|
|
|
|
UNUSED(task);
|
|
|
|
|
Refactor taskmgr to run on top of netmgr
This commit changes the taskmgr to run the individual tasks on the
netmgr internal workers. While an effort has been put into keeping the
taskmgr interface intact, couple of changes have been made:
* The taskmgr has no concept of universal privileged mode - rather the
tasks are either privileged or unprivileged (normal). The privileged
tasks are run as a first thing when the netmgr is unpaused. There
are now four different queues in in the netmgr:
1. priority queue - netievent on the priority queue are run even when
the taskmgr enter exclusive mode and netmgr is paused. This is
needed to properly start listening on the interfaces, free
resources and resume.
2. privileged task queue - only privileged tasks are queued here and
this is the first queue that gets processed when network manager
is unpaused using isc_nm_resume(). All netmgr workers need to
clean the privileged task queue before they all proceed normal
operation. Both task queues are processed when the workers are
finished.
3. task queue - only (traditional) task are scheduled here and this
queue along with privileged task queues are process when the
netmgr workers are finishing. This is needed to process the task
shutdown events.
4. normal queue - this is the queue with netmgr events, e.g. reading,
sending, callbacks and pretty much everything is processed here.
* The isc_taskmgr_create() now requires initialized netmgr (isc_nm_t)
object.
* The isc_nm_destroy() function now waits for indefinite time, but it
will print out the active objects when in tracing mode
(-DNETMGR_TRACE=1 and -DNETMGR_TRACE_VERBOSE=1), the netmgr has been
made a little bit more asynchronous and it might take longer time to
shutdown all the active networking connections.
* Previously, the isc_nm_stoplistening() was a synchronous operation.
This has been changed and the isc_nm_stoplistening() just schedules
the child sockets to stop listening and exits. This was needed to
prevent a deadlock as the the (traditional) tasks are now executed on
the netmgr threads.
* The socket selection logic in isc__nm_udp_send() was flawed, but
fortunatelly, it was broken, so we never hit the problem where we
created uvreq_t on a socket from nmhandle_t, but then a different
socket could be picked up and then we were trying to run the send
callback on a socket that had different threadid than currently
running.
2021-04-09 11:31:19 +02:00
|
|
|
if (!atomic_load(&startflag)) {
|
|
|
|
if (verbose) {
|
|
|
|
print_message("# tick_event %d\n", -1);
|
|
|
|
}
|
|
|
|
isc_event_free(&event);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-07-04 14:21:15 +02:00
|
|
|
int tick = atomic_fetch_add(&eventcnt, 1);
|
2018-10-24 08:48:41 -07:00
|
|
|
if (verbose) {
|
2019-07-04 14:21:15 +02:00
|
|
|
print_message("# tick_event %d\n", tick);
|
2018-10-24 08:48:41 -07:00
|
|
|
}
|
2018-02-27 14:29:49 -08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* On the first tick, purge all remaining tick events
|
|
|
|
* and then shut down the task.
|
|
|
|
*/
|
2019-07-04 14:21:15 +02:00
|
|
|
if (tick == 0) {
|
2018-02-27 14:29:49 -08:00
|
|
|
isc_time_settoepoch(&expires);
|
|
|
|
isc_interval_set(&interval, seconds, 0);
|
|
|
|
result = isc_timer_reset(tickertimer, isc_timertype_ticker,
|
2022-03-11 23:08:17 +01:00
|
|
|
&interval, true);
|
2019-11-12 12:15:10 +01:00
|
|
|
subthread_assert_result_equal(result, ISC_R_SUCCESS);
|
2018-02-27 14:29:49 -08:00
|
|
|
|
2022-05-09 11:33:09 +02:00
|
|
|
atomic_store(&shutdownflag, 1);
|
2018-02-27 14:29:49 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
isc_event_free(&event);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2020-02-13 14:44:37 -08:00
|
|
|
once_event(isc_task_t *task, isc_event_t *event) {
|
2022-05-09 11:33:09 +02:00
|
|
|
UNUSED(task);
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
if (verbose) {
|
|
|
|
print_message("# once_event\n");
|
|
|
|
}
|
2018-02-27 14:29:49 -08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Allow task1 to start processing events.
|
|
|
|
*/
|
Refactor taskmgr to run on top of netmgr
This commit changes the taskmgr to run the individual tasks on the
netmgr internal workers. While an effort has been put into keeping the
taskmgr interface intact, couple of changes have been made:
* The taskmgr has no concept of universal privileged mode - rather the
tasks are either privileged or unprivileged (normal). The privileged
tasks are run as a first thing when the netmgr is unpaused. There
are now four different queues in in the netmgr:
1. priority queue - netievent on the priority queue are run even when
the taskmgr enter exclusive mode and netmgr is paused. This is
needed to properly start listening on the interfaces, free
resources and resume.
2. privileged task queue - only privileged tasks are queued here and
this is the first queue that gets processed when network manager
is unpaused using isc_nm_resume(). All netmgr workers need to
clean the privileged task queue before they all proceed normal
operation. Both task queues are processed when the workers are
finished.
3. task queue - only (traditional) task are scheduled here and this
queue along with privileged task queues are process when the
netmgr workers are finishing. This is needed to process the task
shutdown events.
4. normal queue - this is the queue with netmgr events, e.g. reading,
sending, callbacks and pretty much everything is processed here.
* The isc_taskmgr_create() now requires initialized netmgr (isc_nm_t)
object.
* The isc_nm_destroy() function now waits for indefinite time, but it
will print out the active objects when in tracing mode
(-DNETMGR_TRACE=1 and -DNETMGR_TRACE_VERBOSE=1), the netmgr has been
made a little bit more asynchronous and it might take longer time to
shutdown all the active networking connections.
* Previously, the isc_nm_stoplistening() was a synchronous operation.
This has been changed and the isc_nm_stoplistening() just schedules
the child sockets to stop listening and exits. This was needed to
prevent a deadlock as the the (traditional) tasks are now executed on
the netmgr threads.
* The socket selection logic in isc__nm_udp_send() was flawed, but
fortunatelly, it was broken, so we never hit the problem where we
created uvreq_t on a socket from nmhandle_t, but then a different
socket could be picked up and then we were trying to run the send
callback on a socket that had different threadid than currently
running.
2021-04-09 11:31:19 +02:00
|
|
|
atomic_store(&startflag, true);
|
2018-02-27 14:29:49 -08:00
|
|
|
|
|
|
|
isc_event_free(&event);
|
|
|
|
}
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
/*
 * Test: timer events purged.
 *
 * Runs a ticker timer (task1) alongside a one-shot timer (task2) that
 * fires after slightly more than two tick intervals.  The expectation
 * checked at the end is that exactly one event was counted — presumably
 * the once_event handler purges the pending tick events before counting;
 * confirm against the tick_event/once_event handlers defined earlier in
 * this file.
 */
static void
purge(void **state) {
	isc_result_t result;
	isc_interval_t interval;

	UNUSED(state);

	/* Reset the shared flags/counters used by the event handlers. */
	atomic_init(&startflag, 0);
	atomic_init(&shutdownflag, 0);
	atomic_init(&eventcnt, 0);
	seconds = 1;
	nanoseconds = 0;

	result = isc_task_create(taskmgr, 0, &task1, 0);
	assert_int_equal(result, ISC_R_SUCCESS);

	result = isc_task_create(taskmgr, 0, &task2, 0);
	assert_int_equal(result, ISC_R_SUCCESS);

	/* Ticker fires every `seconds` seconds on task1. */
	isc_interval_set(&interval, seconds, 0);

	tickertimer = NULL;
	isc_timer_create(timermgr, task1, tick_event, NULL, &tickertimer);
	result = isc_timer_reset(tickertimer, isc_timertype_ticker, &interval,
				 false);
	assert_int_equal(result, ISC_R_SUCCESS);

	oncetimer = NULL;
	/* One-shot timer fires after a bit more than two tick intervals. */
	isc_interval_set(&interval, (seconds * 2) + 1, 0);
	isc_timer_create(timermgr, task2, once_event, NULL, &oncetimer);
	result = isc_timer_reset(oncetimer, isc_timertype_once, &interval,
				 false);
	assert_int_equal(result, ISC_R_SUCCESS);

	/*
	 * Wait for shutdown processing to complete.
	 */
	while (!atomic_load(&shutdownflag)) {
		isc_test_nap(1000);
	}

	/*
	 * No handler recorded an error (errcnt holds an isc_result_t),
	 * and exactly one event was counted.
	 */
	assert_int_equal(atomic_load(&errcnt), ISC_R_SUCCESS);
	assert_int_equal(atomic_load(&eventcnt), 1);

	isc_timer_destroy(&tickertimer);
	isc_timer_destroy(&oncetimer);
	isc_task_detach(&task1);
	isc_task_detach(&task2);
}
|
2018-02-27 14:29:49 -08:00
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
int
|
2020-02-13 14:44:37 -08:00
|
|
|
main(int argc, char **argv) {
|
2018-10-24 08:48:41 -07:00
|
|
|
const struct CMUnitTest tests[] = {
|
2022-03-11 23:08:17 +01:00
|
|
|
cmocka_unit_test(ticker),
|
|
|
|
cmocka_unit_test(once_idle),
|
|
|
|
cmocka_unit_test(reset),
|
2019-11-12 23:13:49 -08:00
|
|
|
cmocka_unit_test(purge),
|
2018-10-24 08:48:41 -07:00
|
|
|
};
|
|
|
|
int c;
|
|
|
|
|
|
|
|
while ((c = isc_commandline_parse(argc, argv, "v")) != -1) {
|
|
|
|
switch (c) {
|
|
|
|
case 'v':
|
|
|
|
verbose = true;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-11-12 23:13:49 -08:00
|
|
|
return (cmocka_run_group_tests(tests, _setup, _teardown));
|
2018-02-27 14:29:49 -08:00
|
|
|
}
|
|
|
|
|
2018-10-24 08:48:41 -07:00
|
|
|
#else /* HAVE_CMOCKA */
|
|
|
|
|
|
|
|
#include <stdio.h>
|
|
|
|
|
|
|
|
int
|
2020-02-13 14:44:37 -08:00
|
|
|
main(void) {
|
2018-10-24 08:48:41 -07:00
|
|
|
printf("1..0 # Skipped: cmocka not available\n");
|
2021-01-18 19:15:44 +01:00
|
|
|
return (SKIPPED_TEST_EXIT_CODE);
|
2018-02-27 14:29:49 -08:00
|
|
|
}
|
2018-10-24 08:48:41 -07:00
|
|
|
|
2020-02-13 21:48:23 +01:00
|
|
|
#endif /* if HAVE_CMOCKA */
|