2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-30 05:57:52 +00:00

REQUIRE should not have side effects

It is a style violation for REQUIRE or INSIST to contain code that
must run for the server to work. This was being done with some
atomic_compare_exchange calls; these have been cleaned up. Uses
of atomic_compare_exchange in assertions have been replaced with
a new macro, atomic_compare_exchange_enforced, which uses RUNTIME_CHECK
to ensure that the exchange was successful.
This commit is contained in:
Evan Hunt 2021-10-21 02:28:48 -07:00
parent 7e49c5e38a
commit a499794984
12 changed files with 80 additions and 55 deletions

View File

@ -3979,19 +3979,24 @@ dns_adbentry_overquota(dns_adbentry_t *entry) {
void
dns_adb_beginudpfetch(dns_adb_t *adb, dns_adbaddrinfo_t *addr) {
uint_fast32_t active;
REQUIRE(DNS_ADB_VALID(adb));
REQUIRE(DNS_ADBADDRINFO_VALID(addr));
REQUIRE(atomic_fetch_add_relaxed(&addr->entry->active, 1) !=
UINT32_MAX);
active = atomic_fetch_add_relaxed(&addr->entry->active, 1);
INSIST(active != UINT32_MAX);
}
void
dns_adb_endudpfetch(dns_adb_t *adb, dns_adbaddrinfo_t *addr) {
uint_fast32_t active;
REQUIRE(DNS_ADB_VALID(adb));
REQUIRE(DNS_ADBADDRINFO_VALID(addr));
REQUIRE(atomic_fetch_sub_release(&addr->entry->active, 1) != 0);
active = atomic_fetch_sub_release(&addr->entry->active, 1);
INSIST(active != 0);
}
isc_stats_t *

View File

@ -1694,12 +1694,13 @@ startrecv(isc_nmhandle_t *handle, dns_dispatch_t *disp, dns_dispentry_t *resp) {
case isc_socktype_tcp:
REQUIRE(disp != NULL);
LOCK(&disp->lock);
REQUIRE(disp->handle == NULL);
REQUIRE(atomic_compare_exchange_strong(
atomic_compare_exchange_enforced(
&disp->tcpstate,
&(uint_fast32_t){ DNS_DISPATCHSTATE_CONNECTING },
DNS_DISPATCHSTATE_CONNECTED));
DNS_DISPATCHSTATE_CONNECTED);
isc_nmhandle_attach(handle, &disp->handle);
dns_dispatch_attach(disp, &(dns_dispatch_t *){ NULL });

View File

@ -2994,6 +2994,7 @@ fctx_finddone(isc_task_t *task, isc_event_t *event) {
dns_adbfind_t *find = event->ev_sender;
bool want_try = false;
bool want_done = false;
uint_fast32_t pending;
REQUIRE(VALID_FCTX(fctx));
@ -3002,7 +3003,8 @@ fctx_finddone(isc_task_t *task, isc_event_t *event) {
FCTXTRACE("finddone");
LOCK(&fctx->bucket->lock);
INSIST(atomic_fetch_sub_release(&fctx->pending, 1) > 0);
pending = atomic_fetch_sub_release(&fctx->pending, 1);
INSIST(pending > 0);
if (ADDRWAIT(fctx)) {
/*
@ -4346,6 +4348,7 @@ fctx_destroy(fetchctx_t *fctx) {
isc_sockaddr_t *sa = NULL, *next_sa = NULL;
struct tried *tried = NULL;
fctxbucket_t *bucket = NULL;
uint_fast32_t nfctx;
REQUIRE(VALID_FCTX(fctx));
REQUIRE(ISC_LIST_EMPTY(fctx->events));
@ -4365,7 +4368,8 @@ fctx_destroy(fetchctx_t *fctx) {
LOCK(&bucket->lock);
REQUIRE(fctx->state != fetchstate_active);
ISC_LIST_UNLINK(bucket->fctxs, fctx, link);
INSIST(atomic_fetch_sub_release(&res->nfctx, 1) > 0);
nfctx = atomic_fetch_sub_release(&res->nfctx, 1);
INSIST(nfctx > 0);
UNLOCK(&bucket->lock);
isc_refcount_destroy(&fctx->references);
@ -4668,8 +4672,9 @@ fctx_create(dns_resolver_t *res, const dns_name_t *name, dns_rdatatype_t type,
isc_interval_t interval;
unsigned int findoptions = 0;
char buf[DNS_NAME_FORMATSIZE + DNS_RDATATYPE_FORMATSIZE + 1];
size_t p;
int tid = isc_nm_tid();
uint_fast32_t nfctx;
size_t p;
if (tid == ISC_NETMGR_TID_UNKNOWN) {
tid = 0;
@ -4934,7 +4939,8 @@ fctx_create(dns_resolver_t *res, const dns_name_t *name, dns_rdatatype_t type,
ISC_LIST_APPEND(bucket->fctxs, fctx, link);
INSIST(atomic_fetch_add_relaxed(&res->nfctx, 1) < UINT32_MAX);
nfctx = atomic_fetch_add_relaxed(&res->nfctx, 1);
INSIST(nfctx < UINT32_MAX);
inc_stats(res, dns_resstatscounter_nfetch);
@ -10337,8 +10343,7 @@ prime_done(isc_task_t *task, isc_event_t *event) {
res->primefetch = NULL;
UNLOCK(&res->primelock);
INSIST(atomic_compare_exchange_strong_acq_rel(&res->priming,
&(bool){ true }, false));
atomic_compare_exchange_enforced(&res->priming, &(bool){ true }, false);
if (fevent->result == ISC_R_SUCCESS && res->view->cache != NULL &&
res->view->hints != NULL)
@ -10405,8 +10410,8 @@ dns_resolver_prime(dns_resolver_t *res) {
if (result != ISC_R_SUCCESS) {
isc_mem_put(res->mctx, rdataset, sizeof(*rdataset));
INSIST(atomic_compare_exchange_strong_acq_rel(
&res->priming, &(bool){ true }, false));
atomic_compare_exchange_enforced(
&res->priming, &(bool){ true }, false);
}
inc_stats(res, dns_resstatscounter_priming);
}

View File

@ -13411,8 +13411,9 @@ stub_request_nameserver_address(struct stub_cb_args *args, bool ipv4,
stub_glue_response_cb, request, &request->request);
if (result != ISC_R_SUCCESS) {
INSIST(atomic_fetch_sub_release(&args->stub->pending_requests,
1) > 1);
uint_fast32_t pr;
pr = atomic_fetch_sub_release(&args->stub->pending_requests, 1);
INSIST(pr > 1);
zone_debuglog(zone, __func__, 1,
"dns_request_createvia() failed: %s",
isc_result_totext(result));

View File

@ -84,6 +84,10 @@ handle_signal(int sig, void (*handler)(int)) {
isc_result_t
isc_app_ctxstart(isc_appctx_t *ctx) {
int presult;
sigset_t sset;
char strbuf[ISC_STRERRORSIZE];
REQUIRE(VALID_APPCTX(ctx));
/*
@ -103,10 +107,6 @@ isc_app_ctxstart(isc_appctx_t *ctx) {
atomic_init(&ctx->want_reload, false);
atomic_init(&ctx->blocked, false);
int presult;
sigset_t sset;
char strbuf[ISC_STRERRORSIZE];
/*
* Always ignore SIGPIPE.
*/
@ -283,8 +283,8 @@ isc_result_t
isc_app_run(void) {
isc_result_t result;
REQUIRE(atomic_compare_exchange_strong_acq_rel(&is_running,
&(bool){ false }, true));
atomic_compare_exchange_enforced(&is_running, &(bool){ false }, true);
result = isc_app_ctxrun(&isc_g_appctx);
atomic_store_release(&is_running, false);
@ -380,11 +380,13 @@ isc_app_finish(void) {
void
isc_app_block(void) {
REQUIRE(atomic_load_acquire(&isc_g_appctx.running));
REQUIRE(atomic_compare_exchange_strong_acq_rel(&isc_g_appctx.blocked,
&(bool){ false }, true));
sigset_t sset;
REQUIRE(atomic_load_acquire(&isc_g_appctx.running));
atomic_compare_exchange_enforced(&isc_g_appctx.blocked,
&(bool){ false }, true);
blockedthread = pthread_self();
RUNTIME_CHECK(sigemptyset(&sset) == 0 &&
sigaddset(&sset, SIGINT) == 0 &&
@ -394,13 +396,14 @@ isc_app_block(void) {
void
isc_app_unblock(void) {
REQUIRE(atomic_load_acquire(&isc_g_appctx.running));
REQUIRE(atomic_compare_exchange_strong_acq_rel(&isc_g_appctx.blocked,
&(bool){ true }, false));
sigset_t sset;
REQUIRE(atomic_load_acquire(&isc_g_appctx.running));
REQUIRE(blockedthread == pthread_self());
sigset_t sset;
atomic_compare_exchange_enforced(&isc_g_appctx.blocked, &(bool){ true },
false);
RUNTIME_CHECK(sigemptyset(&sset) == 0 &&
sigaddset(&sset, SIGINT) == 0 &&
sigaddset(&sset, SIGTERM) == 0);

View File

@ -19,6 +19,8 @@
#include <isc/stdatomic.h>
#endif
#include <isc/util.h>
/*
* We define a few additional macros to make things easier
*/
@ -71,3 +73,7 @@
#define atomic_compare_exchange_strong_acq_rel(o, e, d) \
atomic_compare_exchange_strong_explicit( \
(o), (e), (d), memory_order_acq_rel, memory_order_acquire)
/* compare/exchange that MUST succeed */
#define atomic_compare_exchange_enforced(o, e, d) \
RUNTIME_CHECK(atomic_compare_exchange_strong((o), (e), (d)))

View File

@ -414,12 +414,15 @@ mem_getstats(isc_mem_t *ctx, size_t size) {
static void
mem_putstats(isc_mem_t *ctx, void *ptr, size_t size) {
struct stats *stats = stats_bucket(ctx, size);
uint_fast32_t s, g;
UNUSED(ptr);
INSIST(atomic_fetch_sub_release(&ctx->inuse, size) >= size);
s = atomic_fetch_sub_release(&ctx->inuse, size);
INSIST(s >= size);
INSIST(atomic_fetch_sub_release(&stats->gets, 1) >= 1);
g = atomic_fetch_sub_release(&stats->gets, 1);
INSIST(g >= 1);
decrement_malloced(ctx, size);
}

View File

@ -427,8 +427,7 @@ isc_nm_pause(isc_nm_t *mgr) {
}
UNLOCK(&mgr->lock);
REQUIRE(atomic_compare_exchange_strong(&mgr->paused, &(bool){ false },
true));
atomic_compare_exchange_enforced(&mgr->paused, &(bool){ false }, true);
}
static void
@ -476,8 +475,7 @@ isc_nm_resume(isc_nm_t *mgr) {
}
UNLOCK(&mgr->lock);
REQUIRE(atomic_compare_exchange_strong(&mgr->paused, &(bool){ true },
false));
atomic_compare_exchange_enforced(&mgr->paused, &(bool){ true }, false);
isc__nm_drop_interlocked(mgr);
}
@ -1700,6 +1698,7 @@ nmhandle_free(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
static void
nmhandle_deactivate(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
bool reuse = false;
uint_fast32_t ah;
/*
* We do all of this under lock to avoid races with socket
@ -1712,7 +1711,8 @@ nmhandle_deactivate(isc_nmsocket_t *sock, isc_nmhandle_t *handle) {
ISC_LIST_UNLINK(sock->active_handles, handle, active_link);
#endif
INSIST(atomic_fetch_sub(&sock->ah, 1) > 0);
ah = atomic_fetch_sub(&sock->ah, 1);
INSIST(ah > 0);
#if !__SANITIZE_ADDRESS__ && !__SANITIZE_THREAD__
if (atomic_load(&sock->active)) {
@ -1906,8 +1906,8 @@ isc__nm_failed_connect_cb(isc_nmsocket_t *sock, isc__nm_uvreq_t *req,
isc__nmsocket_timer_stop(sock);
uv_handle_set_data((uv_handle_t *)&sock->read_timer, sock);
INSIST(atomic_compare_exchange_strong(&sock->connecting,
&(bool){ true }, false));
atomic_compare_exchange_enforced(&sock->connecting, &(bool){ true },
false);
isc__nmsocket_clearcb(sock);
isc__nm_connectcb(sock, req, eresult, async);
@ -1958,9 +1958,8 @@ isc__nmsocket_connecttimeout_cb(uv_timer_t *timer) {
/*
* Mark the connection as timed out and shutdown the socket.
*/
INSIST(atomic_compare_exchange_strong(&sock->timedout, &(bool){ false },
true));
atomic_compare_exchange_enforced(&sock->timedout, &(bool){ false },
true);
isc__nmsocket_clearcb(sock);
isc__nmsocket_shutdown(sock);
}

View File

@ -213,8 +213,8 @@ isc__nm_async_tlsdnsconnect(isc__networker_t *worker, isc__netievent_t *ev0) {
result = tlsdns_connect_direct(sock, req);
if (result != ISC_R_SUCCESS) {
INSIST(atomic_compare_exchange_strong(&sock->connecting,
&(bool){ true }, false));
atomic_compare_exchange_enforced(&sock->connecting,
&(bool){ true }, false);
isc__nmsocket_clearcb(sock);
isc__nm_connectcb(sock, req, result, true);
atomic_store(&sock->active, false);
@ -420,8 +420,8 @@ failure:
sock->tid = isc_nm_tid();
}
INSIST(atomic_compare_exchange_strong(&sock->connecting,
&(bool){ true }, false));
atomic_compare_exchange_enforced(&sock->connecting, &(bool){ true },
false);
isc__nmsocket_clearcb(sock);
isc__nm_connectcb(sock, req, result, true);
atomic_store(&sock->closed, true);
@ -1171,8 +1171,8 @@ tls_cycle_input(isc_nmsocket_t *sock) {
uv_handle_set_data((uv_handle_t *)&sock->read_timer,
sock);
INSIST(atomic_compare_exchange_strong(
&sock->connecting, &(bool){ true }, false));
atomic_compare_exchange_enforced(
&sock->connecting, &(bool){ true }, false);
isc__nm_connectcb(sock, req, ISC_R_SUCCESS, true);
}
async_tlsdns_cycle(sock);

View File

@ -121,6 +121,8 @@ dequeue(isc_quota_t *quota) {
static void
quota_release(isc_quota_t *quota) {
uint_fast32_t used;
/*
* This is opportunistic - we might race with a failing quota_attach_cb
* and not detect that something is waiting, but eventually someone will
@ -141,7 +143,8 @@ quota_release(isc_quota_t *quota) {
}
}
INSIST(atomic_fetch_sub_release(&quota->used, 1) > 0);
used = atomic_fetch_sub_release(&quota->used, 1);
INSIST(used > 0);
}
static isc_result_t

View File

@ -781,10 +781,11 @@ isc_task_beginexclusive(isc_task_t *task) {
void
isc_task_endexclusive(isc_task_t *task) {
isc_taskmgr_t *manager;
isc_taskmgr_t *manager = NULL;
REQUIRE(VALID_TASK(task));
REQUIRE(task->state == task_state_running);
manager = task->manager;
if (isc_log_wouldlog(isc_lctx, ISC_LOG_DEBUG(1))) {
@ -801,8 +802,8 @@ isc_task_endexclusive(isc_task_t *task) {
"exclusive task mode: %s", "ended");
}
REQUIRE(atomic_compare_exchange_strong(&manager->exclusive_req,
&(bool){ true }, false));
atomic_compare_exchange_enforced(&manager->exclusive_req,
&(bool){ true }, false);
}
#ifdef HAVE_LIBXML2

View File

@ -128,8 +128,7 @@ tls_initialize(void) {
"seeded' message in the OpenSSL FAQ)");
}
REQUIRE(atomic_compare_exchange_strong(&init_done, &(bool){ false },
true));
atomic_compare_exchange_enforced(&init_done, &(bool){ false }, true);
}
void
@ -167,8 +166,7 @@ tls_shutdown(void) {
}
#endif
REQUIRE(atomic_compare_exchange_strong(&shut_done, &(bool){ false },
true));
atomic_compare_exchange_enforced(&shut_done, &(bool){ false }, true);
}
void