2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-31 06:25:31 +00:00

Replace non-loop usage of atomic_compare_exchange_weak with strong variant

While testing BIND 9 on an arm64 8+ core machine, it was discovered that
the weak variants do in fact spuriously fail; we haven't observed that
on other architectures.

This commit replaces all non-loop usage of atomic_compare_exchange_weak
with atomic_compare_exchange_strong.
This commit is contained in:
Ondřej Surý
2020-02-12 09:17:55 +01:00
parent fa68a0d869
commit 4cf275ba8a
2 changed files with 36 additions and 50 deletions

View File

@@ -228,8 +228,6 @@ isc_result_t
isc_app_ctxrun(isc_appctx_t *ctx) { isc_app_ctxrun(isc_appctx_t *ctx) {
isc_event_t *event, *next_event; isc_event_t *event, *next_event;
isc_task_t *task; isc_task_t *task;
bool exp_false = false;
bool exp_true = true;
REQUIRE(VALID_APPCTX(ctx)); REQUIRE(VALID_APPCTX(ctx));
@@ -237,8 +235,8 @@ isc_app_ctxrun(isc_appctx_t *ctx) {
REQUIRE(main_thread == GetCurrentThread()); REQUIRE(main_thread == GetCurrentThread());
#endif /* ifdef WIN32 */ #endif /* ifdef WIN32 */
if (atomic_compare_exchange_weak_acq_rel(&ctx->running, &exp_false, if (atomic_compare_exchange_strong_acq_rel(
true) == true) &ctx->running, &(bool){ false }, true) == true)
{ {
/* /*
* Post any on-run events (in FIFO order). * Post any on-run events (in FIFO order).
@@ -344,9 +342,9 @@ isc_app_ctxrun(isc_appctx_t *ctx) {
} }
} }
#endif /* WIN32 */ #endif /* WIN32 */
exp_true = true; if (atomic_compare_exchange_strong_acq_rel(
if (atomic_compare_exchange_weak_acq_rel(&ctx->want_reload, &ctx->want_reload, &(bool){ true }, false))
&exp_true, false)) { {
return (ISC_R_RELOAD); return (ISC_R_RELOAD);
} }
@@ -363,10 +361,9 @@ isc_app_ctxrun(isc_appctx_t *ctx) {
isc_result_t isc_result_t
isc_app_run(void) { isc_app_run(void) {
isc_result_t result; isc_result_t result;
bool exp_false = false;
REQUIRE(atomic_compare_exchange_weak_acq_rel(&is_running, &exp_false, REQUIRE(atomic_compare_exchange_strong_acq_rel(
true) == true); &is_running, &(bool){ false }, true) == true);
result = isc_app_ctxrun(&isc_g_appctx); result = isc_app_ctxrun(&isc_g_appctx);
atomic_store_release(&is_running, false); atomic_store_release(&is_running, false);
@@ -380,8 +377,6 @@ isc_app_isrunning() {
void void
isc_app_ctxshutdown(isc_appctx_t *ctx) { isc_app_ctxshutdown(isc_appctx_t *ctx) {
bool exp_false = false;
REQUIRE(VALID_APPCTX(ctx)); REQUIRE(VALID_APPCTX(ctx));
REQUIRE(atomic_load_acquire(&ctx->running)); REQUIRE(atomic_load_acquire(&ctx->running));
@@ -389,8 +384,8 @@ isc_app_ctxshutdown(isc_appctx_t *ctx) {
/* If ctx->shutdown_requested == true, we are already shutting /* If ctx->shutdown_requested == true, we are already shutting
* down and we want to just bail out. * down and we want to just bail out.
*/ */
if (atomic_compare_exchange_weak_acq_rel(&ctx->shutdown_requested, if (atomic_compare_exchange_strong_acq_rel(&ctx->shutdown_requested,
&exp_false, true)) &(bool){ false }, true))
{ {
#ifdef WIN32 #ifdef WIN32
SetEvent(ctx->hEvents[SHUTDOWN_EVENT]); SetEvent(ctx->hEvents[SHUTDOWN_EVENT]);
@@ -480,10 +475,9 @@ isc_app_finish(void) {
void void
isc_app_block(void) { isc_app_block(void) {
bool exp_false = false;
REQUIRE(atomic_load_acquire(&isc_g_appctx.running)); REQUIRE(atomic_load_acquire(&isc_g_appctx.running));
REQUIRE(atomic_compare_exchange_weak_acq_rel(&isc_g_appctx.blocked, REQUIRE(atomic_compare_exchange_strong_acq_rel(&isc_g_appctx.blocked,
&exp_false, true)); &(bool){ false }, true));
#ifdef WIN32 #ifdef WIN32
blockedthread = GetCurrentThread(); blockedthread = GetCurrentThread();
@@ -499,11 +493,9 @@ isc_app_block(void) {
void void
isc_app_unblock(void) { isc_app_unblock(void) {
bool exp_true = true;
REQUIRE(atomic_load_acquire(&isc_g_appctx.running)); REQUIRE(atomic_load_acquire(&isc_g_appctx.running));
REQUIRE(atomic_compare_exchange_weak_acq_rel(&isc_g_appctx.blocked, REQUIRE(atomic_compare_exchange_strong_acq_rel(&isc_g_appctx.blocked,
&exp_true, false)); &(bool){ true }, false));
#ifdef WIN32 #ifdef WIN32
REQUIRE(blockedthread == GetCurrentThread()); REQUIRE(blockedthread == GetCurrentThread());

View File

@@ -400,14 +400,10 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
break; break;
} }
while (1) { while (!atomic_compare_exchange_weak_acq_rel(
int_fast32_t zero = 0; &rwl->cnt_and_flag, &(int_fast32_t){ 0 },
if (atomic_compare_exchange_weak_acq_rel( WRITER_ACTIVE))
&rwl->cnt_and_flag, &zero, WRITER_ACTIVE)) {
{
break;
}
/* Another active reader or writer is working. */ /* Another active reader or writer is working. */
LOCK(&rwl->lock); LOCK(&rwl->lock);
if (atomic_load_acquire(&rwl->cnt_and_flag) != 0) { if (atomic_load_acquire(&rwl->cnt_and_flag) != 0) {
@@ -494,8 +490,8 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
} else { } else {
/* Try locking without entering the waiting queue. */ /* Try locking without entering the waiting queue. */
int_fast32_t zero = 0; int_fast32_t zero = 0;
if (!atomic_compare_exchange_weak_acq_rel(&rwl->cnt_and_flag, if (!atomic_compare_exchange_strong_acq_rel(
&zero, WRITER_ACTIVE)) &rwl->cnt_and_flag, &zero, WRITER_ACTIVE))
{ {
return (ISC_R_LOCKBUSY); return (ISC_R_LOCKBUSY);
} }
@@ -519,28 +515,26 @@ isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t *rwl) { isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl)); REQUIRE(VALID_RWLOCK(rwl));
{ int_fast32_t reader_incr = READER_INCR;
int_fast32_t reader_incr = READER_INCR;
/* Try to acquire write access. */ /* Try to acquire write access. */
atomic_compare_exchange_weak_acq_rel( atomic_compare_exchange_strong_acq_rel(&rwl->cnt_and_flag, &reader_incr,
&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE); WRITER_ACTIVE);
/*
* There must have been no writer, and there must have
* been at least one reader.
*/
INSIST((reader_incr & WRITER_ACTIVE) == 0 &&
(reader_incr & ~WRITER_ACTIVE) != 0);
if (reader_incr == READER_INCR) {
/* /*
* There must have been no writer, and there must have * We are the only reader and have been upgraded.
* been at least one reader. * Now jump into the head of the writer waiting queue.
*/ */
INSIST((reader_incr & WRITER_ACTIVE) == 0 && atomic_fetch_sub_release(&rwl->write_completions, 1);
(reader_incr & ~WRITER_ACTIVE) != 0); } else {
return (ISC_R_LOCKBUSY);
if (reader_incr == READER_INCR) {
/*
* We are the only reader and have been upgraded.
* Now jump into the head of the writer waiting queue.
*/
atomic_fetch_sub_release(&rwl->write_completions, 1);
} else {
return (ISC_R_LOCKBUSY);
}
} }
return (ISC_R_SUCCESS); return (ISC_R_SUCCESS);