2
0
mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-30 05:57:52 +00:00

Replace non-loop usage of atomic_compare_exchange_weak with strong variant

While testing BIND 9 on an arm64 machine with 8+ cores, it was discovered that
the weak variants do in fact spuriously fail; we haven't observed that
on other architectures.

This commit replaces all non-loop usage of atomic_compare_exchange_weak
with atomic_compare_exchange_strong.
This commit is contained in:
Ondřej Surý 2020-02-12 09:17:55 +01:00
parent fa68a0d869
commit 4cf275ba8a
2 changed files with 36 additions and 50 deletions

View File

@ -228,8 +228,6 @@ isc_result_t
isc_app_ctxrun(isc_appctx_t *ctx) {
isc_event_t *event, *next_event;
isc_task_t *task;
bool exp_false = false;
bool exp_true = true;
REQUIRE(VALID_APPCTX(ctx));
@ -237,8 +235,8 @@ isc_app_ctxrun(isc_appctx_t *ctx) {
REQUIRE(main_thread == GetCurrentThread());
#endif /* ifdef WIN32 */
if (atomic_compare_exchange_weak_acq_rel(&ctx->running, &exp_false,
true) == true)
if (atomic_compare_exchange_strong_acq_rel(
&ctx->running, &(bool){ false }, true) == true)
{
/*
* Post any on-run events (in FIFO order).
@ -344,9 +342,9 @@ isc_app_ctxrun(isc_appctx_t *ctx) {
}
}
#endif /* WIN32 */
exp_true = true;
if (atomic_compare_exchange_weak_acq_rel(&ctx->want_reload,
&exp_true, false)) {
if (atomic_compare_exchange_strong_acq_rel(
&ctx->want_reload, &(bool){ true }, false))
{
return (ISC_R_RELOAD);
}
@ -363,10 +361,9 @@ isc_app_ctxrun(isc_appctx_t *ctx) {
isc_result_t
isc_app_run(void) {
isc_result_t result;
bool exp_false = false;
REQUIRE(atomic_compare_exchange_weak_acq_rel(&is_running, &exp_false,
true) == true);
REQUIRE(atomic_compare_exchange_strong_acq_rel(
&is_running, &(bool){ false }, true) == true);
result = isc_app_ctxrun(&isc_g_appctx);
atomic_store_release(&is_running, false);
@ -380,8 +377,6 @@ isc_app_isrunning() {
void
isc_app_ctxshutdown(isc_appctx_t *ctx) {
bool exp_false = false;
REQUIRE(VALID_APPCTX(ctx));
REQUIRE(atomic_load_acquire(&ctx->running));
@ -389,8 +384,8 @@ isc_app_ctxshutdown(isc_appctx_t *ctx) {
/* If ctx->shutdown_requested == true, we are already shutting
* down and we want to just bail out.
*/
if (atomic_compare_exchange_weak_acq_rel(&ctx->shutdown_requested,
&exp_false, true))
if (atomic_compare_exchange_strong_acq_rel(&ctx->shutdown_requested,
&(bool){ false }, true))
{
#ifdef WIN32
SetEvent(ctx->hEvents[SHUTDOWN_EVENT]);
@ -480,10 +475,9 @@ isc_app_finish(void) {
void
isc_app_block(void) {
bool exp_false = false;
REQUIRE(atomic_load_acquire(&isc_g_appctx.running));
REQUIRE(atomic_compare_exchange_weak_acq_rel(&isc_g_appctx.blocked,
&exp_false, true));
REQUIRE(atomic_compare_exchange_strong_acq_rel(&isc_g_appctx.blocked,
&(bool){ false }, true));
#ifdef WIN32
blockedthread = GetCurrentThread();
@ -499,11 +493,9 @@ isc_app_block(void) {
void
isc_app_unblock(void) {
bool exp_true = true;
REQUIRE(atomic_load_acquire(&isc_g_appctx.running));
REQUIRE(atomic_compare_exchange_weak_acq_rel(&isc_g_appctx.blocked,
&exp_true, false));
REQUIRE(atomic_compare_exchange_strong_acq_rel(&isc_g_appctx.blocked,
&(bool){ true }, false));
#ifdef WIN32
REQUIRE(blockedthread == GetCurrentThread());

View File

@ -400,14 +400,10 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
break;
}
while (1) {
int_fast32_t zero = 0;
if (atomic_compare_exchange_weak_acq_rel(
&rwl->cnt_and_flag, &zero, WRITER_ACTIVE))
{
break;
}
while (!atomic_compare_exchange_weak_acq_rel(
&rwl->cnt_and_flag, &(int_fast32_t){ 0 },
WRITER_ACTIVE))
{
/* Another active reader or writer is working. */
LOCK(&rwl->lock);
if (atomic_load_acquire(&rwl->cnt_and_flag) != 0) {
@ -494,8 +490,8 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
} else {
/* Try locking without entering the waiting queue. */
int_fast32_t zero = 0;
if (!atomic_compare_exchange_weak_acq_rel(&rwl->cnt_and_flag,
&zero, WRITER_ACTIVE))
if (!atomic_compare_exchange_strong_acq_rel(
&rwl->cnt_and_flag, &zero, WRITER_ACTIVE))
{
return (ISC_R_LOCKBUSY);
}
@ -519,28 +515,26 @@ isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl));
{
int_fast32_t reader_incr = READER_INCR;
int_fast32_t reader_incr = READER_INCR;
/* Try to acquire write access. */
atomic_compare_exchange_weak_acq_rel(
&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE);
/* Try to acquire write access. */
atomic_compare_exchange_strong_acq_rel(&rwl->cnt_and_flag, &reader_incr,
WRITER_ACTIVE);
/*
* There must have been no writer, and there must have
* been at least one reader.
*/
INSIST((reader_incr & WRITER_ACTIVE) == 0 &&
(reader_incr & ~WRITER_ACTIVE) != 0);
if (reader_incr == READER_INCR) {
/*
* There must have been no writer, and there must have
* been at least one reader.
* We are the only reader and have been upgraded.
* Now jump into the head of the writer waiting queue.
*/
INSIST((reader_incr & WRITER_ACTIVE) == 0 &&
(reader_incr & ~WRITER_ACTIVE) != 0);
if (reader_incr == READER_INCR) {
/*
* We are the only reader and have been upgraded.
* Now jump into the head of the writer waiting queue.
*/
atomic_fetch_sub_release(&rwl->write_completions, 1);
} else {
return (ISC_R_LOCKBUSY);
}
atomic_fetch_sub_release(&rwl->write_completions, 1);
} else {
return (ISC_R_LOCKBUSY);
}
return (ISC_R_SUCCESS);