mirror of
https://gitlab.isc.org/isc-projects/bind9
synced 2025-08-31 06:25:31 +00:00
Make isc_rwlock.c thread-safe
The ThreadSanitizer found several possible data races in our rwlock implementation. This commit changes all the unprotected variables to atomic, and also changes the explicit memory-ordering functions (atomic_<foo>_explicit(..., <order>)) to use our convenience macros (atomic_<foo>_<order>).
This commit is contained in:
152
lib/isc/rwlock.c
152
lib/isc/rwlock.c
@@ -177,11 +177,11 @@ print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
"write_granted=%u, write_quota=%u\n",
|
||||
rwl, isc_thread_self(), operation,
|
||||
(type == isc_rwlocktype_read ? "read" : "write"),
|
||||
atomic_load_explicit(&rwl->write_requests, memory_order_relaxed),
|
||||
atomic_load_explicit(&rwl->write_completions, memory_order_relaxed),
|
||||
atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed),
|
||||
atomic_load_relaxed(&rwl->write_requests),
|
||||
atomic_load_relaxed(&rwl->write_completions),
|
||||
atomic_load_relaxed(&rwl->cnt_and_flag),
|
||||
rwl->readers_waiting,
|
||||
rwl->write_granted, rwl->write_quota);
|
||||
atomic_load_relaxed(&rwl->write_granted), rwl->write_quota);
|
||||
}
|
||||
#endif /* ISC_RWLOCK_TRACE */
|
||||
|
||||
@@ -197,12 +197,12 @@ isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
|
||||
*/
|
||||
rwl->magic = 0;
|
||||
|
||||
rwl->spins = 0;
|
||||
atomic_init(&rwl->spins, 0);
|
||||
atomic_init(&rwl->write_requests, 0);
|
||||
atomic_init(&rwl->write_completions, 0);
|
||||
atomic_init(&rwl->cnt_and_flag, 0);
|
||||
rwl->readers_waiting = 0;
|
||||
rwl->write_granted = 0;
|
||||
atomic_init(&rwl->write_granted, 0);
|
||||
if (read_quota != 0) {
|
||||
UNEXPECTED_ERROR(__FILE__, __LINE__,
|
||||
"read quota is not supported");
|
||||
@@ -225,9 +225,9 @@ void
|
||||
isc_rwlock_destroy(isc_rwlock_t *rwl) {
|
||||
REQUIRE(VALID_RWLOCK(rwl));
|
||||
|
||||
REQUIRE(atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) ==
|
||||
atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) &&
|
||||
atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) == 0 && rwl->readers_waiting == 0);
|
||||
REQUIRE(atomic_load_relaxed(&rwl->write_requests) ==
|
||||
atomic_load_relaxed(&rwl->write_completions) &&
|
||||
atomic_load_relaxed(&rwl->cnt_and_flag) == 0 && rwl->readers_waiting == 0);
|
||||
|
||||
rwl->magic = 0;
|
||||
(void)isc_condition_destroy(&rwl->readable);
|
||||
@@ -311,13 +311,13 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
#endif
|
||||
|
||||
if (type == isc_rwlocktype_read) {
|
||||
if (atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
|
||||
atomic_load_explicit(&rwl->write_completions, memory_order_relaxed))
|
||||
if (atomic_load_relaxed(&rwl->write_requests) !=
|
||||
atomic_load_relaxed(&rwl->write_completions))
|
||||
{
|
||||
/* there is a waiting or active writer */
|
||||
LOCK(&rwl->lock);
|
||||
if (atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
|
||||
atomic_load_explicit(&rwl->write_completions, memory_order_relaxed)) {
|
||||
if (atomic_load_relaxed(&rwl->write_requests) !=
|
||||
atomic_load_relaxed(&rwl->write_completions)) {
|
||||
rwl->readers_waiting++;
|
||||
WAIT(&rwl->readable, &rwl->lock);
|
||||
rwl->readers_waiting--;
|
||||
@@ -325,18 +325,22 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
UNLOCK(&rwl->lock);
|
||||
}
|
||||
|
||||
cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
|
||||
READER_INCR,
|
||||
memory_order_relaxed);
|
||||
cntflag = atomic_fetch_add_relaxed(&rwl->cnt_and_flag,
|
||||
READER_INCR);
|
||||
POST(cntflag);
|
||||
while (1) {
|
||||
if ((atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & WRITER_ACTIVE) == 0)
|
||||
if ((atomic_load_relaxed(&rwl->cnt_and_flag)
|
||||
& WRITER_ACTIVE) == 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
/* A writer is still working */
|
||||
LOCK(&rwl->lock);
|
||||
rwl->readers_waiting++;
|
||||
if ((atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & WRITER_ACTIVE) != 0) {
|
||||
if ((atomic_load_relaxed(&rwl->cnt_and_flag)
|
||||
& WRITER_ACTIVE) != 0)
|
||||
{
|
||||
WAIT(&rwl->readable, &rwl->lock);
|
||||
}
|
||||
rwl->readers_waiting--;
|
||||
@@ -373,16 +377,19 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
* quota, reset the condition (race among readers doesn't
|
||||
* matter).
|
||||
*/
|
||||
rwl->write_granted = 0;
|
||||
atomic_store_relaxed(&rwl->write_granted, 0);
|
||||
} else {
|
||||
int32_t prev_writer;
|
||||
|
||||
/* enter the waiting queue, and wait for our turn */
|
||||
prev_writer = atomic_fetch_add_explicit(&rwl->write_requests, 1,
|
||||
memory_order_relaxed);
|
||||
while (atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) != prev_writer) {
|
||||
prev_writer = atomic_fetch_add_relaxed(&rwl->write_requests, 1);
|
||||
while (atomic_load_relaxed(&rwl->write_completions)
|
||||
!= prev_writer)
|
||||
{
|
||||
LOCK(&rwl->lock);
|
||||
if (atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) != prev_writer) {
|
||||
if (atomic_load_relaxed(&rwl->write_completions)
|
||||
!= prev_writer)
|
||||
{
|
||||
WAIT(&rwl->writeable, &rwl->lock);
|
||||
UNLOCK(&rwl->lock);
|
||||
continue;
|
||||
@@ -393,23 +400,23 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
|
||||
while (1) {
|
||||
int_fast32_t zero = 0;
|
||||
if (atomic_compare_exchange_strong_explicit
|
||||
(&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
|
||||
memory_order_relaxed, memory_order_relaxed))
|
||||
if (atomic_compare_exchange_strong_relaxed(
|
||||
&rwl->cnt_and_flag, &zero, WRITER_ACTIVE))
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
/* Another active reader or writer is working. */
|
||||
LOCK(&rwl->lock);
|
||||
if (atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) != 0) {
|
||||
if (atomic_load_relaxed(&rwl->cnt_and_flag) != 0) {
|
||||
WAIT(&rwl->writeable, &rwl->lock);
|
||||
}
|
||||
UNLOCK(&rwl->lock);
|
||||
}
|
||||
|
||||
INSIST((atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & WRITER_ACTIVE));
|
||||
rwl->write_granted++;
|
||||
INSIST((atomic_load_relaxed(&rwl->cnt_and_flag)
|
||||
& WRITER_ACTIVE));
|
||||
atomic_fetch_add_relaxed(&rwl->write_granted, 1);
|
||||
}
|
||||
|
||||
#ifdef ISC_RWLOCK_TRACE
|
||||
@@ -422,12 +429,10 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
isc_result_t
|
||||
isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
int32_t cnt = 0;
|
||||
int32_t max_cnt = rwl->spins * 2 + 10;
|
||||
int32_t spins = atomic_load_relaxed(&rwl->spins) * 2 + 10;
|
||||
int32_t max_cnt = ISC_MAX(spins, RWLOCK_MAX_ADAPTIVE_COUNT);
|
||||
isc_result_t result = ISC_R_SUCCESS;
|
||||
|
||||
if (max_cnt > RWLOCK_MAX_ADAPTIVE_COUNT)
|
||||
max_cnt = RWLOCK_MAX_ADAPTIVE_COUNT;
|
||||
|
||||
do {
|
||||
if (cnt++ >= max_cnt) {
|
||||
result = isc__rwlock_lock(rwl, type);
|
||||
@@ -436,7 +441,7 @@ isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
isc_rwlock_pause();
|
||||
} while (isc_rwlock_trylock(rwl, type) != ISC_R_SUCCESS);
|
||||
|
||||
rwl->spins += (cnt - rwl->spins) / 8;
|
||||
atomic_fetch_add_relaxed(&rwl->spins, (cnt - spins) / 8);
|
||||
|
||||
return (result);
|
||||
}
|
||||
@@ -453,29 +458,28 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
|
||||
if (type == isc_rwlocktype_read) {
|
||||
/* If a writer is waiting or working, we fail. */
|
||||
if (atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
|
||||
atomic_load_explicit(&rwl->write_completions, memory_order_relaxed))
|
||||
if (atomic_load_relaxed(&rwl->write_requests) !=
|
||||
atomic_load_relaxed(&rwl->write_completions))
|
||||
return (ISC_R_LOCKBUSY);
|
||||
|
||||
/* Otherwise, be ready for reading. */
|
||||
cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
|
||||
READER_INCR,
|
||||
memory_order_relaxed);
|
||||
cntflag = atomic_fetch_add_relaxed(&rwl->cnt_and_flag,
|
||||
READER_INCR);
|
||||
if ((cntflag & WRITER_ACTIVE) != 0) {
|
||||
/*
|
||||
* A writer is working. We lose, and cancel the read
|
||||
* request.
|
||||
*/
|
||||
cntflag = atomic_fetch_sub_explicit
|
||||
(&rwl->cnt_and_flag, READER_INCR,
|
||||
memory_order_relaxed);
|
||||
cntflag = atomic_fetch_sub_relaxed(
|
||||
&rwl->cnt_and_flag, READER_INCR);
|
||||
/*
|
||||
* If no other readers are waiting and we've suspended
|
||||
* new writers in this short period, wake them up.
|
||||
*/
|
||||
if (cntflag == READER_INCR &&
|
||||
atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) !=
|
||||
atomic_load_explicit(&rwl->write_requests, memory_order_relaxed)) {
|
||||
atomic_load_relaxed(&rwl->write_completions) !=
|
||||
atomic_load_relaxed(&rwl->write_requests))
|
||||
{
|
||||
LOCK(&rwl->lock);
|
||||
BROADCAST(&rwl->writeable);
|
||||
UNLOCK(&rwl->lock);
|
||||
@@ -486,9 +490,8 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
} else {
|
||||
/* Try locking without entering the waiting queue. */
|
||||
int_fast32_t zero = 0;
|
||||
if (!atomic_compare_exchange_strong_explicit
|
||||
(&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
|
||||
memory_order_relaxed, memory_order_relaxed))
|
||||
if (!atomic_compare_exchange_strong_relaxed(
|
||||
&rwl->cnt_and_flag, &zero, WRITER_ACTIVE))
|
||||
{
|
||||
return (ISC_R_LOCKBUSY);
|
||||
}
|
||||
@@ -497,10 +500,8 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
* XXXJT: jump into the queue, possibly breaking the writer
|
||||
* order.
|
||||
*/
|
||||
atomic_fetch_sub_explicit(&rwl->write_completions, 1,
|
||||
memory_order_relaxed);
|
||||
|
||||
rwl->write_granted++;
|
||||
atomic_fetch_sub_relaxed(&rwl->write_completions, 1);
|
||||
atomic_fetch_add_relaxed(&rwl->write_granted, 1);
|
||||
}
|
||||
|
||||
#ifdef ISC_RWLOCK_TRACE
|
||||
@@ -518,9 +519,8 @@ isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
|
||||
int_fast32_t reader_incr = READER_INCR;
|
||||
|
||||
/* Try to acquire write access. */
|
||||
atomic_compare_exchange_strong_explicit
|
||||
(&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE,
|
||||
memory_order_relaxed, memory_order_relaxed);
|
||||
atomic_compare_exchange_strong_relaxed(
|
||||
&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE);
|
||||
/*
|
||||
* There must have been no writer, and there must have
|
||||
* been at least one reader.
|
||||
@@ -533,8 +533,7 @@ isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
|
||||
* We are the only reader and have been upgraded.
|
||||
* Now jump into the head of the writer waiting queue.
|
||||
*/
|
||||
atomic_fetch_sub_explicit(&rwl->write_completions, 1,
|
||||
memory_order_relaxed);
|
||||
atomic_fetch_sub_relaxed(&rwl->write_completions, 1);
|
||||
} else
|
||||
return (ISC_R_LOCKBUSY);
|
||||
|
||||
@@ -551,17 +550,14 @@ isc_rwlock_downgrade(isc_rwlock_t *rwl) {
|
||||
|
||||
{
|
||||
/* Become an active reader. */
|
||||
prev_readers = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
|
||||
READER_INCR,
|
||||
memory_order_relaxed);
|
||||
prev_readers = atomic_fetch_add_relaxed(&rwl->cnt_and_flag,
|
||||
READER_INCR);
|
||||
/* We must have been a writer. */
|
||||
INSIST((prev_readers & WRITER_ACTIVE) != 0);
|
||||
|
||||
/* Complete write */
|
||||
atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
|
||||
memory_order_relaxed);
|
||||
atomic_fetch_add_explicit(&rwl->write_completions, 1,
|
||||
memory_order_relaxed);
|
||||
atomic_fetch_sub_relaxed(&rwl->cnt_and_flag, WRITER_ACTIVE);
|
||||
atomic_fetch_add_relaxed(&rwl->write_completions, 1);
|
||||
}
|
||||
|
||||
/* Resume other readers */
|
||||
@@ -582,17 +578,16 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
#endif
|
||||
|
||||
if (type == isc_rwlocktype_read) {
|
||||
prev_cnt = atomic_fetch_sub_explicit(&rwl->cnt_and_flag,
|
||||
READER_INCR,
|
||||
memory_order_relaxed);
|
||||
prev_cnt = atomic_fetch_sub_relaxed(&rwl->cnt_and_flag,
|
||||
READER_INCR);
|
||||
/*
|
||||
* If we're the last reader and any writers are waiting, wake
|
||||
* them up. We need to wake up all of them to ensure the
|
||||
* FIFO order.
|
||||
*/
|
||||
if (prev_cnt == READER_INCR &&
|
||||
atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) !=
|
||||
atomic_load_explicit(&rwl->write_requests, memory_order_relaxed)) {
|
||||
atomic_load_relaxed(&rwl->write_completions) !=
|
||||
atomic_load_relaxed(&rwl->write_requests)) {
|
||||
LOCK(&rwl->lock);
|
||||
BROADCAST(&rwl->writeable);
|
||||
UNLOCK(&rwl->lock);
|
||||
@@ -604,15 +599,16 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
* Reset the flag, and (implicitly) tell other writers
|
||||
* we are done.
|
||||
*/
|
||||
atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
|
||||
memory_order_relaxed);
|
||||
atomic_fetch_add_explicit(&rwl->write_completions, 1,
|
||||
memory_order_relaxed);
|
||||
atomic_fetch_sub_relaxed(&rwl->cnt_and_flag, WRITER_ACTIVE);
|
||||
atomic_fetch_add_relaxed(&rwl->write_completions, 1);
|
||||
|
||||
if (rwl->write_granted >= rwl->write_quota ||
|
||||
(atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) ==
|
||||
atomic_load_explicit(&rwl->write_completions, memory_order_relaxed)) ||
|
||||
(atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & ~WRITER_ACTIVE)) {
|
||||
if ((atomic_load_relaxed(&rwl->write_granted) >=
|
||||
rwl->write_quota) ||
|
||||
(atomic_load_relaxed(&rwl->write_requests) ==
|
||||
atomic_load_relaxed(&rwl->write_completions)) ||
|
||||
(atomic_load_relaxed(&rwl->cnt_and_flag)
|
||||
& ~WRITER_ACTIVE))
|
||||
{
|
||||
/*
|
||||
* We have passed the write quota, no writer is
|
||||
* waiting, or some readers are almost ready, pending
|
||||
@@ -629,8 +625,8 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
|
||||
UNLOCK(&rwl->lock);
|
||||
}
|
||||
|
||||
if ((atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
|
||||
atomic_load_explicit(&rwl->write_completions, memory_order_relaxed)) &&
|
||||
if ((atomic_load_relaxed(&rwl->write_requests) !=
|
||||
atomic_load_relaxed(&rwl->write_completions)) &&
|
||||
wakeup_writers) {
|
||||
LOCK(&rwl->lock);
|
||||
BROADCAST(&rwl->writeable);
|
||||
|
Reference in New Issue
Block a user