/*
 * Copyright (C) Internet Systems Consortium, Inc. ("ISC")
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * See the COPYRIGHT file distributed with this work for additional
 * information regarding copyright ownership.
 */

/*! \file */

#include <config.h>

#include <stdbool.h>
#include <stddef.h>
#include <inttypes.h>

#include <isc/atomic.h>
#include <isc/magic.h>
#include <isc/msgs.h>
#include <isc/platform.h>
#include <isc/print.h>
#include <isc/rwlock.h>
#include <isc/util.h>
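
/*
 * Typical calling sequence (a minimal sketch, not taken from this file;
 * RUNTIME_CHECK() is just one way for a caller to handle the results):
 *
 *      isc_rwlock_t rwl;
 *
 *      RUNTIME_CHECK(isc_rwlock_init(&rwl, 0, 0) == ISC_R_SUCCESS);
 *      RUNTIME_CHECK(isc_rwlock_lock(&rwl, isc_rwlocktype_read) ==
 *                    ISC_R_SUCCESS);
 *      ... read the shared data ...
 *      RUNTIME_CHECK(isc_rwlock_unlock(&rwl, isc_rwlocktype_read) ==
 *                    ISC_R_SUCCESS);
 *      isc_rwlock_destroy(&rwl);
 */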

#define RWLOCK_MAGIC		ISC_MAGIC('R', 'W', 'L', 'k')
#define VALID_RWLOCK(rwl)	ISC_MAGIC_VALID(rwl, RWLOCK_MAGIC)

#ifndef RWLOCK_DEFAULT_READ_QUOTA
#define RWLOCK_DEFAULT_READ_QUOTA 4
#endif

#ifndef RWLOCK_DEFAULT_WRITE_QUOTA
#define RWLOCK_DEFAULT_WRITE_QUOTA 4
#endif

#ifndef RWLOCK_MAX_ADAPTIVE_COUNT
#define RWLOCK_MAX_ADAPTIVE_COUNT 100
#endif
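
/*
 * Slow path used by isc_rwlock_lock() after its brief spin on
 * isc_rwlock_trylock() fails to acquire the lock; this path may block on
 * the rwlock's internal mutex and condition variables.
 */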
static isc_result_t
isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type);

#ifdef ISC_RWLOCK_TRACE
#include <stdio.h>		/* Required for fprintf/stderr. */
#include <isc/thread.h>		/* Required for isc_thread_self(). */

static void
print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
        fprintf(stderr,
                isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                               ISC_MSG_PRINTLOCK2,
                               "rwlock %p thread %lu %s(%s): "
                               "write_requests=%u, write_completions=%u, "
                               "cnt_and_flag=0x%x, readers_waiting=%u, "
                               "write_granted=%u, write_quota=%u\n"),
                rwl, isc_thread_self(), operation,
                (type == isc_rwlocktype_read ?
                 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                                ISC_MSG_READ, "read") :
                 isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                                ISC_MSG_WRITE, "write")),
                atomic_load_explicit(&rwl->write_requests,
                                     memory_order_relaxed),
                atomic_load_explicit(&rwl->write_completions,
                                     memory_order_relaxed),
                atomic_load_explicit(&rwl->cnt_and_flag,
                                     memory_order_relaxed),
                rwl->readers_waiting,
                rwl->write_granted, rwl->write_quota);
}
#endif /* ISC_RWLOCK_TRACE */

isc_result_t
isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
                unsigned int write_quota)
{
        isc_result_t result;

        REQUIRE(rwl != NULL);

        /*
         * In case there's trouble initializing, we zero magic now.  If all
         * goes well, we'll set it to RWLOCK_MAGIC.
         */
        rwl->magic = 0;

        rwl->spins = 0;
        atomic_init(&rwl->write_requests, 0);
        atomic_init(&rwl->write_completions, 0);
        atomic_init(&rwl->cnt_and_flag, 0);
        rwl->readers_waiting = 0;
        rwl->write_granted = 0;
        if (read_quota != 0) {
                UNEXPECTED_ERROR(__FILE__, __LINE__,
                                 "read quota is not supported");
        }
        if (write_quota == 0)
                write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
        rwl->write_quota = write_quota;

        result = isc_mutex_init(&rwl->lock);
        if (result != ISC_R_SUCCESS)
                return (result);

        result = isc_condition_init(&rwl->readable);
        if (result != ISC_R_SUCCESS) {
                UNEXPECTED_ERROR(__FILE__, __LINE__,
                                 "isc_condition_init(readable) %s: %s",
                                 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
                                                ISC_MSG_FAILED, "failed"),
                                 isc_result_totext(result));
                result = ISC_R_UNEXPECTED;
                goto destroy_lock;
        }
        result = isc_condition_init(&rwl->writeable);
        if (result != ISC_R_SUCCESS) {
                UNEXPECTED_ERROR(__FILE__, __LINE__,
                                 "isc_condition_init(writeable) %s: %s",
                                 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
                                                ISC_MSG_FAILED, "failed"),
                                 isc_result_totext(result));
                result = ISC_R_UNEXPECTED;
                goto destroy_rcond;
        }

        rwl->magic = RWLOCK_MAGIC;

        return (ISC_R_SUCCESS);

 destroy_rcond:
        (void)isc_condition_destroy(&rwl->readable);
 destroy_lock:
        DESTROYLOCK(&rwl->lock);

        return (result);
}

void
isc_rwlock_destroy(isc_rwlock_t *rwl) {
        REQUIRE(VALID_RWLOCK(rwl));

        REQUIRE(atomic_load_explicit(&rwl->write_requests,
                                     memory_order_relaxed) ==
                atomic_load_explicit(&rwl->write_completions,
                                     memory_order_relaxed) &&
                atomic_load_explicit(&rwl->cnt_and_flag,
                                     memory_order_relaxed) == 0 &&
                rwl->readers_waiting == 0);

        rwl->magic = 0;
        (void)isc_condition_destroy(&rwl->readable);
        (void)isc_condition_destroy(&rwl->writeable);
        DESTROYLOCK(&rwl->lock);
}

/*
 * When some architecture-dependent atomic operations are available,
 * rwlock can be more efficient than the generic algorithm defined below.
 * The basic algorithm is described in the following URL:
 * http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html
 *
 * The key is to use the following integer variables modified atomically:
 * write_requests, write_completions, and cnt_and_flag.
 *
 * write_requests and write_completions act as a waiting queue for writers
 * in order to ensure the FIFO order.  Both variables begin with the initial
 * value of 0.  When a new writer tries to get a write lock, it increments
 * write_requests and gets the previous value of the variable as a "ticket".
 * When write_completions reaches the ticket number, the new writer can start
 * writing.  When the writer completes its work, it increments
 * write_completions so that another new writer can start working.  If
 * write_requests is not equal to write_completions, it means a writer is now
 * working or waiting.  In this case, new readers cannot start reading; in
 * other words, this algorithm basically prefers writers.
 *
 * cnt_and_flag is a "lock" shared by all readers and writers.  This integer
 * variable is a kind of structure with two members: writer_flag (1 bit) and
 * reader_count (31 bits).  The writer_flag shows whether a writer is working,
 * and the reader_count shows the number of readers currently working or
 * almost ready for working.  A writer who has the current "ticket" tries to
 * get the lock by exclusively setting the writer_flag to 1, provided that
 * the whole 32-bit value is 0 (meaning no readers or writers working).  On
 * the other hand, a new reader tries to increment the "reader_count" field
 * provided that the writer_flag is 0 (meaning there is no writer working).
 *
 * If some of the above operations fail, the reader or the writer sleeps
 * until the related condition changes.  When a working reader or writer
 * completes its work while some readers or writers are sleeping, and the
 * condition that suspended them has changed, it wakes up the sleeping
 * readers or writers.
 *
 * As already noted, this algorithm basically prefers writers.  In order to
 * prevent readers from starving, however, the algorithm also introduces the
 * "writer quota" (Q).  When Q consecutive writers have completed their work,
 * suspending readers, the last writer will wake up the readers, even if a
 * new writer is waiting.
 *
 * Implementation specific note: due to the combination of atomic operations
 * and a mutex lock, ordering between the atomic operation and locks can be
 * very sensitive in some cases.  In particular, it is generally very
 * important to check the atomic variable that requires a reader or writer
 * to sleep after locking the mutex and before actually sleeping; otherwise,
 * it could be very likely to cause a deadlock.  For example, assuming "var"
 * is a variable atomically modified, the corresponding code would be:
 *	if (var == need_sleep) {
 *		LOCK(lock);
 *		if (var == need_sleep)
 *			WAIT(cond, lock);
 *		UNLOCK(lock);
 *	}
 * The second check is important, since "var" is protected by the atomic
 * operation, not by the mutex, and can be changed just before sleeping.
 * (The first "if" could be omitted, but this is also important in order to
 * make the code efficient by avoiding the use of the mutex unless it is
 * really necessary.)
 */

#define WRITER_ACTIVE	0x1
#define READER_INCR	0x2
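
/*
 * A note on the encoding (informal summary of the comment above): bit 0 of
 * cnt_and_flag is the writer flag and the remaining bits hold the reader
 * count, so adding READER_INCR (0x2) registers one more reader without
 * touching the flag.  For example, 0x0 is idle, 0x1 is one active writer,
 * and 0x6 is three active readers.
 */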

static isc_result_t
isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
        int32_t cntflag;

        REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
        print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                                  ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

        if (type == isc_rwlocktype_read) {
                if (atomic_load_explicit(&rwl->write_requests,
                                         memory_order_relaxed) !=
                    atomic_load_explicit(&rwl->write_completions,
                                         memory_order_relaxed))
                {
                        /* there is a waiting or active writer */
                        LOCK(&rwl->lock);
                        if (atomic_load_explicit(&rwl->write_requests,
                                                 memory_order_relaxed) !=
                            atomic_load_explicit(&rwl->write_completions,
                                                 memory_order_relaxed)) {
                                rwl->readers_waiting++;
                                WAIT(&rwl->readable, &rwl->lock);
                                rwl->readers_waiting--;
                        }
                        UNLOCK(&rwl->lock);
                }

                cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
                                                    READER_INCR,
                                                    memory_order_relaxed);
                POST(cntflag);
                while (1) {
                        if ((atomic_load_explicit(&rwl->cnt_and_flag,
                                                  memory_order_relaxed)
                             & WRITER_ACTIVE) == 0)
                                break;

                        /* A writer is still working */
                        LOCK(&rwl->lock);
                        rwl->readers_waiting++;
                        if ((atomic_load_explicit(&rwl->cnt_and_flag,
                                                  memory_order_relaxed)
                             & WRITER_ACTIVE) != 0) {
                                WAIT(&rwl->readable, &rwl->lock);
                        }
                        rwl->readers_waiting--;
                        UNLOCK(&rwl->lock);

                        /*
                         * Typically, the reader should be able to get a lock
                         * at this stage:
                         *   (1) there should have been no pending writer when
                         *       the reader was trying to increment the
                         *       counter; otherwise, the writer should be in
                         *       the waiting queue, preventing the reader from
                         *       proceeding to this point.
                         *   (2) once the reader increments the counter, no
                         *       more writer can get a lock.
                         * Still, it is possible another writer can work at
                         * this point, e.g. in the following scenario:
                         *   A previous writer unlocks the writer lock.
                         *   This reader proceeds to point (1).
                         *   A new writer appears, and gets a new lock before
                         *   the reader increments the counter.
                         *   The reader then increments the counter.
                         *   The previous writer notices there is a waiting
                         *   reader who is almost ready, and wakes it up.
                         * So, the reader needs to confirm whether it can now
                         * read explicitly (thus we loop).  Note that this is
                         * not an infinite process, since the reader has
                         * incremented the counter at this point.
                         */
                }

                /*
                 * If we are temporarily preferred to writers due to the
                 * writer quota, reset the condition (race among readers
                 * doesn't matter).
                 */
                rwl->write_granted = 0;
        } else {
                int32_t prev_writer;

                /* enter the waiting queue, and wait for our turn */
                prev_writer = atomic_fetch_add_explicit(&rwl->write_requests, 1,
                                                        memory_order_relaxed);
                while (atomic_load_explicit(&rwl->write_completions,
                                            memory_order_relaxed)
                       != prev_writer) {
                        LOCK(&rwl->lock);
                        if (atomic_load_explicit(&rwl->write_completions,
                                                 memory_order_relaxed)
                            != prev_writer) {
                                WAIT(&rwl->writeable, &rwl->lock);
                                UNLOCK(&rwl->lock);
                                continue;
                        }
                        UNLOCK(&rwl->lock);
                        break;
                }

                while (1) {
                        int_fast32_t cntflag2 = 0;
                        atomic_compare_exchange_strong_explicit
                                (&rwl->cnt_and_flag, &cntflag2, WRITER_ACTIVE,
                                 memory_order_relaxed, memory_order_relaxed);

                        if (cntflag2 == 0)
                                break;

                        /* Another active reader or writer is working. */
                        LOCK(&rwl->lock);
                        if (atomic_load_explicit(&rwl->cnt_and_flag,
                                                 memory_order_relaxed) != 0) {
                                WAIT(&rwl->writeable, &rwl->lock);
                        }
                        UNLOCK(&rwl->lock);
                }

                INSIST((atomic_load_explicit(&rwl->cnt_and_flag,
                                             memory_order_relaxed)
                        & WRITER_ACTIVE));
                rwl->write_granted++;
        }

#ifdef ISC_RWLOCK_TRACE
        print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                                  ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

        return (ISC_R_SUCCESS);
}

isc_result_t
isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
        int32_t cnt = 0;
        int32_t max_cnt = rwl->spins * 2 + 10;
        isc_result_t result = ISC_R_SUCCESS;

        if (max_cnt > RWLOCK_MAX_ADAPTIVE_COUNT)
                max_cnt = RWLOCK_MAX_ADAPTIVE_COUNT;

        do {
                if (cnt++ >= max_cnt) {
                        result = isc__rwlock_lock(rwl, type);
                        break;
                }
#ifdef ISC_PLATFORM_BUSYWAITNOP
                ISC_PLATFORM_BUSYWAITNOP;
#endif
        } while (isc_rwlock_trylock(rwl, type) != ISC_R_SUCCESS);
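
        /*
         * Adapt the spin budget for future callers: move rwl->spins an
         * eighth of the way toward the spin count just observed, so the
         * next max_cnt (spins * 2 + 10) roughly tracks recent contention.
         */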
        rwl->spins += (cnt - rwl->spins) / 8;

        return (result);
}

isc_result_t
isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
        int32_t cntflag;

        REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
        print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                                  ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

        if (type == isc_rwlocktype_read) {
                /* If a writer is waiting or working, we fail. */
                if (atomic_load_explicit(&rwl->write_requests,
                                         memory_order_relaxed) !=
                    atomic_load_explicit(&rwl->write_completions,
                                         memory_order_relaxed))
                        return (ISC_R_LOCKBUSY);

                /* Otherwise, be ready for reading. */
                cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
                                                    READER_INCR,
                                                    memory_order_relaxed);
                if ((cntflag & WRITER_ACTIVE) != 0) {
                        /*
                         * A writer is working.  We lose, and cancel the read
                         * request.
                         */
                        cntflag = atomic_fetch_sub_explicit
                                (&rwl->cnt_and_flag, READER_INCR,
                                 memory_order_relaxed);
                        /*
                         * If no other readers are waiting and we've suspended
                         * new writers in this short period, wake them up.
                         */
                        if (cntflag == READER_INCR &&
                            atomic_load_explicit(&rwl->write_completions,
                                                 memory_order_relaxed) !=
                            atomic_load_explicit(&rwl->write_requests,
                                                 memory_order_relaxed)) {
                                LOCK(&rwl->lock);
                                BROADCAST(&rwl->writeable);
                                UNLOCK(&rwl->lock);
                        }

                        return (ISC_R_LOCKBUSY);
                }
        } else {
                /* Try locking without entering the waiting queue. */
                int_fast32_t zero = 0;
                if (!atomic_compare_exchange_strong_explicit
                    (&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
                     memory_order_relaxed, memory_order_relaxed))
                        return (ISC_R_LOCKBUSY);

                /*
                 * XXXJT: jump into the queue, possibly breaking the writer
                 * order.
                 */
                atomic_fetch_sub_explicit(&rwl->write_completions, 1,
                                          memory_order_relaxed);

                rwl->write_granted++;
        }

#ifdef ISC_RWLOCK_TRACE
        print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                                  ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

        return (ISC_R_SUCCESS);
}

isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
        REQUIRE(VALID_RWLOCK(rwl));

        {
                int_fast32_t reader_incr = READER_INCR;

                /* Try to acquire write access. */
                atomic_compare_exchange_strong_explicit
                        (&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE,
                         memory_order_relaxed, memory_order_relaxed);
                /*
                 * There must have been no writer, and there must have
                 * been at least one reader.
                 */
                INSIST((reader_incr & WRITER_ACTIVE) == 0 &&
                       (reader_incr & ~WRITER_ACTIVE) != 0);

                if (reader_incr == READER_INCR) {
                        /*
                         * We are the only reader and have been upgraded.
                         * Now jump into the head of the writer waiting queue.
                         */
                        atomic_fetch_sub_explicit(&rwl->write_completions, 1,
                                                  memory_order_relaxed);
                } else
                        return (ISC_R_LOCKBUSY);
        }

        return (ISC_R_SUCCESS);
}
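
/*
 * A possible calling pattern (illustrative only, not mandated by this file):
 * a thread holding a read lock may attempt an in-place upgrade and fall back
 * to releasing and re-acquiring the lock if other readers are present.
 *
 *      if (isc_rwlock_tryupgrade(&rwl) != ISC_R_SUCCESS) {
 *              RUNTIME_CHECK(isc_rwlock_unlock(&rwl, isc_rwlocktype_read) ==
 *                            ISC_R_SUCCESS);
 *              RUNTIME_CHECK(isc_rwlock_lock(&rwl, isc_rwlocktype_write) ==
 *                            ISC_R_SUCCESS);
 *      }
 */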

void
isc_rwlock_downgrade(isc_rwlock_t *rwl) {
        int32_t prev_readers;

        REQUIRE(VALID_RWLOCK(rwl));

        {
                /* Become an active reader. */
                prev_readers = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
                                                         READER_INCR,
                                                         memory_order_relaxed);
                /* We must have been a writer. */
                INSIST((prev_readers & WRITER_ACTIVE) != 0);

                /* Complete write */
                atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
                                          memory_order_relaxed);
                atomic_fetch_add_explicit(&rwl->write_completions, 1,
                                          memory_order_relaxed);
        }

        /* Resume other readers */
        LOCK(&rwl->lock);
        if (rwl->readers_waiting > 0)
                BROADCAST(&rwl->readable);
        UNLOCK(&rwl->lock);
}

isc_result_t
isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
        int32_t prev_cnt;

        REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
        print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                                  ISC_MSG_PREUNLOCK, "preunlock"), rwl, type);
#endif

        if (type == isc_rwlocktype_read) {
                prev_cnt = atomic_fetch_sub_explicit(&rwl->cnt_and_flag,
                                                     READER_INCR,
                                                     memory_order_relaxed);
                /*
                 * If we're the last reader and any writers are waiting, wake
                 * them up.  We need to wake up all of them to ensure the
                 * FIFO order.
                 */
                if (prev_cnt == READER_INCR &&
                    atomic_load_explicit(&rwl->write_completions,
                                         memory_order_relaxed) !=
                    atomic_load_explicit(&rwl->write_requests,
                                         memory_order_relaxed)) {
                        LOCK(&rwl->lock);
                        BROADCAST(&rwl->writeable);
                        UNLOCK(&rwl->lock);
                }
        } else {
                bool wakeup_writers = true;

                /*
                 * Reset the flag, and (implicitly) tell other writers
                 * we are done.
                 */
                atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
                                          memory_order_relaxed);
                atomic_fetch_add_explicit(&rwl->write_completions, 1,
                                          memory_order_relaxed);

                if (rwl->write_granted >= rwl->write_quota ||
                    (atomic_load_explicit(&rwl->write_requests,
                                          memory_order_relaxed) ==
                     atomic_load_explicit(&rwl->write_completions,
                                          memory_order_relaxed)) ||
                    (atomic_load_explicit(&rwl->cnt_and_flag,
                                          memory_order_relaxed)
                     & ~WRITER_ACTIVE)) {
                        /*
                         * We have passed the write quota, no writer is
                         * waiting, or some readers are almost ready, pending
                         * possible writers.  Note that the last case can
                         * happen even if write_requests != write_completions
                         * (which means a new writer is in the queue), so we
                         * need to catch the case explicitly.
                         */
                        LOCK(&rwl->lock);
                        if (rwl->readers_waiting > 0) {
                                wakeup_writers = false;
                                BROADCAST(&rwl->readable);
                        }
                        UNLOCK(&rwl->lock);
                }

                if ((atomic_load_explicit(&rwl->write_requests,
                                          memory_order_relaxed) !=
                     atomic_load_explicit(&rwl->write_completions,
                                          memory_order_relaxed)) &&
                    wakeup_writers) {
                        LOCK(&rwl->lock);
                        BROADCAST(&rwl->writeable);
                        UNLOCK(&rwl->lock);
                }
        }

#ifdef ISC_RWLOCK_TRACE
        print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
                                  ISC_MSG_POSTUNLOCK, "postunlock"),
                   rwl, type);
#endif

        return (ISC_R_SUCCESS);
}