mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-08-28 13:08:06 +00:00

Remove isc_atomic usage from rwlock.c and stats.c

Ondřej Surý 2018-08-14 11:42:06 +02:00
parent e119de4169
commit e9e55cbd03
4 changed files with 23 additions and 759 deletions
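For context, the substitution applied throughout the diff below swaps ISC's private isc_atomic_xadd()/isc_atomic_cmpxchg() wrappers for C11 <stdatomic.h> operations with relaxed memory ordering. A minimal, standalone sketch of that mapping (illustration only; the function names here are hypothetical and not taken from the diff):

/* Old wrapper vs. C11 replacement, shown side by side in comments. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static atomic_int_fast32_t refs;	/* zero-initialized at program start */

static int_fast32_t
increment_relaxed(void) {
	/* Before: prev = isc_atomic_xadd(&refs, 1); */
	return (atomic_fetch_add_explicit(&refs, 1, memory_order_relaxed));
}

static bool
claim_if_zero(void) {
	/* Before: isc_atomic_cmpxchg(&refs, 0, 1) returned the old value;
	 * the C11 form reports success and writes the old value back into
	 * 'expected' on failure. */
	int_fast32_t expected = 0;
	return (atomic_compare_exchange_strong_explicit(
			&refs, &expected, 1,
			memory_order_relaxed, memory_order_relaxed));
}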

View File: lib/isc/include/isc/refcount.h

@@ -9,9 +9,7 @@
* information regarding copyright ownership.
*/
#ifndef ISC_REFCOUNT_H
#define ISC_REFCOUNT_H 1
#pragma once
#include <inttypes.h>
@@ -23,10 +21,6 @@
#include <isc/platform.h>
#include <isc/types.h>
#if defined(ISC_PLATFORM_HAVESTDATOMIC)
#include <stdatomic.h>
#endif
/*! \file isc/refcount.h
* \brief Implements a locked reference counter.
*
@@ -94,33 +88,26 @@ ISC_LANG_BEGINDECLS
/*
* Sample implementations
*/
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD)
#define ISC_REFCOUNT_HAVEATOMIC 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_REFCOUNT_HAVESTDATOMIC 1
#endif
typedef struct isc_refcount {
#if defined(ISC_REFCOUNT_HAVESTDATOMIC)
atomic_int_fast32_t refs;
#else
int32_t refs;
#endif
} isc_refcount_t;
#if defined(ISC_REFCOUNT_HAVESTDATOMIC)
#define isc_refcount_init(rp, n) \
atomic_init(&(rp)->refs, n)
#define isc_refcount_current(rp) \
((unsigned int)(atomic_load_explicit(&(rp)->refs, \
memory_order_relaxed)))
#define isc_refcount_destroy(rp) ISC_REQUIRE(isc_refcount_current(rp) == 0)
#define isc_refcount_current(rp) \
atomic_load_explicit(&(rp)->refs, memory_order_relaxed)
#define isc_refcount_destroy(rp) \
ISC_REQUIRE(isc_refcount_current(rp) == 0)
#define isc_refcount_increment0(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
int32_t prev; \
prev = atomic_fetch_add_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
(&(rp)->refs, 1, memory_order_relaxed); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
} while (0)
@@ -128,9 +115,9 @@ typedef struct isc_refcount {
#define isc_refcount_increment(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
int32_t prev; \
prev = atomic_fetch_add_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
(&(rp)->refs, 1, memory_order_relaxed); \
ISC_REQUIRE(prev > 0); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
@@ -139,7 +126,7 @@ typedef struct isc_refcount {
#define isc_refcount_decrement(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
int32_t prev; \
prev = atomic_fetch_sub_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
ISC_REQUIRE(prev > 0); \
@@ -147,115 +134,4 @@ typedef struct isc_refcount {
*_tmp = prev - 1; \
} while (0)
#else /* ISC_REFCOUNT_HAVESTDATOMIC */
#define isc_refcount_current(rp) \
((unsigned int)(isc_atomic_xadd(&(rp)->refs, 0)))
#define isc_refcount_destroy(rp) ISC_REQUIRE(isc_refcount_current(rp) == 0)
#define isc_refcount_increment0(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = isc_atomic_xadd(&(rp)->refs, 1); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
} while (0)
#define isc_refcount_increment(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = isc_atomic_xadd(&(rp)->refs, 1); \
ISC_REQUIRE(prev > 0); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
} while (0)
#define isc_refcount_decrement(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = isc_atomic_xadd(&(rp)->refs, -1); \
ISC_REQUIRE(prev > 0); \
if (_tmp != NULL) \
*_tmp = prev - 1; \
} while (0)
#endif /* ISC_REFCOUNT_HAVESTDATOMIC */
#else /* ISC_PLATFORM_HAVEXADD */
typedef struct isc_refcount {
int refs;
isc_mutex_t lock;
} isc_refcount_t;
/*% Destroys a reference counter. */
#define isc_refcount_destroy(rp) \
do { \
isc_result_t _result; \
ISC_REQUIRE((rp)->refs == 0); \
_result = isc_mutex_destroy(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
#define isc_refcount_current(rp) ((unsigned int)((rp)->refs))
/*%
* Increments the reference count, returning the new value in
* 'tp' if it's not NULL.
*/
#define isc_refcount_increment0(rp, tp) \
do { \
isc_result_t _result; \
unsigned int *_tmp = (unsigned int *)(tp); \
_result = isc_mutex_lock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
++((rp)->refs); \
if (_tmp != NULL) \
*_tmp = ((rp)->refs); \
_result = isc_mutex_unlock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
#define isc_refcount_increment(rp, tp) \
do { \
isc_result_t _result; \
unsigned int *_tmp = (unsigned int *)(tp); \
_result = isc_mutex_lock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
ISC_REQUIRE((rp)->refs > 0); \
++((rp)->refs); \
if (_tmp != NULL) \
*_tmp = ((rp)->refs); \
_result = isc_mutex_unlock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
/*%
* Decrements the reference count, returning the new value in 'tp'
* if it's not NULL.
*/
#define isc_refcount_decrement(rp, tp) \
do { \
isc_result_t _result; \
unsigned int *_tmp = (unsigned int *)(tp); \
_result = isc_mutex_lock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
ISC_REQUIRE((rp)->refs > 0); \
--((rp)->refs); \
if (_tmp != NULL) \
*_tmp = ((rp)->refs); \
_result = isc_mutex_unlock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
#endif /* (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD) */
isc_result_t
isc_refcount_init(isc_refcount_t *ref, unsigned int n);
ISC_LANG_ENDDECLS
#endif /* ISC_REFCOUNT_H */
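As a usage note for the refcount macros above, here is a hedged sketch of the API from a caller's point of view; the call sequence is illustrative and error handling is omitted (isc_refcount_init() may be a macro or a function returning isc_result_t, depending on the branch):

#include <isc/refcount.h>

static isc_refcount_t refs;

static void
take_and_release(void) {
	unsigned int count;

	isc_refcount_init(&refs, 1);

	isc_refcount_increment(&refs, &count);	/* count is now 2 */
	isc_refcount_decrement(&refs, &count);	/* count is now 1 */
	isc_refcount_decrement(&refs, NULL);	/* last reference released */

	isc_refcount_destroy(&refs);		/* requires the count to be 0 */
}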

View File: lib/isc/include/isc/rwlock.h

@@ -17,15 +17,12 @@
/*! \file isc/rwlock.h */
#include <isc/atomic.h>
#include <isc/condition.h>
#include <isc/lang.h>
#include <isc/platform.h>
#include <isc/types.h>
#if defined(ISC_PLATFORM_HAVESTDATOMIC)
#include <stdatomic.h>
#endif
ISC_LANG_BEGINDECLS
typedef enum {
@@ -34,20 +31,12 @@ typedef enum {
isc_rwlocktype_write
} isc_rwlocktype_t;
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || (defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG))
#define ISC_RWLOCK_USEATOMIC 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_RWLOCK_USESTDATOMIC 1
#endif
#endif
struct isc_rwlock {
/* Unlocked. */
unsigned int magic;
isc_mutex_t lock;
int32_t spins;
#if defined(ISC_RWLOCK_USEATOMIC)
/*
* When some atomic instructions with hardware assistance are
* available, rwlock will use those so that concurrent readers do not
@@ -62,15 +51,9 @@ struct isc_rwlock {
*/
/* Read or modified atomically. */
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_int_fast32_t write_requests;
atomic_int_fast32_t write_completions;
atomic_int_fast32_t cnt_and_flag;
#else
int32_t write_requests;
int32_t write_completions;
int32_t cnt_and_flag;
#endif
/* Locked by lock. */
isc_condition_t readable;
@@ -83,29 +66,6 @@ struct isc_rwlock {
/* Unlocked. */
unsigned int write_quota;
#else /* ISC_RWLOCK_USEATOMIC */
/*%< Locked by lock. */
isc_condition_t readable;
isc_condition_t writeable;
isc_rwlocktype_t type;
/*% The number of threads that have the lock. */
unsigned int active;
/*%
* The number of lock grants made since the lock was last switched
* from reading to writing or vice versa; used in determining
* when the quota is reached and it is time to switch.
*/
unsigned int granted;
unsigned int readers_waiting;
unsigned int writers_waiting;
unsigned int read_quota;
unsigned int write_quota;
isc_rwlocktype_t original;
#endif /* ISC_RWLOCK_USEATOMIC */
};
isc_result_t

View File: lib/isc/rwlock.c

@@ -41,10 +41,8 @@
#define RWLOCK_MAX_ADAPTIVE_COUNT 100
#endif
#if defined(ISC_RWLOCK_USEATOMIC)
static isc_result_t
isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type);
#endif
#ifdef ISC_RWLOCK_TRACE
#include <stdio.h> /* Required for fprintf/stderr. */
@@ -52,7 +50,6 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type);
static void
print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
#if defined(ISC_RWLOCK_USEATOMIC)
fprintf(stderr,
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_PRINTLOCK2,
@@ -69,26 +66,6 @@ print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
rwl->write_requests, rwl->write_completions,
rwl->cnt_and_flag, rwl->readers_waiting,
rwl->write_granted, rwl->write_quota);
#else
fprintf(stderr,
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_PRINTLOCK,
"rwlock %p thread %lu %s(%s): %s, %u active, "
"%u granted, %u rwaiting, %u wwaiting\n"),
rwl, isc_thread_self(), operation,
(type == isc_rwlocktype_read ?
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_READ, "read") :
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_WRITE, "write")),
(rwl->type == isc_rwlocktype_read ?
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_READING, "reading") :
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_WRITING, "writing")),
rwl->active, rwl->granted,
rwl->readers_waiting, rwl->writers_waiting);
#endif
}
#endif /* ISC_RWLOCK_TRACE */
@@ -107,7 +84,6 @@ isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
rwl->magic = 0;
rwl->spins = 0;
#if defined(ISC_RWLOCK_USEATOMIC)
rwl->write_requests = 0;
rwl->write_completions = 0;
rwl->cnt_and_flag = 0;
@@ -120,20 +96,6 @@
if (write_quota == 0)
write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
rwl->write_quota = write_quota;
#else
rwl->type = isc_rwlocktype_read;
rwl->original = isc_rwlocktype_none;
rwl->active = 0;
rwl->granted = 0;
rwl->readers_waiting = 0;
rwl->writers_waiting = 0;
if (read_quota == 0)
read_quota = RWLOCK_DEFAULT_READ_QUOTA;
rwl->read_quota = read_quota;
if (write_quota == 0)
write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
rwl->write_quota = write_quota;
#endif
result = isc_mutex_init(&rwl->lock);
if (result != ISC_R_SUCCESS)
@@ -176,16 +138,8 @@ void
isc_rwlock_destroy(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl));
#if defined(ISC_RWLOCK_USEATOMIC)
REQUIRE(rwl->write_requests == rwl->write_completions &&
rwl->cnt_and_flag == 0 && rwl->readers_waiting == 0);
#else
LOCK(&rwl->lock);
REQUIRE(rwl->active == 0 &&
rwl->readers_waiting == 0 &&
rwl->writers_waiting == 0);
UNLOCK(&rwl->lock);
#endif
rwl->magic = 0;
(void)isc_condition_destroy(&rwl->readable);
@@ -193,8 +147,6 @@ isc_rwlock_destroy(isc_rwlock_t *rwl) {
DESTROYLOCK(&rwl->lock);
}
#if defined(ISC_RWLOCK_USEATOMIC)
/*
* When some architecture-dependent atomic operations are available,
* rwlock can be more efficient than the generic algorithm defined below.
@@ -283,13 +235,9 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
UNLOCK(&rwl->lock);
}
#if defined(ISC_RWLOCK_USESTDATOMIC)
cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
READER_INCR,
memory_order_relaxed);
#else
cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
#endif
POST(cntflag);
while (1) {
if ((rwl->cnt_and_flag & WRITER_ACTIVE) == 0)
@@ -339,12 +287,8 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
int32_t prev_writer;
/* enter the waiting queue, and wait for our turn */
#if defined(ISC_RWLOCK_USESTDATOMIC)
prev_writer = atomic_fetch_add_explicit(&rwl->write_requests, 1,
memory_order_relaxed);
#else
prev_writer = isc_atomic_xadd(&rwl->write_requests, 1);
#endif
while (rwl->write_completions != prev_writer) {
LOCK(&rwl->lock);
if (rwl->write_completions != prev_writer) {
@@ -357,16 +301,10 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
}
while (1) {
#if defined(ISC_RWLOCK_USESTDATOMIC)
int_fast32_t cntflag2 = 0;
atomic_compare_exchange_strong_explicit
(&rwl->cnt_and_flag, &cntflag2, WRITER_ACTIVE,
memory_order_relaxed, memory_order_relaxed);
#else
int32_t cntflag2;
cntflag2 = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
WRITER_ACTIVE);
#endif
if (cntflag2 == 0)
break;
@@ -431,26 +369,17 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
return (ISC_R_LOCKBUSY);
/* Otherwise, be ready for reading. */
#if defined(ISC_RWLOCK_USESTDATOMIC)
cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
READER_INCR,
memory_order_relaxed);
#else
cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
#endif
if ((cntflag & WRITER_ACTIVE) != 0) {
/*
* A writer is working. We lose, and cancel the read
* request.
*/
#if defined(ISC_RWLOCK_USESTDATOMIC)
cntflag = atomic_fetch_sub_explicit
(&rwl->cnt_and_flag, READER_INCR,
memory_order_relaxed);
#else
cntflag = isc_atomic_xadd(&rwl->cnt_and_flag,
-READER_INCR);
#endif
/*
* If no other readers are waiting and we've suspended
* new writers in this short period, wake them up.
@@ -466,29 +395,18 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
}
} else {
/* Try locking without entering the waiting queue. */
#if defined(ISC_RWLOCK_USESTDATOMIC)
int_fast32_t zero = 0;
if (!atomic_compare_exchange_strong_explicit
(&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
memory_order_relaxed, memory_order_relaxed))
return (ISC_R_LOCKBUSY);
#else
cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
WRITER_ACTIVE);
if (cntflag != 0)
return (ISC_R_LOCKBUSY);
#endif
/*
* XXXJT: jump into the queue, possibly breaking the writer
* order.
*/
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_fetch_sub_explicit(&rwl->write_completions, 1,
memory_order_relaxed);
#else
(void)isc_atomic_xadd(&rwl->write_completions, -1);
#endif
rwl->write_granted++;
}
@@ -505,7 +423,6 @@ isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl));
#if defined(ISC_RWLOCK_USESTDATOMIC)
{
int_fast32_t reader_incr = READER_INCR;
@@ -531,30 +448,6 @@ isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
return (ISC_R_LOCKBUSY);
}
#else
{
int32_t prevcnt;
/* Try to acquire write access. */
prevcnt = isc_atomic_cmpxchg(&rwl->cnt_and_flag,
READER_INCR, WRITER_ACTIVE);
/*
* There must have been no writer, and there must have
* been at least one reader.
*/
INSIST((prevcnt & WRITER_ACTIVE) == 0 &&
(prevcnt & ~WRITER_ACTIVE) != 0);
if (prevcnt == READER_INCR) {
/*
* We are the only reader and have been upgraded.
* Now jump into the head of the writer waiting queue.
*/
(void)isc_atomic_xadd(&rwl->write_completions, -1);
} else
return (ISC_R_LOCKBUSY);
}
#endif
return (ISC_R_SUCCESS);
}
@@ -565,7 +458,6 @@ isc_rwlock_downgrade(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl));
#if defined(ISC_RWLOCK_USESTDATOMIC)
{
/* Become an active reader. */
prev_readers = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
@@ -580,18 +472,6 @@ isc_rwlock_downgrade(isc_rwlock_t *rwl) {
atomic_fetch_add_explicit(&rwl->write_completions, 1,
memory_order_relaxed);
}
#else
{
/* Become an active reader. */
prev_readers = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
/* We must have been a writer. */
INSIST((prev_readers & WRITER_ACTIVE) != 0);
/* Complete write */
(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
(void)isc_atomic_xadd(&rwl->write_completions, 1);
}
#endif
/* Resume other readers */
LOCK(&rwl->lock);
@@ -612,13 +492,9 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
#endif
if (type == isc_rwlocktype_read) {
#if defined(ISC_RWLOCK_USESTDATOMIC)
prev_cnt = atomic_fetch_sub_explicit(&rwl->cnt_and_flag,
READER_INCR,
memory_order_relaxed);
#else
prev_cnt = isc_atomic_xadd(&rwl->cnt_and_flag, -READER_INCR);
#endif
/*
* If we're the last reader and any writers are waiting, wake
* them up. We need to wake up all of them to ensure the
@@ -637,15 +513,10 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
* Reset the flag, and (implicitly) tell other writers
* we are done.
*/
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
memory_order_relaxed);
atomic_fetch_add_explicit(&rwl->write_completions, 1,
memory_order_relaxed);
#else
(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
(void)isc_atomic_xadd(&rwl->write_completions, 1);
#endif
if (rwl->write_granted >= rwl->write_quota ||
rwl->write_requests == rwl->write_completions ||
@@ -682,211 +553,3 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
return (ISC_R_SUCCESS);
}
#else /* ISC_RWLOCK_USEATOMIC */
static isc_result_t
doit(isc_rwlock_t *rwl, isc_rwlocktype_t type, bool nonblock) {
bool skip = false;
bool done = false;
isc_result_t result = ISC_R_SUCCESS;
REQUIRE(VALID_RWLOCK(rwl));
LOCK(&rwl->lock);
#ifdef ISC_RWLOCK_TRACE
print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif
if (type == isc_rwlocktype_read) {
if (rwl->readers_waiting != 0)
skip = true;
while (!done) {
if (!skip &&
((rwl->active == 0 ||
(rwl->type == isc_rwlocktype_read &&
(rwl->writers_waiting == 0 ||
rwl->granted < rwl->read_quota)))))
{
rwl->type = isc_rwlocktype_read;
rwl->active++;
rwl->granted++;
done = true;
} else if (nonblock) {
result = ISC_R_LOCKBUSY;
done = true;
} else {
skip = false;
rwl->readers_waiting++;
WAIT(&rwl->readable, &rwl->lock);
rwl->readers_waiting--;
}
}
} else {
if (rwl->writers_waiting != 0)
skip = true;
while (!done) {
if (!skip && rwl->active == 0) {
rwl->type = isc_rwlocktype_write;
rwl->active = 1;
rwl->granted++;
done = true;
} else if (nonblock) {
result = ISC_R_LOCKBUSY;
done = true;
} else {
skip = false;
rwl->writers_waiting++;
WAIT(&rwl->writeable, &rwl->lock);
rwl->writers_waiting--;
}
}
}
#ifdef ISC_RWLOCK_TRACE
print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif
UNLOCK(&rwl->lock);
return (result);
}
isc_result_t
isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
int32_t cnt = 0;
int32_t max_cnt = rwl->spins * 2 + 10;
isc_result_t result = ISC_R_SUCCESS;
if (max_cnt > RWLOCK_MAX_ADAPTIVE_COUNT)
max_cnt = RWLOCK_MAX_ADAPTIVE_COUNT;
do {
if (cnt++ >= max_cnt) {
result = doit(rwl, type, false);
break;
}
#ifdef ISC_PLATFORM_BUSYWAITNOP
ISC_PLATFORM_BUSYWAITNOP;
#endif
} while (doit(rwl, type, true) != ISC_R_SUCCESS);
rwl->spins += (cnt - rwl->spins) / 8;
return (result);
}
isc_result_t
isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
return (doit(rwl, type, true));
}
isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
isc_result_t result = ISC_R_SUCCESS;
REQUIRE(VALID_RWLOCK(rwl));
LOCK(&rwl->lock);
REQUIRE(rwl->type == isc_rwlocktype_read);
REQUIRE(rwl->active != 0);
/* If we are the only reader then succeed. */
if (rwl->active == 1) {
rwl->original = (rwl->original == isc_rwlocktype_none) ?
isc_rwlocktype_read : isc_rwlocktype_none;
rwl->type = isc_rwlocktype_write;
} else
result = ISC_R_LOCKBUSY;
UNLOCK(&rwl->lock);
return (result);
}
void
isc_rwlock_downgrade(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl));
LOCK(&rwl->lock);
REQUIRE(rwl->type == isc_rwlocktype_write);
REQUIRE(rwl->active == 1);
rwl->type = isc_rwlocktype_read;
rwl->original = (rwl->original == isc_rwlocktype_none) ?
isc_rwlocktype_write : isc_rwlocktype_none;
/*
* Resume processing any read request that were blocked when
* we upgraded.
*/
if (rwl->original == isc_rwlocktype_none &&
(rwl->writers_waiting == 0 || rwl->granted < rwl->read_quota) &&
rwl->readers_waiting > 0)
BROADCAST(&rwl->readable);
UNLOCK(&rwl->lock);
}
isc_result_t
isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
REQUIRE(VALID_RWLOCK(rwl));
LOCK(&rwl->lock);
REQUIRE(rwl->type == type);
UNUSED(type);
#ifdef ISC_RWLOCK_TRACE
print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_PREUNLOCK, "preunlock"), rwl, type);
#endif
INSIST(rwl->active > 0);
rwl->active--;
if (rwl->active == 0) {
if (rwl->original != isc_rwlocktype_none) {
rwl->type = rwl->original;
rwl->original = isc_rwlocktype_none;
}
if (rwl->type == isc_rwlocktype_read) {
rwl->granted = 0;
if (rwl->writers_waiting > 0) {
rwl->type = isc_rwlocktype_write;
SIGNAL(&rwl->writeable);
} else if (rwl->readers_waiting > 0) {
/* Does this case ever happen? */
BROADCAST(&rwl->readable);
}
} else {
if (rwl->readers_waiting > 0) {
if (rwl->writers_waiting > 0 &&
rwl->granted < rwl->write_quota) {
SIGNAL(&rwl->writeable);
} else {
rwl->granted = 0;
rwl->type = isc_rwlocktype_read;
BROADCAST(&rwl->readable);
}
} else if (rwl->writers_waiting > 0) {
rwl->granted = 0;
SIGNAL(&rwl->writeable);
} else {
rwl->granted = 0;
}
}
}
INSIST(rwl->original == isc_rwlocktype_none);
#ifdef ISC_RWLOCK_TRACE
print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_POSTUNLOCK, "postunlock"),
rwl, type);
#endif
UNLOCK(&rwl->lock);
return (ISC_R_SUCCESS);
}
#endif /* ISC_RWLOCK_USEATOMIC */
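To round out the rwlock code above, a hedged usage sketch of the public API (RUNTIME_CHECK comes from <isc/util.h>; the helper name below is only illustrative):

#include <isc/rwlock.h>
#include <isc/util.h>

static isc_rwlock_t rwl;

static void
read_shared_state(void) {
	RUNTIME_CHECK(isc_rwlock_init(&rwl, 0, 0) == ISC_R_SUCCESS);

	/* Any number of readers may hold the lock at once; a writer
	 * excludes everyone else. */
	RUNTIME_CHECK(isc_rwlock_lock(&rwl, isc_rwlocktype_read) ==
		      ISC_R_SUCCESS);
	/* ... inspect shared state ... */
	isc_rwlock_unlock(&rwl, isc_rwlocktype_read);

	isc_rwlock_destroy(&rwl);
}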

View File: lib/isc/stats.c

@@ -27,77 +27,10 @@
#include <isc/stats.h>
#include <isc/util.h>
#if defined(ISC_PLATFORM_HAVESTDATOMIC)
#include <stdatomic.h>
#endif
#define ISC_STATS_MAGIC ISC_MAGIC('S', 't', 'a', 't')
#define ISC_STATS_VALID(x) ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)
/*%
* Local macro confirming presence of 64-bit
* increment and store operations, just to make
* the later macros simpler
*/
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_LONG_LOCK_FREE)) || \
(defined(ISC_PLATFORM_HAVEXADDQ) && defined(ISC_PLATFORM_HAVEATOMICSTOREQ))
#define ISC_STATS_HAVEATOMICQ 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_LONG_LOCK_FREE))
#define ISC_STATS_HAVESTDATOMICQ 1
#endif
#else
#define ISC_STATS_HAVEATOMICQ 0
#endif
/*%
* Only lock the counters if 64-bit atomic operations are
* not available but cheap atomic lock operations are.
* On a modern 64-bit system this should never be the case.
*
* Normal locks are too expensive to be used whenever a counter
* is updated.
*/
#if !ISC_STATS_HAVEATOMICQ && defined(ISC_RWLOCK_HAVEATOMIC)
#define ISC_STATS_LOCKCOUNTERS 1
#else
#define ISC_STATS_LOCKCOUNTERS 0
#endif
/*%
* If 64-bit atomic operations are not available but
* 32-bit operations are then split the counter into two,
* using the atomic operations to try to ensure that any carry
* from the low word is correctly carried into the high word.
*
* Otherwise, just rely on standard 64-bit data types
* and operations
*/
#if !ISC_STATS_HAVEATOMICQ && ((defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD))
#define ISC_STATS_USEMULTIFIELDS 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_STATS_HAVESTDATOMIC 1
#endif
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif
#if ISC_STATS_USEMULTIFIELDS
typedef struct {
#if defined(ISC_STATS_HAVESTDATOMIC)
atomic_int_fast32_t hi;
atomic_int_fast32_t lo;
#else
uint32_t hi;
uint32_t lo;
#endif
} isc_stat_t;
#else
#if defined(ISC_STATS_HAVESTDATOMICQ)
typedef atomic_int_fast64_t isc_stat_t;
#else
typedef uint64_t isc_stat_t;
#endif
#endif
struct isc_stats {
/*% Unlocked */
@@ -112,9 +45,6 @@ struct isc_stats {
* Locked by counterlock or unlocked if efficient rwlock is not
* available.
*/
#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_t counterlock;
#endif
isc_stat_t *counters;
/*%
@@ -158,12 +88,6 @@ create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
goto clean_counters;
}
#if ISC_STATS_LOCKCOUNTERS
result = isc_rwlock_init(&stats->counterlock, 0, 0);
if (result != ISC_R_SUCCESS)
goto clean_copiedcounters;
#endif
stats->references = 1;
memset(stats->counters, 0, sizeof(isc_stat_t) * ncounters);
stats->mctx = NULL;
@@ -178,12 +102,6 @@ create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
clean_counters:
isc_mem_put(mctx, stats->counters, sizeof(isc_stat_t) * ncounters);
#if ISC_STATS_LOCKCOUNTERS
clean_copiedcounters:
isc_mem_put(mctx, stats->copiedcounters,
sizeof(isc_stat_t) * ncounters);
#endif
clean_mutex:
DESTROYLOCK(&stats->lock);
@@ -224,9 +142,6 @@ isc_stats_detach(isc_stats_t **statsp) {
sizeof(isc_stat_t) * stats->ncounters);
UNLOCK(&stats->lock);
DESTROYLOCK(&stats->lock);
#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_destroy(&stats->counterlock);
#endif
isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats));
return;
}
@@ -241,139 +156,6 @@ isc_stats_ncounters(isc_stats_t *stats) {
return (stats->ncounters);
}
static inline void
incrementcounter(isc_stats_t *stats, int counter) {
int32_t prev;
#if ISC_STATS_LOCKCOUNTERS
/*
* We use a "read" lock to prevent other threads from reading the
* counter while we "writing" a counter field. The write access itself
* is protected by the atomic operation.
*/
isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif
#if ISC_STATS_USEMULTIFIELDS
#if defined(ISC_STATS_HAVESTDATOMIC)
prev = atomic_fetch_add_explicit(&stats->counters[counter].lo, 1,
memory_order_relaxed);
#else
prev = isc_atomic_xadd((int32_t *)&stats->counters[counter].lo, 1);
#endif
/*
* If the lower 32-bit field overflows, increment the higher field.
* Note that it's *theoretically* possible that the lower field
* overlaps again before the higher field is incremented. It doesn't
* matter, however, because we don't read the value until
* isc_stats_copy() is called where the whole process is protected
* by the write (exclusive) lock.
*/
if (prev == (int32_t)0xffffffff) {
#if defined(ISC_STATS_HAVESTDATOMIC)
atomic_fetch_add_explicit(&stats->counters[counter].hi, 1,
memory_order_relaxed);
#else
isc_atomic_xadd((int32_t *)&stats->counters[counter].hi, 1);
#endif
}
#elif ISC_STATS_HAVEATOMICQ
UNUSED(prev);
#if defined(ISC_STATS_HAVESTDATOMICQ)
atomic_fetch_add_explicit(&stats->counters[counter], 1,
memory_order_relaxed);
#else
isc_atomic_xaddq((int64_t *)&stats->counters[counter], 1);
#endif
#else
UNUSED(prev);
stats->counters[counter]++;
#endif
#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
static inline void
decrementcounter(isc_stats_t *stats, int counter) {
int32_t prev;
#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif
#if ISC_STATS_USEMULTIFIELDS
#if defined(ISC_STATS_HAVESTDATOMIC)
prev = atomic_fetch_sub_explicit(&stats->counters[counter].lo, 1,
memory_order_relaxed);
#else
prev = isc_atomic_xadd((int32_t *)&stats->counters[counter].lo, -1);
#endif
if (prev == 0) {
#if defined(ISC_STATS_HAVESTDATOMIC)
atomic_fetch_sub_explicit(&stats->counters[counter].hi, 1,
memory_order_relaxed);
#else
isc_atomic_xadd((int32_t *)&stats->counters[counter].hi,
-1);
#endif
}
#elif ISC_STATS_HAVEATOMICQ
UNUSED(prev);
#if defined(ISC_STATS_HAVESTDATOMICQ)
atomic_fetch_sub_explicit(&stats->counters[counter], 1,
memory_order_relaxed);
#else
isc_atomic_xaddq((int64_t *)&stats->counters[counter], -1);
#endif
#else
UNUSED(prev);
stats->counters[counter]--;
#endif
#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
static void
copy_counters(isc_stats_t *stats) {
int i;
#if ISC_STATS_LOCKCOUNTERS
/*
* We use a "write" lock before "reading" the statistics counters as
* an exclusive lock.
*/
isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif
for (i = 0; i < stats->ncounters; i++) {
#if ISC_STATS_USEMULTIFIELDS
stats->copiedcounters[i] =
(uint64_t)(stats->counters[i].hi) << 32 |
stats->counters[i].lo;
#elif ISC_STATS_HAVEATOMICQ
#if defined(ISC_STATS_HAVESTDATOMICQ)
stats->copiedcounters[i] =
atomic_load_explicit(&stats->counters[i],
memory_order_relaxed);
#else
/* use xaddq(..., 0) as an atomic load */
stats->copiedcounters[i] =
(uint64_t)isc_atomic_xaddq((int64_t *)&stats->counters[i], 0);
#endif
#else
stats->copiedcounters[i] = stats->counters[i];
#endif
}
#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}
isc_result_t
isc_stats_create(isc_mem_t *mctx, isc_stats_t **statsp, int ncounters) {
REQUIRE(statsp != NULL && *statsp == NULL);
@@ -386,7 +168,8 @@ isc_stats_increment(isc_stats_t *stats, isc_statscounter_t counter) {
REQUIRE(ISC_STATS_VALID(stats));
REQUIRE(counter < stats->ncounters);
incrementcounter(stats, (int)counter);
atomic_fetch_add_explicit(&stats->counters[counter], 1,
memory_order_relaxed);
}
void
@@ -394,7 +177,8 @@ isc_stats_decrement(isc_stats_t *stats, isc_statscounter_t counter) {
REQUIRE(ISC_STATS_VALID(stats));
REQUIRE(counter < stats->ncounters);
decrementcounter(stats, (int)counter);
atomic_fetch_sub_explicit(&stats->counters[counter], 1,
memory_order_relaxed);
}
void
@@ -405,7 +189,11 @@ isc_stats_dump(isc_stats_t *stats, isc_stats_dumper_t dump_fn,
REQUIRE(ISC_STATS_VALID(stats));
copy_counters(stats);
for (i = 0; i < stats->ncounters; i++) {
stats->copiedcounters[i] =
atomic_load_explicit(&stats->counters[i],
memory_order_relaxed);
}
for (i = 0; i < stats->ncounters; i++) {
if ((options & ISC_STATSDUMP_VERBOSE) == 0 &&
@@ -422,29 +210,6 @@ isc_stats_set(isc_stats_t *stats, uint64_t val,
REQUIRE(ISC_STATS_VALID(stats));
REQUIRE(counter < stats->ncounters);
#if ISC_STATS_LOCKCOUNTERS
/*
* We use a "write" lock before "reading" the statistics counters as
* an exclusive lock.
*/
isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif
#if ISC_STATS_USEMULTIFIELDS
stats->counters[counter].hi = (uint32_t)((val >> 32) & 0xffffffff);
stats->counters[counter].lo = (uint32_t)(val & 0xffffffff);
#elif ISC_STATS_HAVEATOMICQ
#if defined(ISC_STATS_HAVESTDATOMICQ)
atomic_store_explicit(&stats->counters[counter], val,
memory_order_relaxed);
#else
isc_atomic_storeq((int64_t *)&stats->counters[counter], val);
#endif
#else
stats->counters[counter] = val;
#endif
#if ISC_STATS_LOCKCOUNTERS
isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}
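The multifield counters deleted above split one 64-bit statistic into two 32-bit atomics and carried overflow from the low word into the high word by hand. A standalone sketch of that retired technique, for reference (illustration only, not BIND code; readers must still exclude writers, for example by holding a write lock, before combining the halves):

#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint32_t hi;
	_Atomic uint32_t lo;
} split_counter_t;

static void
split_increment(split_counter_t *c) {
	uint32_t prev = atomic_fetch_add_explicit(&c->lo, 1,
						  memory_order_relaxed);
	/* The low word just wrapped from 0xffffffff to 0: carry into hi. */
	if (prev == 0xffffffffU) {
		atomic_fetch_add_explicit(&c->hi, 1, memory_order_relaxed);
	}
}

static uint64_t
split_read(split_counter_t *c) {
	/* Only meaningful while increments are excluded by the caller. */
	uint64_t hi = atomic_load_explicit(&c->hi, memory_order_relaxed);
	uint64_t lo = atomic_load_explicit(&c->lo, memory_order_relaxed);
	return ((hi << 32) | lo);
}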