mirror of https://gitlab.isc.org/isc-projects/bind9
synced 2025-08-31 22:45:39 +00:00
Remove isc_atomic usage from rwlock.c and stats.c
lib/isc/stats.c (253 lines changed)
@@ -27,77 +27,10 @@
#include <isc/stats.h>
#include <isc/util.h>

#if defined(ISC_PLATFORM_HAVESTDATOMIC)
#include <stdatomic.h>
#endif

#define ISC_STATS_MAGIC ISC_MAGIC('S', 't', 'a', 't')
#define ISC_STATS_VALID(x) ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)

/*%
 * Local macro confirming presence of 64-bit
 * increment and store operations, just to make
 * the later macros simpler
 */
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_LONG_LOCK_FREE)) || \
	(defined(ISC_PLATFORM_HAVEXADDQ) && defined(ISC_PLATFORM_HAVEATOMICSTOREQ))
#define ISC_STATS_HAVEATOMICQ 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_LONG_LOCK_FREE))
#define ISC_STATS_HAVESTDATOMICQ 1
#endif
#else
#define ISC_STATS_HAVEATOMICQ 0
#endif

/*%
 * Only lock the counters if 64-bit atomic operations are
 * not available but cheap atomic lock operations are.
 * On a modern 64-bit system this should never be the case.
 *
 * Normal locks are too expensive to be used whenever a counter
 * is updated.
 */
#if !ISC_STATS_HAVEATOMICQ && defined(ISC_RWLOCK_HAVEATOMIC)
#define ISC_STATS_LOCKCOUNTERS 1
#else
#define ISC_STATS_LOCKCOUNTERS 0
#endif

/*%
 * If 64-bit atomic operations are not available but
 * 32-bit operations are, then split the counter into two,
 * using the atomic operations to try to ensure that any carry
 * from the low word is correctly carried into the high word.
 *
 * Otherwise, just rely on standard 64-bit data types
 * and operations.
 */
#if !ISC_STATS_HAVEATOMICQ && ((defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD))
#define ISC_STATS_USEMULTIFIELDS 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_STATS_HAVESTDATOMIC 1
#endif
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif

#if ISC_STATS_USEMULTIFIELDS
typedef struct {
#if defined(ISC_STATS_HAVESTDATOMIC)
	atomic_int_fast32_t hi;
	atomic_int_fast32_t lo;
#else
	uint32_t hi;
	uint32_t lo;
#endif
} isc_stat_t;
#else
#if defined(ISC_STATS_HAVESTDATOMICQ)
typedef atomic_int_fast64_t isc_stat_t;
#else
typedef uint64_t isc_stat_t;
#endif
#endif

struct isc_stats {
	/*% Unlocked */
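
Aside: the multi-field scheme the removed comment above describes — keeping a
64-bit counter in two 32-bit atomics and propagating the carry by hand — is
easier to see outside the macro maze. Here is a minimal, self-contained C11
sketch of the technique; counter64_t, counter_inc() and counter_read() are
invented names for illustration, not BIND code.

/* Split-counter sketch: a 64-bit logical counter held in two 32-bit
 * atomic halves, with the carry propagated manually. */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	_Atomic uint32_t hi;
	_Atomic uint32_t lo;
} counter64_t;

static void
counter_inc(counter64_t *c) {
	/* fetch_add returns the value *before* the increment, so a
	 * previous value of 0xffffffff means the low word just wrapped
	 * to zero and the carry belongs in the high word. */
	uint32_t prev = atomic_fetch_add_explicit(&c->lo, 1,
						  memory_order_relaxed);
	if (prev == 0xffffffffU) {
		atomic_fetch_add_explicit(&c->hi, 1, memory_order_relaxed);
	}
}

static uint64_t
counter_read(counter64_t *c) {
	/* Only safe while increments are excluded (in stats.c, by the
	 * write lock taken in copy_counters()); otherwise hi and lo may
	 * be observed on opposite sides of a carry. */
	return (((uint64_t)atomic_load_explicit(&c->hi,
						memory_order_relaxed) << 32) |
		atomic_load_explicit(&c->lo, memory_order_relaxed));
}

int
main(void) {
	counter64_t c = { .hi = 0, .lo = 0xffffffffU };
	counter_inc(&c); /* wraps lo, carries into hi */
	printf("%" PRIu64 "\n", counter_read(&c)); /* prints 4294967296 */
	return (0);
}
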
@@ -112,9 +45,6 @@ struct isc_stats {
	 * Locked by counterlock or unlocked if efficient rwlock is not
	 * available.
	 */
#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_t counterlock;
#endif
	isc_stat_t *counters;

	/*%
@@ -158,12 +88,6 @@ create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
		goto clean_counters;
	}

#if ISC_STATS_LOCKCOUNTERS
	result = isc_rwlock_init(&stats->counterlock, 0, 0);
	if (result != ISC_R_SUCCESS)
		goto clean_copiedcounters;
#endif

	stats->references = 1;
	memset(stats->counters, 0, sizeof(isc_stat_t) * ncounters);
	stats->mctx = NULL;
@@ -178,12 +102,6 @@ create_stats(isc_mem_t *mctx, int ncounters, isc_stats_t **statsp) {
clean_counters:
	isc_mem_put(mctx, stats->counters, sizeof(isc_stat_t) * ncounters);

#if ISC_STATS_LOCKCOUNTERS
clean_copiedcounters:
	isc_mem_put(mctx, stats->copiedcounters,
		    sizeof(isc_stat_t) * ncounters);
#endif

clean_mutex:
	DESTROYLOCK(&stats->lock);
@@ -224,9 +142,6 @@ isc_stats_detach(isc_stats_t **statsp) {
		    sizeof(isc_stat_t) * stats->ncounters);
	UNLOCK(&stats->lock);
	DESTROYLOCK(&stats->lock);
#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_destroy(&stats->counterlock);
#endif
	isc_mem_putanddetach(&stats->mctx, stats, sizeof(*stats));
	return;
}
@@ -241,139 +156,6 @@ isc_stats_ncounters(isc_stats_t *stats) {
	return (stats->ncounters);
}

static inline void
incrementcounter(isc_stats_t *stats, int counter) {
	int32_t prev;

#if ISC_STATS_LOCKCOUNTERS
	/*
	 * We use a "read" lock to prevent other threads from reading the
	 * counter while we are "writing" a counter field. The write access
	 * itself is protected by the atomic operation.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
#if defined(ISC_STATS_HAVESTDATOMIC)
	prev = atomic_fetch_add_explicit(&stats->counters[counter].lo, 1,
					 memory_order_relaxed);
#else
	prev = isc_atomic_xadd((int32_t *)&stats->counters[counter].lo, 1);
#endif
	/*
	 * If the lower 32-bit field overflows, increment the higher field.
	 * Note that it's *theoretically* possible that the lower field
	 * overflows again before the higher field is incremented. It doesn't
	 * matter, however, because we don't read the value until
	 * isc_stats_copy() is called where the whole process is protected
	 * by the write (exclusive) lock.
	 */
	if (prev == (int32_t)0xffffffff) {
#if defined(ISC_STATS_HAVESTDATOMIC)
		atomic_fetch_add_explicit(&stats->counters[counter].hi, 1,
					  memory_order_relaxed);
#else
		isc_atomic_xadd((int32_t *)&stats->counters[counter].hi, 1);
#endif
	}
#elif ISC_STATS_HAVEATOMICQ
	UNUSED(prev);
#if defined(ISC_STATS_HAVESTDATOMICQ)
	atomic_fetch_add_explicit(&stats->counters[counter], 1,
				  memory_order_relaxed);
#else
	isc_atomic_xaddq((int64_t *)&stats->counters[counter], 1);
#endif
#else
	UNUSED(prev);
	stats->counters[counter]++;
#endif

#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}

static inline void
decrementcounter(isc_stats_t *stats, int counter) {
	int32_t prev;

#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
#if defined(ISC_STATS_HAVESTDATOMIC)
	prev = atomic_fetch_sub_explicit(&stats->counters[counter].lo, 1,
					 memory_order_relaxed);
#else
	prev = isc_atomic_xadd((int32_t *)&stats->counters[counter].lo, -1);
#endif
	if (prev == 0) {
#if defined(ISC_STATS_HAVESTDATOMIC)
		atomic_fetch_sub_explicit(&stats->counters[counter].hi, 1,
					  memory_order_relaxed);
#else
		isc_atomic_xadd((int32_t *)&stats->counters[counter].hi,
				-1);
#endif
	}
#elif ISC_STATS_HAVEATOMICQ
	UNUSED(prev);
#if defined(ISC_STATS_HAVESTDATOMICQ)
	atomic_fetch_sub_explicit(&stats->counters[counter], 1,
				  memory_order_relaxed);
#else
	isc_atomic_xaddq((int64_t *)&stats->counters[counter], -1);
#endif
#else
	UNUSED(prev);
	stats->counters[counter]--;
#endif

#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}

static void
copy_counters(isc_stats_t *stats) {
	int i;

#if ISC_STATS_LOCKCOUNTERS
	/*
	 * We use a "write" lock before "reading" the statistics counters as
	 * an exclusive lock.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif

	for (i = 0; i < stats->ncounters; i++) {
#if ISC_STATS_USEMULTIFIELDS
		stats->copiedcounters[i] =
			(uint64_t)(stats->counters[i].hi) << 32 |
			stats->counters[i].lo;
#elif ISC_STATS_HAVEATOMICQ
#if defined(ISC_STATS_HAVESTDATOMICQ)
		stats->copiedcounters[i] =
			atomic_load_explicit(&stats->counters[i],
					     memory_order_relaxed);
#else
		/* use xaddq(..., 0) as an atomic load */
		stats->copiedcounters[i] =
			(uint64_t)isc_atomic_xaddq((int64_t *)&stats->counters[i], 0);
#endif
#else
		stats->copiedcounters[i] = stats->counters[i];
#endif
	}

#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}

isc_result_t
isc_stats_create(isc_mem_t *mctx, isc_stats_t **statsp, int ncounters) {
	REQUIRE(statsp != NULL && *statsp == NULL);
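
The locking idiom in the removed incrementcounter()/copy_counters() code above
is worth calling out: updaters take the *read* lock (they only need to exclude
the snapshot, not each other, since each field update is itself atomic), while
the snapshot takes the *write* lock to exclude every updater at once. Below is
a sketch of that same inversion using POSIX rwlocks and the split counter from
the earlier sketch; all names are invented, not BIND code.

/* Inverted-rwlock sketch: many "writers" of individual atomic fields
 * share the read lock; the snapshot takes the write lock. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	pthread_rwlock_t lock;   /* excludes snapshots from updates */
	_Atomic uint32_t hi, lo; /* split 64-bit counter */
} shared_counter_t;

static void
shared_counter_inc(shared_counter_t *c) {
	/* "Read" lock: many updaters may run concurrently; each field
	 * update is already atomic, so updaters need not exclude each
	 * other, only the snapshot below. */
	pthread_rwlock_rdlock(&c->lock);
	uint32_t prev = atomic_fetch_add_explicit(&c->lo, 1,
						  memory_order_relaxed);
	if (prev == 0xffffffffU) {
		atomic_fetch_add_explicit(&c->hi, 1, memory_order_relaxed);
	}
	pthread_rwlock_unlock(&c->lock);
}

static uint64_t
shared_counter_snapshot(shared_counter_t *c) {
	/* "Write" lock: excludes all updaters, so hi and lo are read on
	 * the same side of any pending carry. */
	pthread_rwlock_wrlock(&c->lock);
	uint64_t val = ((uint64_t)atomic_load_explicit(
				&c->hi, memory_order_relaxed) << 32) |
		       atomic_load_explicit(&c->lo, memory_order_relaxed);
	pthread_rwlock_unlock(&c->lock);
	return (val);
}
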
@@ -386,7 +168,8 @@ isc_stats_increment(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	incrementcounter(stats, (int)counter);
	atomic_fetch_add_explicit(&stats->counters[counter], 1,
				  memory_order_relaxed);
}

void
@@ -394,7 +177,8 @@ isc_stats_decrement(isc_stats_t *stats, isc_statscounter_t counter) {
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

	decrementcounter(stats, (int)counter);
	atomic_fetch_sub_explicit(&stats->counters[counter], 1,
				  memory_order_relaxed);
}

void
@@ -405,7 +189,11 @@ isc_stats_dump(isc_stats_t *stats, isc_stats_dumper_t dump_fn,

	REQUIRE(ISC_STATS_VALID(stats));

	copy_counters(stats);
	for (i = 0; i < stats->ncounters; i++) {
		stats->copiedcounters[i] =
			atomic_load_explicit(&stats->counters[i],
					     memory_order_relaxed);
	}

	for (i = 0; i < stats->ncounters; i++) {
		if ((options & ISC_STATSDUMP_VERBOSE) == 0 &&
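
Taken together, the three hunks above show where the commit lands: every
counter is an atomic_int_fast64_t, updated and read with relaxed C11 atomics,
and the HAVEATOMICQ/USEMULTIFIELDS/LOCKCOUNTERS machinery disappears. A
condensed sketch of that end state — the struct and names here are
illustrative, not the exact BIND definitions:

/* Condensed sketch of the post-commit design: one atomic 64-bit slot
 * per counter, relaxed ordering throughout. */
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	int ncounters;
	atomic_int_fast64_t *counters; /* live counters */
	uint64_t *copied;              /* snapshot for dumping */
} stats_t;

static void
stats_increment(stats_t *s, int i) {
	atomic_fetch_add_explicit(&s->counters[i], 1, memory_order_relaxed);
}

static void
stats_decrement(stats_t *s, int i) {
	atomic_fetch_sub_explicit(&s->counters[i], 1, memory_order_relaxed);
}

static void
stats_snapshot(stats_t *s) {
	/* Each load is atomic per counter; the snapshot as a whole is
	 * not a consistent cut across counters, which is acceptable
	 * for statistics. */
	for (int i = 0; i < s->ncounters; i++)
		s->copied[i] = atomic_load_explicit(&s->counters[i],
						    memory_order_relaxed);
}

Relaxed memory order suffices here because the counters are pure statistics:
no other data is published through them, so no acquire/release pairing is
needed.
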
@@ -422,29 +210,6 @@ isc_stats_set(isc_stats_t *stats, uint64_t val,
	REQUIRE(ISC_STATS_VALID(stats));
	REQUIRE(counter < stats->ncounters);

#if ISC_STATS_LOCKCOUNTERS
	/*
	 * We use a "write" lock as an exclusive lock while setting the
	 * counter, since the two 32-bit fields cannot be written as a
	 * single atomic operation.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_write);
#endif

#if ISC_STATS_USEMULTIFIELDS
	stats->counters[counter].hi = (uint32_t)((val >> 32) & 0xffffffff);
	stats->counters[counter].lo = (uint32_t)(val & 0xffffffff);
#elif ISC_STATS_HAVEATOMICQ
#if defined(ISC_STATS_HAVESTDATOMICQ)
	atomic_store_explicit(&stats->counters[counter], val,
			      memory_order_relaxed);
#else
	isc_atomic_storeq((int64_t *)&stats->counters[counter], val);
#endif
#else
	stats->counters[counter] = val;
#endif

#if ISC_STATS_LOCKCOUNTERS
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
#endif
}
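
For orientation, this is roughly how a caller exercises the isc_stats API that
the diff touches. The isc_stats_create/increment/decrement/set/detach
signatures match the hunks above; the isc_stats_dumper_t callback shape and
the isc_mem_create() call are recalled from the headers of the same era and
may differ in other BIND versions, so treat this as a sketch.

/* Hedged usage sketch for the isc_stats API (requires libisc). */
#include <isc/mem.h>
#include <isc/stats.h>
#include <isc/util.h>
#include <stdio.h>

static void
dump_one(isc_statscounter_t counter, uint64_t val, void *arg) {
	UNUSED(arg);
	printf("counter %u = %llu\n", (unsigned)counter,
	       (unsigned long long)val);
}

int
main(void) {
	isc_mem_t *mctx = NULL;
	isc_stats_t *stats = NULL;

	RUNTIME_CHECK(isc_mem_create(0, 0, &mctx) == ISC_R_SUCCESS);
	RUNTIME_CHECK(isc_stats_create(mctx, &stats, 4) == ISC_R_SUCCESS);

	isc_stats_increment(stats, 0);
	isc_stats_increment(stats, 0);
	isc_stats_set(stats, 100, 1);
	isc_stats_decrement(stats, 1);

	/* Non-verbose dumps typically skip zero counters; pass
	 * ISC_STATSDUMP_VERBOSE to see all of them. */
	isc_stats_dump(stats, dump_one, NULL, ISC_STATSDUMP_VERBOSE);

	isc_stats_detach(&stats);
	isc_mem_destroy(&mctx);
	return (0);
}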