mirror of https://gitlab.isc.org/isc-projects/bind9 synced 2025-09-01 15:05:23 +00:00

Use C11's stdatomic.h instead of isc_atomic where available

Author: Mukund Sivaraman
Date:   2017-09-19 15:42:54 +05:30
parent fb088a00cf
commit 404c9b1c53
10 changed files with 315 additions and 49 deletions
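
For orientation, the switch is mostly mechanical: wherever <stdatomic.h> is usable, an isc_atomic_xadd()/isc_atomic_cmpxchg() call becomes the corresponding C11 atomic_fetch_*/atomic_compare_exchange_* call with relaxed memory ordering. A minimal standalone sketch of that mapping (illustrative only, not code from this commit; "counter" is a hypothetical variable):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int_fast32_t counter;   /* was: isc_int32_t counter; */

    int main(void) {
        /* was: prev = isc_atomic_xadd(&counter, 1); */
        int prev = (int)atomic_fetch_add_explicit(&counter, 1,
                                                  memory_order_relaxed);
        printf("previous value: %d\n", prev);
        return 0;
    }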

CHANGES

@@ -1,3 +1,6 @@
+4728.	[func]		Use C11's stdatomic.h instead of isc_atomic
+			where available. [RT #40668]
+
 4727.	[bug]		Retransferring an inline-signed slave using NSEC3
 			around the time its NSEC3 salt was changed could result
 			in an infinite signing loop. [RT #45080]

config.h.in

@@ -449,6 +449,9 @@ int sigwait(const unsigned int *set, int *sig);
 /* Define to 1 if you have the `setresuid' function. */
 #undef HAVE_SETRESUID
 
+/* Define to 1 if you have the <stdatomic.h> header file. */
+#undef HAVE_STDATOMIC_H
+
 /* Define to 1 if you have the <stdint.h> header file. */
 #undef HAVE_STDINT_H

configure

@@ -718,6 +718,7 @@ ISC_PLATFORM_HAVEATOMICSTORE
 ISC_PLATFORM_HAVECMPXCHG
 ISC_PLATFORM_HAVEXADDQ
 ISC_PLATFORM_HAVEXADD
+ISC_PLATFORM_HAVESTDATOMIC
 ISC_PLATFORM_HAVEIFNAMETOINDEX
 ISC_PLATFORM_HAVESTRINGSH
 ISC_PLATFORM_BRACEPTHREADONCEINIT
@@ -20126,6 +20127,21 @@ done
 #
 # Machine architecture dependent features
 #
+for ac_header in stdatomic.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "stdatomic.h" "ac_cv_header_stdatomic_h" "$ac_includes_default"
+if test "x$ac_cv_header_stdatomic_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_STDATOMIC_H 1
+_ACEOF
+ ISC_PLATFORM_HAVESTDATOMIC="#define ISC_PLATFORM_HAVESTDATOMIC 1"
+else
+  ISC_PLATFORM_HAVESTDATOMIC="#undef ISC_PLATFORM_HAVESTDATOMIC"
+fi
+
+done
+
 # Check whether --enable-atomic was given.
 if test "${enable_atomic+set}" = set; then :
   enableval=$enable_atomic; enable_atomic="$enableval"
@@ -20201,11 +20217,14 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
 	esac
 	;;
 no)
+	ISC_PLATFORM_HAVESTDATOMIC="#undef ISC_PLATFORM_HAVESTDATOMIC"
 	use_atomic=no
 	arch=noatomic
 	;;
 esac
 
 ISC_PLATFORM_USEOSFASM="#undef ISC_PLATFORM_USEOSFASM"
 ISC_PLATFORM_USEGCCASM="#undef ISC_PLATFORM_USEGCCASM"
 ISC_PLATFORM_USESTDASM="#undef ISC_PLATFORM_USESTDASM"

configure.in

@@ -4006,6 +4006,10 @@ AC_CHECK_FUNCS(nanosleep usleep)
 #
 # Machine architecture dependent features
 #
+AC_CHECK_HEADERS(stdatomic.h,
+	[ISC_PLATFORM_HAVESTDATOMIC="#define ISC_PLATFORM_HAVESTDATOMIC 1"],
+	[ISC_PLATFORM_HAVESTDATOMIC="#undef ISC_PLATFORM_HAVESTDATOMIC"])
+
 AC_ARG_ENABLE(atomic,
 	[  --enable-atomic		enable machine specific atomic operations
 			[[default=autodetect]]],
@@ -4048,11 +4052,14 @@ case "$enable_atomic" in
 	esac
 	;;
 no)
+	ISC_PLATFORM_HAVESTDATOMIC="#undef ISC_PLATFORM_HAVESTDATOMIC"
 	use_atomic=no
 	arch=noatomic
 	;;
 esac
+AC_SUBST(ISC_PLATFORM_HAVESTDATOMIC)
+
 ISC_PLATFORM_USEOSFASM="#undef ISC_PLATFORM_USEOSFASM"
 ISC_PLATFORM_USEGCCASM="#undef ISC_PLATFORM_USEGCCASM"
 ISC_PLATFORM_USESTDASM="#undef ISC_PLATFORM_USESTDASM"
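
What AC_CHECK_HEADERS(stdatomic.h, ...) effectively does is try to compile a tiny program that includes the header; only if that succeeds does ISC_PLATFORM_HAVESTDATOMIC get defined. A hand-written probe along the same lines (an illustrative sketch, not part of the build system) would be:

    /* If this compiles, <stdatomic.h> is present; running it exercises
     * the basic operations as a sanity check. */
    #include <stdatomic.h>

    int main(void) {
        atomic_int x = ATOMIC_VAR_INIT(0);
        atomic_fetch_add(&x, 1);
        return (atomic_load(&x) == 1) ? 0 : 1;
    }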

lib/isc/include/isc/platform.h.in

@@ -306,6 +306,12 @@
  */
 @ISC_PLATFORM_HAVECMPXCHG@
 
+/*
+ * If <stdatomic.h> is available on this architecture,
+ * ISC_PLATFORM_HAVESTDATOMIC will be defined.
+ */
+@ISC_PLATFORM_HAVESTDATOMIC@
+
 /*
  * Define if gcc ASM extension is available
  */

lib/isc/include/isc/refcount.h

@@ -18,6 +18,10 @@
 #include <isc/types.h>
 #include <isc/util.h>
 
+#if defined(ISC_PLATFORM_HAVESTDATOMIC)
+#include <stdatomic.h>
+#endif
+
 /*! \file isc/refcount.h
  * \brief Implements a locked reference counter.
  *
@@ -86,17 +90,59 @@ ISC_LANG_BEGINDECLS
  * Sample implementations
  */
 #ifdef ISC_PLATFORM_USETHREADS
-#ifdef ISC_PLATFORM_HAVEXADD
+#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD)
 #define ISC_REFCOUNT_HAVEATOMIC 1
+#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
+#define ISC_REFCOUNT_HAVESTDATOMIC 1
+#endif
 
 typedef struct isc_refcount {
+#if defined(ISC_REFCOUNT_HAVESTDATOMIC)
+	atomic_int_fast32_t refs;
+#else
 	isc_int32_t refs;
+#endif
 } isc_refcount_t;
 
 #define isc_refcount_destroy(rp) REQUIRE((rp)->refs == 0)
 #define isc_refcount_current(rp) ((unsigned int)((rp)->refs))
 
+#if defined(ISC_REFCOUNT_HAVESTDATOMIC)
+#define isc_refcount_increment0(rp, tp)				\
+	do {							\
+		unsigned int *_tmp = (unsigned int *)(tp);	\
+		isc_int32_t prev;				\
+		prev = atomic_fetch_add_explicit		\
+			(&(rp)->refs, 1, memory_order_relaxed);	\
+		if (_tmp != NULL)				\
+			*_tmp = prev + 1;			\
+	} while (0)
+
+#define isc_refcount_increment(rp, tp)				\
+	do {							\
+		unsigned int *_tmp = (unsigned int *)(tp);	\
+		isc_int32_t prev;				\
+		prev = atomic_fetch_add_explicit		\
+			(&(rp)->refs, 1, memory_order_relaxed);	\
+		REQUIRE(prev > 0);				\
+		if (_tmp != NULL)				\
+			*_tmp = prev + 1;			\
+	} while (0)
+
+#define isc_refcount_decrement(rp, tp)				\
+	do {							\
+		unsigned int *_tmp = (unsigned int *)(tp);	\
+		isc_int32_t prev;				\
+		prev = atomic_fetch_sub_explicit		\
+			(&(rp)->refs, 1, memory_order_relaxed);	\
+		REQUIRE(prev > 0);				\
+		if (_tmp != NULL)				\
+			*_tmp = prev - 1;			\
+	} while (0)
+
+#else /* ISC_REFCOUNT_HAVESTDATOMIC */
+
 #define isc_refcount_increment0(rp, tp)				\
 	do {							\
 		unsigned int *_tmp = (unsigned int *)(tp);	\
@@ -126,6 +172,8 @@ typedef struct isc_refcount {
 		*_tmp = prev - 1;				\
 	} while (0)
 
+#endif /* ISC_REFCOUNT_HAVESTDATOMIC */
+
 #else /* ISC_PLATFORM_HAVEXADD */
 
 typedef struct isc_refcount {
@@ -176,7 +224,7 @@ typedef struct isc_refcount {
 		UNLOCK(&(rp)->lock);				\
 	} while (0)
 
-#endif /* ISC_PLATFORM_HAVEXADD */
+#endif /* (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD) */
 
 #else /* ISC_PLATFORM_USETHREADS */
 
 typedef struct isc_refcount {
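
Whichever branch of the header is compiled in, callers use the macros identically. A small usage sketch against the API above (hypothetical caller code, error handling reduced to RUNTIME_CHECK):

    #include <isc/refcount.h>
    #include <isc/result.h>
    #include <isc/util.h>

    static void
    refcount_example(void) {
        isc_refcount_t refs;
        unsigned int n;

        RUNTIME_CHECK(isc_refcount_init(&refs, 1) == ISC_R_SUCCESS);
        isc_refcount_increment(&refs, &n);    /* n is now 2 */
        isc_refcount_decrement(&refs, &n);    /* n is now 1 */
        isc_refcount_decrement(&refs, NULL);  /* drop the last reference */
        isc_refcount_destroy(&refs);          /* REQUIREs the count is zero */
    }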

lib/isc/include/isc/rwlock.h

@@ -18,6 +18,10 @@
 #include <isc/platform.h>
 #include <isc/types.h>
 
+#if defined(ISC_PLATFORM_HAVESTDATOMIC)
+#include <stdatomic.h>
+#endif
+
 ISC_LANG_BEGINDECLS
 
 typedef enum {
@@ -27,8 +31,11 @@ typedef enum {
 } isc_rwlocktype_t;
 
 #ifdef ISC_PLATFORM_USETHREADS
-#if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
+#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || (defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG))
 #define ISC_RWLOCK_USEATOMIC 1
+#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
+#define ISC_RWLOCK_USESTDATOMIC 1
+#endif
 #endif
 
 struct isc_rwlock {
@@ -37,7 +44,7 @@ struct isc_rwlock {
 	isc_mutex_t lock;
 	isc_int32_t spins;
 
-#if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
+#if defined(ISC_RWLOCK_USEATOMIC)
 	/*
 	 * When some atomic instructions with hardware assistance are
 	 * available, rwlock will use those so that concurrent readers do not
@@ -52,9 +59,15 @@ struct isc_rwlock {
 	 */
 
 	/* Read or modified atomically. */
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+	atomic_int_fast32_t write_requests;
+	atomic_int_fast32_t write_completions;
+	atomic_int_fast32_t cnt_and_flag;
+#else
 	isc_int32_t write_requests;
 	isc_int32_t write_completions;
 	isc_int32_t cnt_and_flag;
+#endif
 
 	/* Locked by lock. */
 	isc_condition_t readable;
@@ -67,7 +80,7 @@ struct isc_rwlock {
 	/* Unlocked. */
 	unsigned int write_quota;
 
-#else /* ISC_PLATFORM_HAVEXADD && ISC_PLATFORM_HAVECMPXCHG */
+#else /* ISC_RWLOCK_USEATOMIC */
 	/*%< Locked by lock. */
 	isc_condition_t readable;
@@ -89,7 +102,7 @@ struct isc_rwlock {
 	unsigned int read_quota;
 	unsigned int write_quota;
 	isc_rwlocktype_t original;
-#endif /* ISC_PLATFORM_HAVEXADD && ISC_PLATFORM_HAVECMPXCHG */
+#endif /* ISC_RWLOCK_USEATOMIC */
 };
 #else /* ISC_PLATFORM_USETHREADS */
 struct isc_rwlock {
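
The ATOMIC_INT_LOCK_FREE test above matters because <stdatomic.h> is allowed to implement atomics with hidden locks, and a lock-based cnt_and_flag would defeat the purpose of the atomic reader path. A standalone check in the same spirit (hypothetical, not part of the header) prints what a platform provides:

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void) {
        atomic_int_fast32_t probe;
        atomic_init(&probe, 0);

        /* 2 = always lock-free, 1 = sometimes, 0 = never. */
        printf("ATOMIC_INT_LOCK_FREE = %d\n", ATOMIC_INT_LOCK_FREE);
        printf("atomic_int_fast32_t lock-free here: %s\n",
               atomic_is_lock_free(&probe) ? "yes" : "no");
        return 0;
    }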

lib/isc/refcount.c

@@ -21,7 +21,7 @@ isc_refcount_init(isc_refcount_t *ref, unsigned int n) {
 	REQUIRE(ref != NULL);
 
 	ref->refs = n;
-#if defined(ISC_PLATFORM_USETHREADS) && !defined(ISC_PLATFORM_HAVEXADD)
+#if defined(ISC_PLATFORM_USETHREADS) && !defined(ISC_REFCOUNT_HAVEATOMIC)
 	return (isc_mutex_init(&ref->lock));
 #else
 	return (ISC_R_SUCCESS);

lib/isc/rwlock.c

@@ -39,7 +39,7 @@
 #define RWLOCK_MAX_ADAPTIVE_COUNT 100
 #endif
 
-#if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
+#if defined(ISC_RWLOCK_USEATOMIC)
 static isc_result_t
 isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type);
 #endif
@@ -50,7 +50,7 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type);
 
 static void
 print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
-#if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
+#if defined(ISC_RWLOCK_USEATOMIC)
 	fprintf(stderr,
 		isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
 			       ISC_MSG_PRINTLOCK2,
@@ -105,7 +105,7 @@ isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
 	rwl->magic = 0;
 	rwl->spins = 0;
-#if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
+#if defined(ISC_RWLOCK_USEATOMIC)
 	rwl->write_requests = 0;
 	rwl->write_completions = 0;
 	rwl->cnt_and_flag = 0;
@@ -174,7 +174,7 @@ void
 isc_rwlock_destroy(isc_rwlock_t *rwl) {
 	REQUIRE(VALID_RWLOCK(rwl));
 
-#if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
+#if defined(ISC_RWLOCK_USEATOMIC)
 	REQUIRE(rwl->write_requests == rwl->write_completions &&
 		rwl->cnt_and_flag == 0 && rwl->readers_waiting == 0);
 #else
@@ -191,7 +191,7 @@ isc_rwlock_destroy(isc_rwlock_t *rwl) {
 	DESTROYLOCK(&rwl->lock);
 }
 
-#if defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG)
+#if defined(ISC_RWLOCK_USEATOMIC)
 
 /*
  * When some architecture-dependent atomic operations are available,
@@ -281,7 +281,13 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 			UNLOCK(&rwl->lock);
 		}
 
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+		cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
+						    READER_INCR,
+						    memory_order_relaxed);
+#else
 		cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
+#endif
 		POST(cntflag);
 		while (1) {
 			if ((rwl->cnt_and_flag & WRITER_ACTIVE) == 0)
@@ -331,7 +337,12 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		isc_int32_t prev_writer;
 
 		/* enter the waiting queue, and wait for our turn */
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+		prev_writer = atomic_fetch_add_explicit(&rwl->write_requests, 1,
+							memory_order_relaxed);
+#else
 		prev_writer = isc_atomic_xadd(&rwl->write_requests, 1);
+#endif
 		while (rwl->write_completions != prev_writer) {
 			LOCK(&rwl->lock);
 			if (rwl->write_completions != prev_writer) {
@@ -344,9 +355,18 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		}
 
 		while (1) {
-			cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
-						     WRITER_ACTIVE);
-			if (cntflag == 0)
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+			atomic_int_fast32_t cntflag2 = 0;
+			atomic_compare_exchange_strong_explicit
+				(&rwl->cnt_and_flag, &cntflag2, WRITER_ACTIVE,
+				 memory_order_relaxed, memory_order_relaxed);
+#else
+			isc_int32_t cntflag2;
+			cntflag2 = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
+						      WRITER_ACTIVE);
+#endif
+			if (cntflag2 == 0)
 				break;
 
 			/* Another active reader or writer is working. */
@@ -409,14 +429,26 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 			return (ISC_R_LOCKBUSY);
 
 		/* Otherwise, be ready for reading. */
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+		cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
+						    READER_INCR,
+						    memory_order_relaxed);
+#else
 		cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
+#endif
 		if ((cntflag & WRITER_ACTIVE) != 0) {
 			/*
 			 * A writer is working. We lose, and cancel the read
 			 * request.
 			 */
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+			cntflag = atomic_fetch_sub_explicit
+				(&rwl->cnt_and_flag, READER_INCR,
+				 memory_order_relaxed);
+#else
 			cntflag = isc_atomic_xadd(&rwl->cnt_and_flag,
 						  -READER_INCR);
+#endif
 			/*
 			 * If no other readers are waiting and we've suspended
 			 * new writers in this short period, wake them up.
@@ -432,16 +464,29 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		}
 	} else {
 		/* Try locking without entering the waiting queue. */
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+		atomic_int_fast32_t zero = 0;
+		if (!atomic_compare_exchange_strong_explicit
+		    (&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
+		     memory_order_relaxed, memory_order_relaxed))
+			return (ISC_R_LOCKBUSY);
+#else
 		cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
 					     WRITER_ACTIVE);
 		if (cntflag != 0)
 			return (ISC_R_LOCKBUSY);
+#endif
 
 		/*
 		 * XXXJT: jump into the queue, possibly breaking the writer
 		 * order.
 		 */
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+		atomic_fetch_sub_explicit(&rwl->write_completions, 1,
+					  memory_order_relaxed);
+#else
 		(void)isc_atomic_xadd(&rwl->write_completions, -1);
+#endif
 		rwl->write_granted++;
 	}
@@ -456,31 +501,60 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 
 isc_result_t
 isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
-	isc_int32_t prevcnt;
-
 	REQUIRE(VALID_RWLOCK(rwl));
 
-	/* Try to acquire write access. */
-	prevcnt = isc_atomic_cmpxchg(&rwl->cnt_and_flag,
-				     READER_INCR, WRITER_ACTIVE);
-	/*
-	 * There must have been no writer, and there must have been at least
-	 * one reader.
-	 */
-	INSIST((prevcnt & WRITER_ACTIVE) == 0 &&
-	       (prevcnt & ~WRITER_ACTIVE) != 0);
-
-	if (prevcnt == READER_INCR) {
-		/*
-		 * We are the only reader and have been upgraded.
-		 * Now jump into the head of the writer waiting queue.
-		 */
-		(void)isc_atomic_xadd(&rwl->write_completions, -1);
-	} else
-		return (ISC_R_LOCKBUSY);
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+	{
+		atomic_int_fast32_t reader_incr = READER_INCR;
+
+		/* Try to acquire write access. */
+		atomic_compare_exchange_strong_explicit
+			(&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE,
+			 memory_order_relaxed, memory_order_relaxed);
+		/*
+		 * There must have been no writer, and there must have
+		 * been at least one reader.
+		 */
+		INSIST((reader_incr & WRITER_ACTIVE) == 0 &&
+		       (reader_incr & ~WRITER_ACTIVE) != 0);
+
+		if (reader_incr == READER_INCR) {
+			/*
+			 * We are the only reader and have been upgraded.
+			 * Now jump into the head of the writer waiting queue.
+			 */
+			atomic_fetch_sub_explicit(&rwl->write_completions, 1,
+						  memory_order_relaxed);
+		} else
+			return (ISC_R_LOCKBUSY);
+	}
+#else
+	{
+		isc_int32_t prevcnt;
+
+		/* Try to acquire write access. */
+		prevcnt = isc_atomic_cmpxchg(&rwl->cnt_and_flag,
+					     READER_INCR, WRITER_ACTIVE);
+		/*
+		 * There must have been no writer, and there must have
+		 * been at least one reader.
+		 */
+		INSIST((prevcnt & WRITER_ACTIVE) == 0 &&
+		       (prevcnt & ~WRITER_ACTIVE) != 0);
+
+		if (prevcnt == READER_INCR) {
+			/*
+			 * We are the only reader and have been upgraded.
+			 * Now jump into the head of the writer waiting queue.
+			 */
+			(void)isc_atomic_xadd(&rwl->write_completions, -1);
+		} else
+			return (ISC_R_LOCKBUSY);
+	}
+#endif
 
 	return (ISC_R_SUCCESS);
 }
 
 void
@@ -489,14 +563,33 @@ isc_rwlock_downgrade(isc_rwlock_t *rwl) {
 
 	REQUIRE(VALID_RWLOCK(rwl));
 
-	/* Become an active reader. */
-	prev_readers = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
-	/* We must have been a writer. */
-	INSIST((prev_readers & WRITER_ACTIVE) != 0);
-
-	/* Complete write */
-	(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
-	(void)isc_atomic_xadd(&rwl->write_completions, 1);
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+	{
+		/* Become an active reader. */
+		prev_readers = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
+							 READER_INCR,
+							 memory_order_relaxed);
+		/* We must have been a writer. */
+		INSIST((prev_readers & WRITER_ACTIVE) != 0);
+
+		/* Complete write */
+		atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
+					  memory_order_relaxed);
+		atomic_fetch_add_explicit(&rwl->write_completions, 1,
+					  memory_order_relaxed);
+	}
+#else
+	{
+		/* Become an active reader. */
+		prev_readers = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
+		/* We must have been a writer. */
+		INSIST((prev_readers & WRITER_ACTIVE) != 0);
+
+		/* Complete write */
+		(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
+		(void)isc_atomic_xadd(&rwl->write_completions, 1);
+	}
+#endif
 
 	/* Resume other readers */
 	LOCK(&rwl->lock);
@@ -517,8 +610,13 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 #endif
 
 	if (type == isc_rwlocktype_read) {
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+		prev_cnt = atomic_fetch_sub_explicit(&rwl->cnt_and_flag,
+						     READER_INCR,
+						     memory_order_relaxed);
+#else
 		prev_cnt = isc_atomic_xadd(&rwl->cnt_and_flag, -READER_INCR);
+#endif
 		/*
 		 * If we're the last reader and any writers are waiting, wake
 		 * them up.  We need to wake up all of them to ensure the
@@ -537,8 +635,15 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		 * Reset the flag, and (implicitly) tell other writers
 		 * we are done.
		 */
+#if defined(ISC_RWLOCK_USESTDATOMIC)
+		atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
+					  memory_order_relaxed);
+		atomic_fetch_add_explicit(&rwl->write_completions, 1,
+					  memory_order_relaxed);
+#else
 		(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
 		(void)isc_atomic_xadd(&rwl->write_completions, 1);
+#endif
 
 		if (rwl->write_granted >= rwl->write_quota ||
 		    rwl->write_requests == rwl->write_completions ||
@@ -576,7 +681,7 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 	return (ISC_R_SUCCESS);
 }
 
-#else /* ISC_PLATFORM_HAVEXADD && ISC_PLATFORM_HAVECMPXCHG */
+#else /* ISC_RWLOCK_USEATOMIC */
 
 static isc_result_t
 doit(isc_rwlock_t *rwl, isc_rwlocktype_t type, isc_boolean_t nonblock) {
@@ -782,7 +887,7 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 	return (ISC_R_SUCCESS);
 }
 
-#endif /* ISC_PLATFORM_HAVEXADD && ISC_PLATFORM_HAVECMPXCHG */
+#endif /* ISC_RWLOCK_USEATOMIC */
 
 #else /* ISC_PLATFORM_USETHREADS */
 
 isc_result_t
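
The tryupgrade and trylock rewrites lean on a C11 detail worth spelling out: when atomic_compare_exchange_strong_explicit() fails, it stores the value it actually observed into its "expected" argument (reader_incr, cntflag2 or zero above), which is why the code can INSIST on and branch on that variable afterwards instead of a separate return value. A small standalone demonstration (hypothetical values, not BIND code):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        atomic_int_fast32_t cnt_and_flag;
        atomic_init(&cnt_and_flag, 7);   /* pretend several readers are active */

        int_fast32_t expected = 1;       /* what a single lone reader would see */
        _Bool swapped = atomic_compare_exchange_strong_explicit(
                &cnt_and_flag, &expected, 100,
                memory_order_relaxed, memory_order_relaxed);

        /* Prints: swapped=0 expected=7 value=7 -- the failed CAS
         * copied the observed value back into 'expected'. */
        printf("swapped=%d expected=%ld value=%ld\n",
               (int)swapped, (long)expected,
               (long)atomic_load(&cnt_and_flag));
        return 0;
    }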

lib/isc/stats.c

@@ -24,6 +24,10 @@
 #include <isc/stats.h>
 #include <isc/util.h>
 
+#if defined(ISC_PLATFORM_HAVESTDATOMIC)
+#include <stdatomic.h>
+#endif
+
 #define ISC_STATS_MAGIC			ISC_MAGIC('S', 't', 'a', 't')
 #define ISC_STATS_VALID(x)		ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)
@@ -32,8 +36,12 @@
  * increment and store operations, just to make
  * the later macros simpler
  */
-#if defined(ISC_PLATFORM_HAVEXADDQ) && defined(ISC_PLATFORM_HAVEATOMICSTOREQ)
+#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_LONG_LOCK_FREE)) || \
+	(defined(ISC_PLATFORM_HAVEXADDQ) && defined(ISC_PLATFORM_HAVEATOMICSTOREQ))
 #define ISC_STATS_HAVEATOMICQ 1
+#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_LONG_LOCK_FREE))
+#define ISC_STATS_HAVESTDATOMICQ 1
+#endif
 #else
 #define ISC_STATS_HAVEATOMICQ 0
 #endif
@@ -61,20 +69,32 @@
  * Otherwise, just rely on standard 64-bit data types
  * and operations
  */
-#if !ISC_STATS_HAVEATOMICQ && defined(ISC_PLATFORM_HAVEXADD)
+#if !ISC_STATS_HAVEATOMICQ && ((defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD))
 #define ISC_STATS_USEMULTIFIELDS 1
+#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
+#define ISC_STATS_HAVESTDATOMIC 1
+#endif
 #else
 #define ISC_STATS_USEMULTIFIELDS 0
 #endif
 
 #if ISC_STATS_USEMULTIFIELDS
 typedef struct {
+#if defined(ISC_STATS_HAVESTDATOMIC)
+	atomic_int_fast32_t hi;
+	atomic_int_fast32_t lo;
+#else
 	isc_uint32_t hi;
 	isc_uint32_t lo;
+#endif
 } isc_stat_t;
 #else
+#if defined(ISC_STATS_HAVESTDATOMICQ)
+typedef atomic_int_fast64_t isc_stat_t;
+#else
 typedef isc_uint64_t isc_stat_t;
 #endif
+#endif
 
 struct isc_stats {
 	/*% Unlocked */
@@ -232,7 +252,12 @@ incrementcounter(isc_stats_t *stats, int counter) {
 #endif
 
 #if ISC_STATS_USEMULTIFIELDS
+#if defined(ISC_STATS_HAVESTDATOMIC)
+	prev = atomic_fetch_add_explicit(&stats->counters[counter].lo, 1,
+					 memory_order_relaxed);
+#else
 	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, 1);
+#endif
 	/*
 	 * If the lower 32-bit field overflows, increment the higher field.
 	 * Note that it's *theoretically* possible that the lower field
@@ -241,11 +266,22 @@ incrementcounter(isc_stats_t *stats, int counter) {
 	 * isc_stats_copy() is called where the whole process is protected
 	 * by the write (exclusive) lock.
 	 */
-	if (prev == (isc_int32_t)0xffffffff)
+	if (prev == (isc_int32_t)0xffffffff) {
+#if defined(ISC_STATS_HAVESTDATOMIC)
+		atomic_fetch_add_explicit(&stats->counters[counter].hi, 1,
+					  memory_order_relaxed);
+#else
 		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi, 1);
+#endif
+	}
 #elif ISC_STATS_HAVEATOMICQ
 	UNUSED(prev);
+#if defined(ISC_STATS_HAVESTDATOMICQ)
+	atomic_fetch_add_explicit(&stats->counters[counter], 1,
+				  memory_order_relaxed);
+#else
 	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
+#endif
 #else
 	UNUSED(prev);
 	stats->counters[counter]++;
@@ -265,13 +301,29 @@ decrementcounter(isc_stats_t *stats, int counter) {
 #endif
 
 #if ISC_STATS_USEMULTIFIELDS
+#if defined(ISC_STATS_HAVESTDATOMIC)
+	prev = atomic_fetch_sub_explicit(&stats->counters[counter].lo, 1,
+					 memory_order_relaxed);
+#else
 	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, -1);
-	if (prev == 0)
+#endif
+	if (prev == 0) {
+#if defined(ISC_STATS_HAVESTDATOMIC)
+		atomic_fetch_sub_explicit(&stats->counters[counter].hi, 1,
+					  memory_order_relaxed);
+#else
 		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
 				-1);
+#endif
+	}
 #elif ISC_STATS_HAVEATOMICQ
 	UNUSED(prev);
+#if defined(ISC_STATS_HAVESTDATOMICQ)
+	atomic_fetch_sub_explicit(&stats->counters[counter], 1,
+				  memory_order_relaxed);
+#else
 	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], -1);
+#endif
 #else
 	UNUSED(prev);
 	stats->counters[counter]--;
@@ -300,9 +352,15 @@ copy_counters(isc_stats_t *stats) {
 			(isc_uint64_t)(stats->counters[i].hi) << 32 |
 			stats->counters[i].lo;
 #elif ISC_STATS_HAVEATOMICQ
+#if defined(ISC_STATS_HAVESTDATOMICQ)
+		stats->copiedcounters[i] =
+			atomic_load_explicit(&stats->counters[i],
+					     memory_order_relaxed);
+#else
 		/* use xaddq(..., 0) as an atomic load */
 		stats->copiedcounters[i] =
 			(isc_uint64_t)isc_atomic_xaddq((isc_int64_t *)&stats->counters[i], 0);
+#endif
 #else
 		stats->copiedcounters[i] = stats->counters[i];
 #endif
@@ -373,7 +431,12 @@ isc_stats_set(isc_stats_t *stats, isc_uint64_t val,
 	stats->counters[counter].hi = (isc_uint32_t)((val >> 32) & 0xffffffff);
 	stats->counters[counter].lo = (isc_uint32_t)(val & 0xffffffff);
 #elif ISC_STATS_HAVEATOMICQ
+#if defined(ISC_STATS_HAVESTDATOMICQ)
+	atomic_store_explicit(&stats->counters[counter], val,
+			      memory_order_relaxed);
+#else
 	isc_atomic_storeq((isc_int64_t *)&stats->counters[counter], val);
+#endif
 #else
 	stats->counters[counter] = val;
 #endif
@@ -382,4 +445,3 @@ isc_stats_set(isc_stats_t *stats, isc_uint64_t val,
 	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_write);
 #endif
 }
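
When only 32-bit atomics are usable, the counters stay split into hi/lo halves (ISC_STATS_USEMULTIFIELDS) and copy_counters() reassembles the 64-bit value as shown above. The arithmetic in isolation, with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* A counter whose low word has wrapped exactly once. */
        uint32_t hi = 1;
        uint32_t lo = 5;

        /* Same reassembly as copy_counters(): (hi << 32) | lo. */
        uint64_t value = ((uint64_t)hi << 32) | lo;
        printf("counter = %llu\n", (unsigned long long)value);  /* 4294967301 */
        return 0;
    }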