Mirror of https://gitlab.isc.org/isc-projects/bind9, synced 2025-09-05 00:55:24 +00:00

Remove isc_atomic usage from rwlock.c and stats.c

This commit is contained in:
Ondřej Surý
2018-08-14 11:42:06 +02:00
parent e119de4169
commit e9e55cbd03
4 changed files with 23 additions and 759 deletions

View File

@@ -9,9 +9,7 @@
* information regarding copyright ownership.
*/
#ifndef ISC_REFCOUNT_H
#define ISC_REFCOUNT_H 1
#pragma once
#include <inttypes.h>
@@ -23,10 +21,6 @@
#include <isc/platform.h>
#include <isc/types.h>
#if defined(ISC_PLATFORM_HAVESTDATOMIC)
#include <stdatomic.h>
#endif
/*! \file isc/refcount.h
* \brief Implements a locked reference counter.
*
@@ -94,33 +88,26 @@ ISC_LANG_BEGINDECLS
/*
* Sample implementations
*/
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD)
#define ISC_REFCOUNT_HAVEATOMIC 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_REFCOUNT_HAVESTDATOMIC 1
#endif
typedef struct isc_refcount {
#if defined(ISC_REFCOUNT_HAVESTDATOMIC)
atomic_int_fast32_t refs;
#else
int32_t refs;
#endif
} isc_refcount_t;
#if defined(ISC_REFCOUNT_HAVESTDATOMIC)
#define isc_refcount_init(rp, n) \
atomic_init(&(rp)->refs, n)
#define isc_refcount_current(rp) \
((unsigned int)(atomic_load_explicit(&(rp)->refs, \
memory_order_relaxed)))
#define isc_refcount_destroy(rp) ISC_REQUIRE(isc_refcount_current(rp) == 0)
#define isc_refcount_current(rp) \
atomic_load_explicit(&(rp)->refs, memory_order_relaxed)
#define isc_refcount_destroy(rp) \
ISC_REQUIRE(isc_refcount_current(rp) == 0)
#define isc_refcount_increment0(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
int32_t prev; \
prev = atomic_fetch_add_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
(&(rp)->refs, 1, memory_order_relaxed); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
} while (0)
@@ -128,9 +115,9 @@ typedef struct isc_refcount {
#define isc_refcount_increment(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
int32_t prev; \
prev = atomic_fetch_add_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
(&(rp)->refs, 1, memory_order_relaxed); \
ISC_REQUIRE(prev > 0); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
@@ -139,7 +126,7 @@ typedef struct isc_refcount {
#define isc_refcount_decrement(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
int32_t prev; \
prev = atomic_fetch_sub_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
ISC_REQUIRE(prev > 0); \
@@ -147,115 +134,4 @@ typedef struct isc_refcount {
*_tmp = prev - 1; \
} while (0)
#else /* ISC_REFCOUNT_HAVESTDATOMIC */
#define isc_refcount_current(rp) \
((unsigned int)(isc_atomic_xadd(&(rp)->refs, 0)))
#define isc_refcount_destroy(rp) ISC_REQUIRE(isc_refcount_current(rp) == 0)
#define isc_refcount_increment0(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = isc_atomic_xadd(&(rp)->refs, 1); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
} while (0)
#define isc_refcount_increment(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = isc_atomic_xadd(&(rp)->refs, 1); \
ISC_REQUIRE(prev > 0); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
} while (0)
#define isc_refcount_decrement(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = isc_atomic_xadd(&(rp)->refs, -1); \
ISC_REQUIRE(prev > 0); \
if (_tmp != NULL) \
*_tmp = prev - 1; \
} while (0)
#endif /* ISC_REFCOUNT_HAVESTDATOMIC */
#else /* ISC_PLATFORM_HAVEXADD */
typedef struct isc_refcount {
int refs;
isc_mutex_t lock;
} isc_refcount_t;
/*% Destroys a reference counter. */
#define isc_refcount_destroy(rp) \
do { \
isc_result_t _result; \
ISC_REQUIRE((rp)->refs == 0); \
_result = isc_mutex_destroy(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
#define isc_refcount_current(rp) ((unsigned int)((rp)->refs))
/*%
* Increments the reference count, returning the new value in
* 'tp' if it's not NULL.
*/
#define isc_refcount_increment0(rp, tp) \
do { \
isc_result_t _result; \
unsigned int *_tmp = (unsigned int *)(tp); \
_result = isc_mutex_lock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
++((rp)->refs); \
if (_tmp != NULL) \
*_tmp = ((rp)->refs); \
_result = isc_mutex_unlock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
#define isc_refcount_increment(rp, tp) \
do { \
isc_result_t _result; \
unsigned int *_tmp = (unsigned int *)(tp); \
_result = isc_mutex_lock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
ISC_REQUIRE((rp)->refs > 0); \
++((rp)->refs); \
if (_tmp != NULL) \
*_tmp = ((rp)->refs); \
_result = isc_mutex_unlock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
/*%
* Decrements the reference count, returning the new value in 'tp'
* if it's not NULL.
*/
#define isc_refcount_decrement(rp, tp) \
do { \
isc_result_t _result; \
unsigned int *_tmp = (unsigned int *)(tp); \
_result = isc_mutex_lock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
ISC_REQUIRE((rp)->refs > 0); \
--((rp)->refs); \
if (_tmp != NULL) \
*_tmp = ((rp)->refs); \
_result = isc_mutex_unlock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
#endif /* (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD) */
isc_result_t
isc_refcount_init(isc_refcount_t *ref, unsigned int n);
ISC_LANG_ENDDECLS
#endif /* ISC_REFCOUNT_H */

View File

@@ -17,15 +17,12 @@
/*! \file isc/rwlock.h */
#include <isc/atomic.h>
#include <isc/condition.h>
#include <isc/lang.h>
#include <isc/platform.h>
#include <isc/types.h>
#if defined(ISC_PLATFORM_HAVESTDATOMIC)
#include <stdatomic.h>
#endif
ISC_LANG_BEGINDECLS
typedef enum {
@@ -34,20 +31,12 @@ typedef enum {
isc_rwlocktype_write
} isc_rwlocktype_t;
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || (defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG))
#define ISC_RWLOCK_USEATOMIC 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_RWLOCK_USESTDATOMIC 1
#endif
#endif
struct isc_rwlock {
/* Unlocked. */
unsigned int magic;
isc_mutex_t lock;
int32_t spins;
#if defined(ISC_RWLOCK_USEATOMIC)
/*
* When some atomic instructions with hardware assistance are
* available, rwlock will use those so that concurrent readers do not
@@ -62,15 +51,9 @@ struct isc_rwlock {
*/
/* Read or modified atomically. */
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_int_fast32_t write_requests;
atomic_int_fast32_t write_completions;
atomic_int_fast32_t cnt_and_flag;
#else
int32_t write_requests;
int32_t write_completions;
int32_t cnt_and_flag;
#endif
/* Locked by lock. */
isc_condition_t readable;
@@ -83,29 +66,6 @@ struct isc_rwlock {
/* Unlocked. */
unsigned int write_quota;
#else /* ISC_RWLOCK_USEATOMIC */
/*%< Locked by lock. */
isc_condition_t readable;
isc_condition_t writeable;
isc_rwlocktype_t type;
/*% The number of threads that have the lock. */
unsigned int active;
/*%
* The number of lock grants made since the lock was last switched
* from reading to writing or vice versa; used in determining
* when the quota is reached and it is time to switch.
*/
unsigned int granted;
unsigned int readers_waiting;
unsigned int writers_waiting;
unsigned int read_quota;
unsigned int write_quota;
isc_rwlocktype_t original;
#endif /* ISC_RWLOCK_USEATOMIC */
};
isc_result_t