Commit 404c9b1c authored by Mukund Sivaraman

Use C11's stdatomic.h instead of isc_atomic where available

parent fb088a00
4728.   [func]          Use C11's stdatomic.h instead of isc_atomic
                        where available. [RT #40668]

4727.   [bug]           Retransferring an inline-signed slave using NSEC3
                        around the time its NSEC3 salt was changed could
                        result in an infinite signing loop. [RT #45080]
......
@@ -449,6 +449,9 @@ int sigwait(const unsigned int *set, int *sig);
/* Define to 1 if you have the `setresuid' function. */
#undef HAVE_SETRESUID
/* Define to 1 if you have the <stdatomic.h> header file. */
#undef HAVE_STDATOMIC_H
/* Define to 1 if you have the <stdint.h> header file. */
#undef HAVE_STDINT_H
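
With the probe above in place, any translation unit that includes the generated config.h can branch on its result. A minimal sketch of that pattern (illustrative, not part of the commit; assumes the generated config.h is on the include path):

#include "config.h"

#ifdef HAVE_STDATOMIC_H
#include <stdatomic.h>
#endif

int main(void) {
#ifdef HAVE_STDATOMIC_H
        /* C11 path: a relaxed increment on an atomic_int */
        atomic_int ok;
        atomic_init(&ok, 0);
        atomic_fetch_add_explicit(&ok, 1, memory_order_relaxed);
        return atomic_load(&ok) - 1;    /* exits 0 */
#else
        return 0;       /* pre-C11 path: fall back to isc_atomic */
#endif
}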
......
@@ -718,6 +718,7 @@ ISC_PLATFORM_HAVEATOMICSTORE
ISC_PLATFORM_HAVECMPXCHG
ISC_PLATFORM_HAVEXADDQ
ISC_PLATFORM_HAVEXADD
ISC_PLATFORM_HAVESTDATOMIC
ISC_PLATFORM_HAVEIFNAMETOINDEX
ISC_PLATFORM_HAVESTRINGSH
ISC_PLATFORM_BRACEPTHREADONCEINIT
@@ -20126,6 +20127,21 @@ done
#
# Machine architecture dependent features
#
for ac_header in stdatomic.h
do :
ac_fn_c_check_header_mongrel "$LINENO" "stdatomic.h" "ac_cv_header_stdatomic_h" "$ac_includes_default"
if test "x$ac_cv_header_stdatomic_h" = xyes; then :
cat >>confdefs.h <<_ACEOF
#define HAVE_STDATOMIC_H 1
_ACEOF
ISC_PLATFORM_HAVESTDATOMIC="#define ISC_PLATFORM_HAVESTDATOMIC 1"
else
ISC_PLATFORM_HAVESTDATOMIC="#undef ISC_PLATFORM_HAVESTDATOMIC"
fi
done
# Check whether --enable-atomic was given.
if test "${enable_atomic+set}" = set; then :
enableval=$enable_atomic; enable_atomic="$enableval"
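
The generated check above succeeds when a program that includes the header compiles cleanly. Roughly, the probe reduces to compiling a conftest like the following (an approximation of autoconf's behavior, not its literal output):

/* conftest.c, approximately: if this compiles, HAVE_STDATOMIC_H is set. */
#include <stdatomic.h>

int main(void) {
        return 0;
}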
@@ -20201,11 +20217,14 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
esac
;;
no)
ISC_PLATFORM_HAVESTDATOMIC="#undef ISC_PLATFORM_HAVESTDATOMIC"
use_atomic=no
arch=noatomic
;;
esac
ISC_PLATFORM_USEOSFASM="#undef ISC_PLATFORM_USEOSFASM"
ISC_PLATFORM_USEGCCASM="#undef ISC_PLATFORM_USEGCCASM"
ISC_PLATFORM_USESTDASM="#undef ISC_PLATFORM_USESTDASM"
......
@@ -4006,6 +4006,10 @@ AC_CHECK_FUNCS(nanosleep usleep)
#
# Machine architecture dependent features
#
AC_CHECK_HEADERS(stdatomic.h,
[ISC_PLATFORM_HAVESTDATOMIC="#define ISC_PLATFORM_HAVESTDATOMIC 1"],
[ISC_PLATFORM_HAVESTDATOMIC="#undef ISC_PLATFORM_HAVESTDATOMIC"])
AC_ARG_ENABLE(atomic,
[ --enable-atomic enable machine specific atomic operations
[[default=autodetect]]],
@@ -4048,11 +4052,14 @@ case "$enable_atomic" in
esac
;;
no)
ISC_PLATFORM_HAVESTDATOMIC="#undef ISC_PLATFORM_HAVESTDATOMIC"
use_atomic=no
arch=noatomic
;;
esac
AC_SUBST(ISC_PLATFORM_HAVESTDATOMIC)
ISC_PLATFORM_USEOSFASM="#undef ISC_PLATFORM_USEOSFASM"
ISC_PLATFORM_USEGCCASM="#undef ISC_PLATFORM_USEGCCASM"
ISC_PLATFORM_USESTDASM="#undef ISC_PLATFORM_USESTDASM"
......
@@ -306,6 +306,12 @@
*/
@ISC_PLATFORM_HAVECMPXCHG@
/*
* If <stdatomic.h> is available on this architecture,
* ISC_PLATFORM_HAVESTDATOMIC will be defined.
*/
@ISC_PLATFORM_HAVESTDATOMIC@
/*
* Define if gcc ASM extension is available
*/
......
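Consumers pair the substituted ISC_PLATFORM_HAVESTDATOMIC with the capability macros that <stdatomic.h> itself defines: ATOMIC_INT_LOCK_FREE and ATOMIC_LONG_LOCK_FREE expand to 0 (never lock-free), 1 (sometimes) or 2 (always). Note that the guards added in this commit only test that the macro is defined at all, not its value. A standalone program to see what a given toolchain reports (illustrative, not BIND source):

#include <stdatomic.h>
#include <stdio.h>

int main(void) {
        printf("ATOMIC_INT_LOCK_FREE  = %d\n", ATOMIC_INT_LOCK_FREE);
        printf("ATOMIC_LONG_LOCK_FREE = %d\n", ATOMIC_LONG_LOCK_FREE);
        return 0;
}
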
@@ -18,6 +18,10 @@
#include <isc/types.h>
#include <isc/util.h>
#if defined(ISC_PLATFORM_HAVESTDATOMIC)
#include <stdatomic.h>
#endif
/*! \file isc/refcount.h
* \brief Implements a locked reference counter.
*
@@ -86,17 +90,59 @@ ISC_LANG_BEGINDECLS
* Sample implementations
*/
#ifdef ISC_PLATFORM_USETHREADS
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD)
#define ISC_REFCOUNT_HAVEATOMIC 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_REFCOUNT_HAVESTDATOMIC 1
#endif
typedef struct isc_refcount {
#if defined(ISC_REFCOUNT_HAVESTDATOMIC)
atomic_int_fast32_t refs;
#else
isc_int32_t refs;
#endif
} isc_refcount_t;
#define isc_refcount_destroy(rp) REQUIRE((rp)->refs == 0)
#define isc_refcount_current(rp) ((unsigned int)((rp)->refs))
#if defined(ISC_REFCOUNT_HAVESTDATOMIC)
#define isc_refcount_increment0(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
isc_int32_t prev; \
prev = atomic_fetch_add_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
} while (0)
#define isc_refcount_increment(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
isc_int32_t prev; \
prev = atomic_fetch_add_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
REQUIRE(prev > 0); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
} while (0)
#define isc_refcount_decrement(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
isc_int32_t prev; \
prev = atomic_fetch_sub_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
REQUIRE(prev > 0); \
if (_tmp != NULL) \
*_tmp = prev - 1; \
} while (0)
#else /* ISC_REFCOUNT_HAVESTDATOMIC */
#define isc_refcount_increment0(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
@@ -126,6 +172,8 @@ typedef struct isc_refcount {
*_tmp = prev - 1; \
} while (0)
#endif /* ISC_REFCOUNT_HAVESTDATOMIC */
#else /* ISC_PLATFORM_HAVEXADD */
typedef struct isc_refcount {
@@ -176,7 +224,7 @@ typedef struct isc_refcount {
UNLOCK(&(rp)->lock); \
} while (0)
#endif /* (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD) */
#else /* ISC_PLATFORM_USETHREADS */
typedef struct isc_refcount {
......
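Taken together, the stdatomic branch of the refcount macros above amounts to a relaxed fetch-and-add plus a positivity check. A self-contained model of that behavior (hypothetical refcount_t names, with assert() standing in for REQUIRE):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
        atomic_int_fast32_t refs;
} refcount_t;

/* increment: the count must already be positive */
static int_fast32_t refcount_increment(refcount_t *r) {
        int_fast32_t prev = atomic_fetch_add_explicit(&r->refs, 1,
                                                      memory_order_relaxed);
        assert(prev > 0);
        return prev + 1;
}

/* decrement: returns the new count; 0 means the object can be freed */
static int_fast32_t refcount_decrement(refcount_t *r) {
        int_fast32_t prev = atomic_fetch_sub_explicit(&r->refs, 1,
                                                      memory_order_relaxed);
        assert(prev > 0);
        return prev - 1;
}

int main(void) {
        refcount_t r;
        atomic_init(&r.refs, 1);
        printf("%ld\n", (long)refcount_increment(&r));  /* 2 */
        printf("%ld\n", (long)refcount_decrement(&r));  /* 1 */
        return 0;
}
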
@@ -18,6 +18,10 @@
#include <isc/platform.h>
#include <isc/types.h>
#if defined(ISC_PLATFORM_HAVESTDATOMIC)
#include <stdatomic.h>
#endif
ISC_LANG_BEGINDECLS
typedef enum {
@@ -27,8 +31,11 @@ typedef enum {
} isc_rwlocktype_t;
#ifdef ISC_PLATFORM_USETHREADS
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || (defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG))
#define ISC_RWLOCK_USEATOMIC 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_RWLOCK_USESTDATOMIC 1
#endif
#endif
struct isc_rwlock {
@@ -37,7 +44,7 @@ struct isc_rwlock {
isc_mutex_t lock;
isc_int32_t spins;
#if defined(ISC_RWLOCK_USEATOMIC)
/*
* When some atomic instructions with hardware assistance are
* available, rwlock will use those so that concurrent readers do not
@@ -52,9 +59,15 @@
*/
/* Read or modified atomically. */
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_int_fast32_t write_requests;
atomic_int_fast32_t write_completions;
atomic_int_fast32_t cnt_and_flag;
#else
isc_int32_t write_requests;
isc_int32_t write_completions;
isc_int32_t cnt_and_flag;
#endif
/* Locked by lock. */
isc_condition_t readable;
@@ -67,7 +80,7 @@ struct isc_rwlock {
/* Unlocked. */
unsigned int write_quota;
#else /* ISC_RWLOCK_USEATOMIC */
/*%< Locked by lock. */
isc_condition_t readable;
@@ -89,7 +102,7 @@ struct isc_rwlock {
unsigned int read_quota;
unsigned int write_quota;
isc_rwlocktype_t original;
#endif /* ISC_RWLOCK_USEATOMIC */
};
#else /* ISC_PLATFORM_USETHREADS */
struct isc_rwlock {
......
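The struct above keeps the hot counters in atomic words so that readers never need the mutex. In particular, cnt_and_flag packs a writer flag into bit 0 and a reader count into the remaining bits; the rwlock source defines WRITER_ACTIVE as 0x1 and READER_INCR as 0x2. A standalone illustration of that packing (not BIND code):

#include <stdatomic.h>
#include <stdio.h>

#define WRITER_ACTIVE   0x1
#define READER_INCR     0x2

int main(void) {
        atomic_int_fast32_t cnt_and_flag;
        atomic_init(&cnt_and_flag, 0);

        /* two readers arrive: each adds READER_INCR */
        atomic_fetch_add_explicit(&cnt_and_flag, READER_INCR,
                                  memory_order_relaxed);
        atomic_fetch_add_explicit(&cnt_and_flag, READER_INCR,
                                  memory_order_relaxed);

        int_fast32_t v = atomic_load_explicit(&cnt_and_flag,
                                              memory_order_relaxed);
        printf("readers=%ld writer_active=%ld\n",
               (long)(v / READER_INCR), (long)(v & WRITER_ACTIVE));
        return 0;
}
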
@@ -21,7 +21,7 @@ isc_refcount_init(isc_refcount_t *ref, unsigned int n) {
REQUIRE(ref != NULL);
ref->refs = n;
#if defined(ISC_PLATFORM_USETHREADS) && !defined(ISC_REFCOUNT_HAVEATOMIC)
return (isc_mutex_init(&ref->lock));
#else
return (ISC_R_SUCCESS);
......
@@ -39,7 +39,7 @@
#define RWLOCK_MAX_ADAPTIVE_COUNT 100
#endif
#if defined(ISC_RWLOCK_USEATOMIC)
static isc_result_t
isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type);
#endif
@@ -50,7 +50,7 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type);
static void
print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
#if defined(ISC_RWLOCK_USEATOMIC)
fprintf(stderr,
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_PRINTLOCK2,
@@ -105,7 +105,7 @@ isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
rwl->magic = 0;
rwl->spins = 0;
#if defined(ISC_RWLOCK_USEATOMIC)
rwl->write_requests = 0;
rwl->write_completions = 0;
rwl->cnt_and_flag = 0;
@@ -174,7 +174,7 @@ void
isc_rwlock_destroy(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl));
#if defined(ISC_RWLOCK_USEATOMIC)
REQUIRE(rwl->write_requests == rwl->write_completions &&
rwl->cnt_and_flag == 0 && rwl->readers_waiting == 0);
#else
@@ -191,7 +191,7 @@ isc_rwlock_destroy(isc_rwlock_t *rwl) {
DESTROYLOCK(&rwl->lock);
}
#if defined(ISC_RWLOCK_USEATOMIC)
/*
* When some architecture-dependent atomic operations are available,
@@ -281,7 +281,13 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
UNLOCK(&rwl->lock);
}
#if defined(ISC_RWLOCK_USESTDATOMIC)
cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
READER_INCR,
memory_order_relaxed);
#else
cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
#endif
POST(cntflag);
while (1) {
if ((rwl->cnt_and_flag & WRITER_ACTIVE) == 0)
@@ -331,7 +337,12 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
isc_int32_t prev_writer;
/* enter the waiting queue, and wait for our turn */
#if defined(ISC_RWLOCK_USESTDATOMIC)
prev_writer = atomic_fetch_add_explicit(&rwl->write_requests, 1,
memory_order_relaxed);
#else
prev_writer = isc_atomic_xadd(&rwl->write_requests, 1);
#endif
while (rwl->write_completions != prev_writer) {
LOCK(&rwl->lock);
if (rwl->write_completions != prev_writer) {
@@ -344,9 +355,18 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
}
while (1) {
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_int_fast32_t cntflag2 = 0;
atomic_compare_exchange_strong_explicit
(&rwl->cnt_and_flag, &cntflag2, WRITER_ACTIVE,
memory_order_relaxed, memory_order_relaxed);
#else
isc_int32_t cntflag2;
cntflag2 = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
WRITER_ACTIVE);
#endif
if (cntflag2 == 0)
break;
/* Another active reader or writer is working. */
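
A failed atomic_compare_exchange_strong_explicit overwrites the expected value with what it actually observed, so the expected variable must be reset to 0 on every pass; the patch gets this for free by declaring cntflag2 fresh inside the loop body. A compact standalone model of the writer acquire loop (hypothetical names, not BIND code):

#include <stdatomic.h>
#include <stdio.h>

#define WRITER_ACTIVE 0x1

static atomic_int_fast32_t cnt_and_flag;

/* Spin until we flip cnt_and_flag from 0 to WRITER_ACTIVE. */
static void writer_acquire(void) {
        for (;;) {
                int_fast32_t expected = 0;      /* reset on every attempt */
                if (atomic_compare_exchange_strong_explicit(
                            &cnt_and_flag, &expected, WRITER_ACTIVE,
                            memory_order_relaxed, memory_order_relaxed))
                        break;
                /* another active reader or writer is working; retry */
        }
}

int main(void) {
        atomic_init(&cnt_and_flag, 0);
        writer_acquire();
        printf("flag=%ld\n", (long)atomic_load_explicit(
                &cnt_and_flag, memory_order_relaxed));
        return 0;
}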
@@ -409,14 +429,26 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
return (ISC_R_LOCKBUSY);
/* Otherwise, be ready for reading. */
#if defined(ISC_RWLOCK_USESTDATOMIC)
cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
READER_INCR,
memory_order_relaxed);
#else
cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
#endif
if ((cntflag & WRITER_ACTIVE) != 0) {
/*
* A writer is working. We lose, and cancel the read
* request.
*/
#if defined(ISC_RWLOCK_USESTDATOMIC)
cntflag = atomic_fetch_sub_explicit
(&rwl->cnt_and_flag, READER_INCR,
memory_order_relaxed);
#else
cntflag = isc_atomic_xadd(&rwl->cnt_and_flag,
-READER_INCR);
#endif
/*
* If no other readers are waiting and we've suspended
* new writers in this short period, wake them up.
@@ -432,16 +464,29 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
}
} else {
/* Try locking without entering the waiting queue. */
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_int_fast32_t zero = 0;
if (!atomic_compare_exchange_strong_explicit
(&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
memory_order_relaxed, memory_order_relaxed))
return (ISC_R_LOCKBUSY);
#else
cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
WRITER_ACTIVE);
if (cntflag != 0)
return (ISC_R_LOCKBUSY);
#endif
/*
* XXXJT: jump into the queue, possibly breaking the writer
* order.
*/
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_fetch_sub_explicit(&rwl->write_completions, 1,
memory_order_relaxed);
#else
(void)isc_atomic_xadd(&rwl->write_completions, -1);
#endif
rwl->write_granted++;
}
@@ -456,31 +501,60 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
        REQUIRE(VALID_RWLOCK(rwl));

#if defined(ISC_RWLOCK_USESTDATOMIC)
        {
                atomic_int_fast32_t reader_incr = READER_INCR;

                /* Try to acquire write access. */
                atomic_compare_exchange_strong_explicit
                        (&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE,
                         memory_order_relaxed, memory_order_relaxed);

                /*
                 * There must have been no writer, and there must have
                 * been at least one reader.
                 */
                INSIST((reader_incr & WRITER_ACTIVE) == 0 &&
                       (reader_incr & ~WRITER_ACTIVE) != 0);

                if (reader_incr == READER_INCR) {
                        /*
                         * We are the only reader and have been upgraded.
                         * Now jump into the head of the writer waiting queue.
                         */
                        atomic_fetch_sub_explicit(&rwl->write_completions, 1,
                                                  memory_order_relaxed);
                } else
                        return (ISC_R_LOCKBUSY);
        }
#else
        {
                isc_int32_t prevcnt;

                /* Try to acquire write access. */
                prevcnt = isc_atomic_cmpxchg(&rwl->cnt_and_flag,
                                             READER_INCR, WRITER_ACTIVE);

                /*
                 * There must have been no writer, and there must have
                 * been at least one reader.
                 */
                INSIST((prevcnt & WRITER_ACTIVE) == 0 &&
                       (prevcnt & ~WRITER_ACTIVE) != 0);

                if (prevcnt == READER_INCR) {
                        /*
                         * We are the only reader and have been upgraded.
                         * Now jump into the head of the writer waiting queue.
                         */
                        (void)isc_atomic_xadd(&rwl->write_completions, -1);
                } else
                        return (ISC_R_LOCKBUSY);
        }
#endif

        return (ISC_R_SUCCESS);
}
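
The upgrade path relies on the same failure semantics: after the call, reader_incr is unchanged on success and otherwise holds whatever the lock word actually contained, which is exactly what the INSIST and the equality test inspect. One C11 detail worth flagging: the standard declares the expected argument as a pointer to the corresponding non-atomic type, so a strictly conforming caller passes an int_fast32_t rather than the atomic_int_fast32_t seen above. A conforming standalone sketch (not BIND code):

#include <stdatomic.h>
#include <stdio.h>

#define WRITER_ACTIVE 0x1
#define READER_INCR   0x2

int main(void) {
        atomic_int_fast32_t cnt_and_flag;
        atomic_init(&cnt_and_flag, READER_INCR);   /* exactly one reader */

        int_fast32_t expected = READER_INCR;       /* non-atomic, per C11 */
        _Bool upgraded = atomic_compare_exchange_strong_explicit(
                &cnt_and_flag, &expected, WRITER_ACTIVE,
                memory_order_relaxed, memory_order_relaxed);

        /* On failure, `expected` now holds the observed value. */
        printf("upgraded=%d observed=%ld\n", (int)upgraded, (long)expected);
        return 0;
}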
void
@@ -489,14 +563,33 @@ isc_rwlock_downgrade(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl));
#if defined(ISC_RWLOCK_USESTDATOMIC)
        {
                /* Become an active reader. */
                prev_readers = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
                                                         READER_INCR,
                                                         memory_order_relaxed);

                /* We must have been a writer. */
                INSIST((prev_readers & WRITER_ACTIVE) != 0);

                /* Complete write */
                atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
                                          memory_order_relaxed);
                atomic_fetch_add_explicit(&rwl->write_completions, 1,
                                          memory_order_relaxed);
        }
#else
        {
                /* Become an active reader. */
                prev_readers = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);

                /* We must have been a writer. */
                INSIST((prev_readers & WRITER_ACTIVE) != 0);

                /* Complete write */
                (void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
                (void)isc_atomic_xadd(&rwl->write_completions, 1);
        }
#endif
/* Resume other readers */
LOCK(&rwl->lock);
@@ -517,8 +610,13 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
#endif
if (type == isc_rwlocktype_read) {
#if defined(ISC_RWLOCK_USESTDATOMIC)
prev_cnt = atomic_fetch_sub_explicit(&rwl->cnt_and_flag,
READER_INCR,
memory_order_relaxed);
#else
prev_cnt = isc_atomic_xadd(&rwl->cnt_and_flag, -READER_INCR);
#endif
/*
* If we're the last reader and any writers are waiting, wake
* them up. We need to wake up all of them to ensure the
@@ -537,8 +635,15 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
* Reset the flag, and (implicitly) tell other writers
* we are done.
*/
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
memory_order_relaxed);
atomic_fetch_add_explicit(&rwl->write_completions, 1,
memory_order_relaxed);
#else
(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
(void)isc_atomic_xadd(&rwl->write_completions, 1);
#endif
if (rwl->write_granted >= rwl->write_quota ||
rwl->write_requests == rwl->write_completions ||
@@ -576,7 +681,7 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
return (ISC_R_SUCCESS);
}
#else /* ISC_RWLOCK_USEATOMIC */
static isc_result_t
doit(isc_rwlock_t *rwl, isc_rwlocktype_t type, isc_boolean_t nonblock) {
@@ -782,7 +887,7 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
return (ISC_R_SUCCESS);
}
#endif /* ISC_RWLOCK_USEATOMIC */
#else /* ISC_PLATFORM_USETHREADS */
isc_result_t
......
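Across all of these hunks the translation is mechanical: isc_atomic_xadd(p, n) becomes atomic_fetch_add_explicit(p, n, memory_order_relaxed), with a fetch_sub replacing a negative addend. Both forms return the value held before the update, which is why tests such as prev_cnt == READER_INCR (meaning this was the last reader) survive the rewrite unchanged. A minimal demonstration of that return-previous contract (not BIND code):

#include <stdatomic.h>
#include <stdio.h>

#define READER_INCR 0x2

int main(void) {
        atomic_int_fast32_t cnt;
        atomic_init(&cnt, READER_INCR);         /* one reader holds the lock */

        /* the reader leaves: fetch_sub reports the prior value */
        int_fast32_t prev_cnt = atomic_fetch_sub_explicit(
                &cnt, READER_INCR, memory_order_relaxed);

        if (prev_cnt == READER_INCR)
                printf("last reader out; wake any waiting writers\n");
        return 0;
}
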
@@ -24,6 +24,10 @@
#include <isc/stats.h>
#include <isc/util.h>
#if defined(ISC_PLATFORM_HAVESTDATOMIC)
#include <stdatomic.h>
#endif
#define ISC_STATS_MAGIC ISC_MAGIC('S', 't', 'a', 't')
#define ISC_STATS_VALID(x) ISC_MAGIC_VALID(x, ISC_STATS_MAGIC)
@@ -32,8 +36,12 @@
* increment and store operations, just to make
* the later macros simpler
*/
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_LONG_LOCK_FREE)) || \
(defined(ISC_PLATFORM_HAVEXADDQ) && defined(ISC_PLATFORM_HAVEATOMICSTOREQ))
#define ISC_STATS_HAVEATOMICQ 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_LONG_LOCK_FREE))
#define ISC_STATS_HAVESTDATOMICQ 1
#endif
#else
#define ISC_STATS_HAVEATOMICQ 0
#endif
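
When the 64-bit branch is taken, a whole counter is one atomic_int_fast64_t and isc_atomic_xaddq collapses into a single fetch-add. A minimal sketch of that branch (assumed names, not BIND source):

#include <stdatomic.h>
#include <stdio.h>

int main(void) {
        atomic_int_fast64_t counter;    /* one 64-bit statistics slot */
        atomic_init(&counter, 0);
        atomic_fetch_add_explicit(&counter, 1, memory_order_relaxed);
        printf("counter=%lld\n",
               (long long)atomic_load_explicit(&counter,
                                               memory_order_relaxed));
        return 0;
}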
@@ -61,20 +69,32 @@
* Otherwise, just rely on standard 64-bit data types
* and operations
*/
#if !ISC_STATS_HAVEATOMICQ && ((defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD))
#define ISC_STATS_USEMULTIFIELDS 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_STATS_HAVESTDATOMIC 1
#endif
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif
#if ISC_STATS_USEMULTIFIELDS
typedef struct {
#if defined(ISC_STATS_HAVESTDATOMIC)
atomic_int_fast32_t hi;
atomic_int_fast32_t lo;
#else
isc_uint32_t hi;
isc_uint32_t lo;
#endif
} isc_stat_t;
#else
#if defined(ISC_STATS_HAVESTDATOMICQ)
typedef atomic_int_fast64_t isc_stat_t;
#else
typedef isc_uint64_t isc_stat_t;
#endif
#endif
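
Without a lock-free 64-bit type, the multifield branch above emulates one counter with two 32-bit halves and carries the low-word overflow by hand, as incrementcounter() does further down. A standalone model of that carry; it deliberately uses atomic_uint_least32_t so the low word genuinely wraps at 2^32, whereas the patch stores the halves in atomic_int_fast32_t:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
        atomic_uint_least32_t hi;
        atomic_uint_least32_t lo;
} stat_t;

static void stat_increment(stat_t *s) {
        uint_least32_t prev = atomic_fetch_add_explicit(&s->lo, 1,
                                                        memory_order_relaxed);
        if (prev == 0xffffffff)         /* low word wrapped: carry */
                atomic_fetch_add_explicit(&s->hi, 1, memory_order_relaxed);
}

int main(void) {
        stat_t s;
        atomic_init(&s.hi, 0);
        atomic_init(&s.lo, 0xffffffff); /* one tick away from wrapping */
        stat_increment(&s);
        printf("hi=%lu lo=%lu\n",
               (unsigned long)atomic_load(&s.hi),
               (unsigned long)atomic_load(&s.lo));      /* hi=1 lo=0 */
        return 0;
}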
struct isc_stats {
/*% Unlocked */
@@ -232,7 +252,12 @@ incrementcounter(isc_stats_t *stats, int counter) {
#endif
#if ISC_STATS_USEMULTIFIELDS
#if defined(ISC_STATS_HAVESTDATOMIC)
prev = atomic_fetch_add_explicit(&stats->counters[counter].lo, 1,
memory_order_relaxed);
#else
prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, 1);
#endif
/*
* If the lower 32-bit field overflows, increment the higher field.
* Note that it's *theoretically* possible that the lower field
@@ -241,11 +266,22 @@ incrementcounter(isc_stats_t *stats, int counter) {
* isc_stats_copy() is called where the whole process is protected
* by the write (exclusive) lock.
*/
if (prev == (isc_int32_t)0xffffffff) {
#if defined(ISC_STATS_HAVESTDATOMIC)
atomic_fetch_add_explicit(&stats->counters[counter].hi, 1,
memory_order_relaxed);
#else
isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi, 1);
#endif
}
#elif ISC_STATS_HAVEATOMICQ
UNUSED(prev);
#if defined(ISC_STATS_HAVESTDATOMICQ)
atomic_fetch_add_explicit(&stats->counters[counter], 1,
memory_order_relaxed);
#else
isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
#endif
#else
UNUSED(prev);
stats->counters[counter]++;
@@ -265,13 +301,29 @@ decrementcounter(isc_stats_t *stats, int counter) {
#endif
#if ISC_STATS_USEMULTIFIELDS
#if defined(ISC_STATS_HAVESTDATOMIC)