Commit e9e55cbd authored by Ondřej Surý

Remove isc_atomic usage from rwlock.c and stats.c

parent e119de41
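
Editor's note: the pattern applied throughout this diff swaps ISC's homegrown primitives for C11 <stdatomic.h> calls. A minimal sketch of the equivalence, with a hypothetical counter standing in for fields such as rwl->write_requests: both isc_atomic_xadd() and atomic_fetch_add_explicit() return the value held before the addition, and isc_atomic_cmpxchg() maps onto atomic_compare_exchange_strong_explicit(), which takes the expected value by pointer.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

int main(void) {
	atomic_int_fast32_t counter;	/* hypothetical counter for illustration */
	atomic_init(&counter, 0);

	/* Old: prev = isc_atomic_xadd(&counter, 1); returned the value
	 * before the addition, exactly like atomic_fetch_add_explicit(). */
	int_fast32_t prev = atomic_fetch_add_explicit(&counter, 1,
						      memory_order_relaxed);

	/* Old: isc_atomic_cmpxchg(&counter, 1, 2) returned the previous
	 * value; C11 passes the expected value by pointer instead and
	 * reports success as a bool. */
	int_fast32_t expected = 1;
	bool swapped = atomic_compare_exchange_strong_explicit(
		&counter, &expected, 2,
		memory_order_relaxed, memory_order_relaxed);

	printf("prev=%ld swapped=%d now=%ld\n", (long)prev, swapped,
	       (long)atomic_load_explicit(&counter, memory_order_relaxed));
	return 0;
}
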
@@ -9,9 +9,7 @@
* information regarding copyright ownership.
*/
#ifndef ISC_REFCOUNT_H
#define ISC_REFCOUNT_H 1
#pragma once
#include <inttypes.h>
@@ -23,10 +21,6 @@
#include <isc/platform.h>
#include <isc/types.h>
#if defined(ISC_PLATFORM_HAVESTDATOMIC)
#include <stdatomic.h>
#endif
/*! \file isc/refcount.h
* \brief Implements a locked reference counter.
*
@@ -94,33 +88,26 @@ ISC_LANG_BEGINDECLS
/*
* Sample implementations
*/
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD)
#define ISC_REFCOUNT_HAVEATOMIC 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_REFCOUNT_HAVESTDATOMIC 1
#endif
typedef struct isc_refcount {
#if defined(ISC_REFCOUNT_HAVESTDATOMIC)
atomic_int_fast32_t refs;
#else
int32_t refs;
#endif
} isc_refcount_t;
#if defined(ISC_REFCOUNT_HAVESTDATOMIC)
#define isc_refcount_init(rp, n) \
atomic_init(&(rp)->refs, n)
#define isc_refcount_current(rp) \
((unsigned int)(atomic_load_explicit(&(rp)->refs, \
memory_order_relaxed)))
#define isc_refcount_destroy(rp) ISC_REQUIRE(isc_refcount_current(rp) == 0)
#define isc_refcount_increment0(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = atomic_fetch_add_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
} while (0)
@@ -128,9 +115,9 @@ typedef struct isc_refcount {
#define isc_refcount_increment(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = atomic_fetch_add_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
ISC_REQUIRE(prev > 0); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
@@ -139,7 +126,7 @@ typedef struct isc_refcount {
#define isc_refcount_decrement(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = atomic_fetch_sub_explicit \
(&(rp)->refs, 1, memory_order_relaxed); \
ISC_REQUIRE(prev > 0); \
@@ -147,115 +134,4 @@ typedef struct isc_refcount {
*_tmp = prev - 1; \
} while (0)
#else /* ISC_REFCOUNT_HAVESTDATOMIC */
#define isc_refcount_current(rp) \
((unsigned int)(isc_atomic_xadd(&(rp)->refs, 0)))
#define isc_refcount_destroy(rp) ISC_REQUIRE(isc_refcount_current(rp) == 0)
#define isc_refcount_increment0(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = isc_atomic_xadd(&(rp)->refs, 1); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
} while (0)
#define isc_refcount_increment(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = isc_atomic_xadd(&(rp)->refs, 1); \
ISC_REQUIRE(prev > 0); \
if (_tmp != NULL) \
*_tmp = prev + 1; \
} while (0)
#define isc_refcount_decrement(rp, tp) \
do { \
unsigned int *_tmp = (unsigned int *)(tp); \
int32_t prev; \
prev = isc_atomic_xadd(&(rp)->refs, -1); \
ISC_REQUIRE(prev > 0); \
if (_tmp != NULL) \
*_tmp = prev - 1; \
} while (0)
#endif /* ISC_REFCOUNT_HAVESTDATOMIC */
#else /* ISC_PLATFORM_HAVEXADD */
typedef struct isc_refcount {
int refs;
isc_mutex_t lock;
} isc_refcount_t;
/*% Destroys a reference counter. */
#define isc_refcount_destroy(rp) \
do { \
isc_result_t _result; \
ISC_REQUIRE((rp)->refs == 0); \
_result = isc_mutex_destroy(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
#define isc_refcount_current(rp) ((unsigned int)((rp)->refs))
/*%
* Increments the reference count, returning the new value in
* 'tp' if it's not NULL.
*/
#define isc_refcount_increment0(rp, tp) \
do { \
isc_result_t _result; \
unsigned int *_tmp = (unsigned int *)(tp); \
_result = isc_mutex_lock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
++((rp)->refs); \
if (_tmp != NULL) \
*_tmp = ((rp)->refs); \
_result = isc_mutex_unlock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
#define isc_refcount_increment(rp, tp) \
do { \
isc_result_t _result; \
unsigned int *_tmp = (unsigned int *)(tp); \
_result = isc_mutex_lock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
ISC_REQUIRE((rp)->refs > 0); \
++((rp)->refs); \
if (_tmp != NULL) \
*_tmp = ((rp)->refs); \
_result = isc_mutex_unlock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
/*%
* Decrements the reference count, returning the new value in 'tp'
* if it's not NULL.
*/
#define isc_refcount_decrement(rp, tp) \
do { \
isc_result_t _result; \
unsigned int *_tmp = (unsigned int *)(tp); \
_result = isc_mutex_lock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
ISC_REQUIRE((rp)->refs > 0); \
--((rp)->refs); \
if (_tmp != NULL) \
*_tmp = ((rp)->refs); \
_result = isc_mutex_unlock(&(rp)->lock); \
ISC_ERROR_RUNTIMECHECK(_result == ISC_R_SUCCESS); \
} while (0)
#endif /* (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || defined(ISC_PLATFORM_HAVEXADD) */
isc_result_t
isc_refcount_init(isc_refcount_t *ref, unsigned int n);
ISC_LANG_ENDDECLS
#endif /* ISC_REFCOUNT_H */
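
For context, a minimal usage sketch of the macro API defined above (assuming the header is on the include path; in the mutex fallback isc_refcount_init() also returns an isc_result_t that a real caller would check):

#include <isc/refcount.h>

static void
refcount_example(void) {
	isc_refcount_t ref;
	unsigned int refs;

	isc_refcount_init(&ref, 1);		/* start with one reference */
	isc_refcount_increment(&ref, &refs);	/* refs == 2 */
	isc_refcount_decrement(&ref, &refs);	/* refs == 1 */
	isc_refcount_decrement(&ref, &refs);	/* refs == 0 */
	isc_refcount_destroy(&ref);		/* REQUIREs the count be zero */
}
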
@@ -17,15 +17,12 @@
/*! \file isc/rwlock.h */
#include <isc/atomic.h>
#include <isc/condition.h>
#include <isc/lang.h>
#include <isc/platform.h>
#include <isc/types.h>
#if defined(ISC_PLATFORM_HAVESTDATOMIC)
#include <stdatomic.h>
#endif
ISC_LANG_BEGINDECLS
typedef enum {
@@ -34,20 +31,12 @@ typedef enum {
isc_rwlocktype_write
} isc_rwlocktype_t;
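
This enum selects between the read and write sides of the public API. A minimal hedged usage sketch, assuming the usual isc_rwlock_* prototypes declared later in this header and a linked libisc:

#include <isc/result.h>
#include <isc/rwlock.h>

static void
rwlock_example(void) {
	isc_rwlock_t rwl;

	if (isc_rwlock_init(&rwl, 0, 0) != ISC_R_SUCCESS)	/* 0 = default quota */
		return;
	if (isc_rwlock_lock(&rwl, isc_rwlocktype_read) == ISC_R_SUCCESS) {
		/* ... read-side critical section ... */
		(void)isc_rwlock_unlock(&rwl, isc_rwlocktype_read);
	}
	isc_rwlock_destroy(&rwl);
}
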
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE)) || (defined(ISC_PLATFORM_HAVEXADD) && defined(ISC_PLATFORM_HAVECMPXCHG))
#define ISC_RWLOCK_USEATOMIC 1
#if (defined(ISC_PLATFORM_HAVESTDATOMIC) && defined(ATOMIC_INT_LOCK_FREE))
#define ISC_RWLOCK_USESTDATOMIC 1
#endif
#endif
struct isc_rwlock {
/* Unlocked. */
unsigned int magic;
isc_mutex_t lock;
int32_t spins;
#if defined(ISC_RWLOCK_USEATOMIC)
/*
* When some atomic instructions with hardware assistance are
* available, rwlock will use those so that concurrent readers do not
@@ -62,15 +51,9 @@
*/
/* Read or modified atomically. */
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_int_fast32_t write_requests;
atomic_int_fast32_t write_completions;
atomic_int_fast32_t cnt_and_flag;
#else
int32_t write_requests;
int32_t write_completions;
int32_t cnt_and_flag;
#endif
/* Locked by lock. */
isc_condition_t readable;
@@ -83,29 +66,6 @@
/* Unlocked. */
unsigned int write_quota;
#else /* ISC_RWLOCK_USEATOMIC */
/*%< Locked by lock. */
isc_condition_t readable;
isc_condition_t writeable;
isc_rwlocktype_t type;
/*% The number of threads that have the lock. */
unsigned int active;
/*%
* The number of lock grants made since the lock was last switched
* from reading to writing or vice versa; used in determining
* when the quota is reached and it is time to switch.
*/
unsigned int granted;
unsigned int readers_waiting;
unsigned int writers_waiting;
unsigned int read_quota;
unsigned int write_quota;
isc_rwlocktype_t original;
#endif /* ISC_RWLOCK_USEATOMIC */
};
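
cnt_and_flag packs the writer-active flag and the active-reader count into one atomically updated word, which is what lets readers enter with a single fetch-and-add. A sketch of that layout; the constants mirror the usual rwlock.c definitions but are restated here as assumptions:

#include <stdatomic.h>
#include <stdbool.h>

#define WRITER_ACTIVE	0x1	/* low bit: a writer holds the lock */
#define READER_INCR	0x2	/* each active reader adds this */

static bool
writer_active(int_fast32_t cnt_and_flag) {
	return ((cnt_and_flag & WRITER_ACTIVE) != 0);
}

static int_fast32_t
active_readers(int_fast32_t cnt_and_flag) {
	return (cnt_and_flag / READER_INCR);	/* division drops the flag bit */
}
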
isc_result_t
@@ -41,10 +41,8 @@
#define RWLOCK_MAX_ADAPTIVE_COUNT 100
#endif
#if defined(ISC_RWLOCK_USEATOMIC)
static isc_result_t
isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type);
#endif
#ifdef ISC_RWLOCK_TRACE
#include <stdio.h> /* Required for fprintf/stderr. */
@@ -52,7 +50,6 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type);
static void
print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
#if defined(ISC_RWLOCK_USEATOMIC)
fprintf(stderr,
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_PRINTLOCK2,
@@ -69,26 +66,6 @@ print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
rwl->write_requests, rwl->write_completions,
rwl->cnt_and_flag, rwl->readers_waiting,
rwl->write_granted, rwl->write_quota);
#else
fprintf(stderr,
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_PRINTLOCK,
"rwlock %p thread %lu %s(%s): %s, %u active, "
"%u granted, %u rwaiting, %u wwaiting\n"),
rwl, isc_thread_self(), operation,
(type == isc_rwlocktype_read ?
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_READ, "read") :
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_WRITE, "write")),
(rwl->type == isc_rwlocktype_read ?
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_READING, "reading") :
isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_WRITING, "writing")),
rwl->active, rwl->granted,
rwl->readers_waiting, rwl->writers_waiting);
#endif
}
#endif /* ISC_RWLOCK_TRACE */
@@ -107,7 +84,6 @@ isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
rwl->magic = 0;
rwl->spins = 0;
#if defined(ISC_RWLOCK_USEATOMIC)
rwl->write_requests = 0;
rwl->write_completions = 0;
rwl->cnt_and_flag = 0;
@@ -120,20 +96,6 @@
if (write_quota == 0)
write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
rwl->write_quota = write_quota;
#else
rwl->type = isc_rwlocktype_read;
rwl->original = isc_rwlocktype_none;
rwl->active = 0;
rwl->granted = 0;
rwl->readers_waiting = 0;
rwl->writers_waiting = 0;
if (read_quota == 0)
read_quota = RWLOCK_DEFAULT_READ_QUOTA;
rwl->read_quota = read_quota;
if (write_quota == 0)
write_quota = RWLOCK_DEFAULT_WRITE_QUOTA;
rwl->write_quota = write_quota;
#endif
result = isc_mutex_init(&rwl->lock);
if (result != ISC_R_SUCCESS)
@@ -176,16 +138,8 @@ void
isc_rwlock_destroy(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl));
#if defined(ISC_RWLOCK_USEATOMIC)
REQUIRE(rwl->write_requests == rwl->write_completions &&
rwl->cnt_and_flag == 0 && rwl->readers_waiting == 0);
#else
LOCK(&rwl->lock);
REQUIRE(rwl->active == 0 &&
rwl->readers_waiting == 0 &&
rwl->writers_waiting == 0);
UNLOCK(&rwl->lock);
#endif
rwl->magic = 0;
(void)isc_condition_destroy(&rwl->readable);
@@ -193,8 +147,6 @@ isc_rwlock_destroy(isc_rwlock_t *rwl) {
DESTROYLOCK(&rwl->lock);
}
#if defined(ISC_RWLOCK_USEATOMIC)
/*
* When some architecture-dependent atomic operations are available,
* rwlock can be more efficient than the generic algorithm defined below.
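
The writer side of that faster path is a ticket queue: each writer increments write_requests to take a ticket and waits until write_completions catches up to it. A standalone sketch of the idea (spinning here only for brevity; the real code below waits on a condition variable under rwl->lock):

#include <stdatomic.h>

static atomic_int_fast32_t write_requests;
static atomic_int_fast32_t write_completions;

/* Take a ticket; the pre-increment value is this writer's turn number. */
static int_fast32_t
writer_enqueue(void) {
	return atomic_fetch_add_explicit(&write_requests, 1,
					 memory_order_relaxed);
}

static void
writer_wait_turn(int_fast32_t ticket) {
	while (atomic_load_explicit(&write_completions,
				    memory_order_relaxed) != ticket)
		;	/* spin; sketch only */
}

/* Unlocking (or downgrading) a writer advances the completion counter. */
static void
writer_done(void) {
	atomic_fetch_add_explicit(&write_completions, 1,
				  memory_order_relaxed);
}
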
@@ -283,13 +235,9 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
UNLOCK(&rwl->lock);
}
#if defined(ISC_RWLOCK_USESTDATOMIC)
cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
READER_INCR,
memory_order_relaxed);
#else
cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
#endif
POST(cntflag);
while (1) {
if ((rwl->cnt_and_flag & WRITER_ACTIVE) == 0)
@@ -339,12 +287,8 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
int32_t prev_writer;
/* enter the waiting queue, and wait for our turn */
#if defined(ISC_RWLOCK_USESTDATOMIC)
prev_writer = atomic_fetch_add_explicit(&rwl->write_requests, 1,
memory_order_relaxed);
#else
prev_writer = isc_atomic_xadd(&rwl->write_requests, 1);
#endif
while (rwl->write_completions != prev_writer) {
LOCK(&rwl->lock);
if (rwl->write_completions != prev_writer) {
@@ -357,16 +301,10 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
}
while (1) {
#if defined(ISC_RWLOCK_USESTDATOMIC)
int_fast32_t cntflag2 = 0;
atomic_compare_exchange_strong_explicit
(&rwl->cnt_and_flag, &cntflag2, WRITER_ACTIVE,
memory_order_relaxed, memory_order_relaxed);
#else
int32_t cntflag2;
cntflag2 = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
WRITER_ACTIVE);
#endif
if (cntflag2 == 0)
break;
@@ -431,26 +369,17 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
return (ISC_R_LOCKBUSY);
/* Otherwise, be ready for reading. */
#if defined(ISC_RWLOCK_USESTDATOMIC)
cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
READER_INCR,
memory_order_relaxed);
#else
cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
#endif
if ((cntflag & WRITER_ACTIVE) != 0) {
/*
* A writer is working. We lose, and cancel the read
* request.
*/
#if defined(ISC_RWLOCK_USESTDATOMIC)
cntflag = atomic_fetch_sub_explicit
(&rwl->cnt_and_flag, READER_INCR,
memory_order_relaxed);
#else
cntflag = isc_atomic_xadd(&rwl->cnt_and_flag,
-READER_INCR);
#endif
/*
* If no other readers are waiting and we've suspended
* new writers in this short period, wake them up.
@@ -466,29 +395,18 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
}
} else {
/* Try locking without entering the waiting queue. */
#if defined(ISC_RWLOCK_USESTDATOMIC)
int_fast32_t zero = 0;
if (!atomic_compare_exchange_strong_explicit
(&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
memory_order_relaxed, memory_order_relaxed))
return (ISC_R_LOCKBUSY);
#else
cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
WRITER_ACTIVE);
if (cntflag != 0)
return (ISC_R_LOCKBUSY);
#endif
/*
* XXXJT: jump into the queue, possibly breaking the writer
* order.
*/
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_fetch_sub_explicit(&rwl->write_completions, 1,
memory_order_relaxed);
#else
(void)isc_atomic_xadd(&rwl->write_completions, -1);
#endif
rwl->write_granted++;
}
@@ -505,7 +423,6 @@ isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl));
#if defined(ISC_RWLOCK_USESTDATOMIC)
{
int_fast32_t reader_incr = READER_INCR;
@@ -531,30 +448,6 @@ isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
return (ISC_R_LOCKBUSY);
}
#else
{
int32_t prevcnt;
/* Try to acquire write access. */
prevcnt = isc_atomic_cmpxchg(&rwl->cnt_and_flag,
READER_INCR, WRITER_ACTIVE);
/*
* There must have been no writer, and there must have
* been at least one reader.
*/
INSIST((prevcnt & WRITER_ACTIVE) == 0 &&
(prevcnt & ~WRITER_ACTIVE) != 0);
if (prevcnt == READER_INCR) {
/*
* We are the only reader and have been upgraded.
* Now jump into the head of the writer waiting queue.
*/
(void)isc_atomic_xadd(&rwl->write_completions, -1);
} else
return (ISC_R_LOCKBUSY);
}
#endif
return (ISC_R_SUCCESS);
}
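
The upgrade can only succeed when the caller is the sole reader: the compare-exchange replaces a cnt_and_flag of exactly READER_INCR (one reader, no writer) with WRITER_ACTIVE, and any other observed value means ISC_R_LOCKBUSY. A condensed sketch, using the same assumed constants as above:

#include <stdatomic.h>
#include <stdbool.h>

#define WRITER_ACTIVE	0x1
#define READER_INCR	0x2

static bool
try_upgrade(atomic_int_fast32_t *cnt_and_flag) {
	int_fast32_t expected = READER_INCR;	/* exactly one reader */
	return atomic_compare_exchange_strong_explicit(
		cnt_and_flag, &expected, WRITER_ACTIVE,
		memory_order_relaxed, memory_order_relaxed);
}
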
@@ -565,7 +458,6 @@ isc_rwlock_downgrade(isc_rwlock_t *rwl) {
REQUIRE(VALID_RWLOCK(rwl));
#if defined(ISC_RWLOCK_USESTDATOMIC)
{
/* Become an active reader. */
prev_readers = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
@@ -580,18 +472,6 @@ isc_rwlock_downgrade(isc_rwlock_t *rwl) {
atomic_fetch_add_explicit(&rwl->write_completions, 1,
memory_order_relaxed);
}
#else
{
/* Become an active reader. */
prev_readers = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
/* We must have been a writer. */
INSIST((prev_readers & WRITER_ACTIVE) != 0);
/* Complete write */
(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
(void)isc_atomic_xadd(&rwl->write_completions, 1);
}
#endif
/* Resume other readers */
LOCK(&rwl->lock);
@@ -612,13 +492,9 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
#endif
if (type == isc_rwlocktype_read) {
#if defined(ISC_RWLOCK_USESTDATOMIC)
prev_cnt = atomic_fetch_sub_explicit(&rwl->cnt_and_flag,
READER_INCR,
memory_order_relaxed);
#else
prev_cnt = isc_atomic_xadd(&rwl->cnt_and_flag, -READER_INCR);
#endif
/*
* If we're the last reader and any writers are waiting, wake
* them up. We need to wake up all of them to ensure the
@@ -637,15 +513,10 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
* Reset the flag, and (implicitly) tell other writers
* we are done.
*/
#if defined(ISC_RWLOCK_USESTDATOMIC)
atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
memory_order_relaxed);
atomic_fetch_add_explicit(&rwl->write_completions, 1,
memory_order_relaxed);
#else
(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
(void)isc_atomic_xadd(&rwl->write_completions, 1);
#endif
if (rwl->write_granted >= rwl->write_quota ||
rwl->write_requests == rwl->write_completions ||
@@ -682,211 +553,3 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
return (ISC_R_SUCCESS);
}
#else /* ISC_RWLOCK_USEATOMIC */
static isc_result_t
doit(isc_rwlock_t *rwl, isc_rwlocktype_t type, bool nonblock) {
bool skip = false;
bool done = false;
isc_result_t result = ISC_R_SUCCESS;
REQUIRE(VALID_RWLOCK(rwl));
LOCK(&rwl->lock);
#ifdef ISC_RWLOCK_TRACE
print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif
if (type == isc_rwlocktype_read) {
if (rwl->readers_waiting != 0)
skip = true;
while (!done) {
if (!skip &&
((rwl->active == 0 ||
(rwl->type == isc_rwlocktype_read &&
(rwl->writers_waiting == 0 ||
rwl->granted < rwl->read_quota)))))
{
rwl->type = isc_rwlocktype_read;
rwl->active++;
rwl->granted++;
done = true;
} else if (nonblock) {
result = ISC_R_LOCKBUSY;
done = true;
} else {
skip = false;
rwl->readers_waiting++;
WAIT(&rwl->readable, &rwl->lock);
rwl->readers_waiting--;
}
}
} else {
if (rwl->writers_waiting != 0)
skip = true;
while (!done) {
if (!skip && rwl->active == 0) {
rwl->type = isc_rwlocktype_write;
rwl->active = 1;
rwl->granted++;
done = true;
} else if (nonblock) {
result = ISC_R_LOCKBUSY;
done = true;
} else {
skip = false;
rwl->writers_waiting++;
WAIT(&rwl->writeable, &rwl->lock);
rwl->writers_waiting--;
}
}
}
#ifdef ISC_RWLOCK_TRACE
print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif
UNLOCK(&rwl->lock);
return (result);
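
For completeness: doit() backs both the blocking and nonblocking entry points in the non-atomic build, returning ISC_R_LOCKBUSY instead of waiting when nonblock is true. The wrappers are not shown in this diff; the usual BIND pattern would be:

isc_result_t
isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	return (doit(rwl, type, false));
}

isc_result_t
isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	return (doit(rwl, type, true));
}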