Commit 1da0994e authored by Ondřej Surý, committed by Ondřej Surý

Make isc_rwlock.c thread-safe

The ThreadSanitizer found several possible data races in our rwlock
implementation.  This commit changes all the unprotected variables to
atomics and also changes the explicit memory-ordering calls
(atomic_<foo>_explicit(..., <order>)) to use our convenience macros
(atomic_<foo>_<order>).
parent 50058a4b
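
For readers unfamiliar with the macros this commit introduces at the call
sites: BIND's <isc/atomic.h> wraps the C11 atomic_<foo>_explicit operations
in ordering-suffixed shorthands.  A minimal sketch of the relaxed variants
used below, assuming plain C11 <stdatomic.h> (illustrative only, not the
verbatim header):

	#include <stdatomic.h>

	/* Sketch: each shorthand supplies memory_order_relaxed. */
	#define atomic_load_relaxed(o) \
		atomic_load_explicit((o), memory_order_relaxed)
	#define atomic_store_relaxed(o, v) \
		atomic_store_explicit((o), (v), memory_order_relaxed)
	#define atomic_fetch_add_relaxed(o, v) \
		atomic_fetch_add_explicit((o), (v), memory_order_relaxed)
	#define atomic_fetch_sub_relaxed(o, v) \
		atomic_fetch_sub_explicit((o), (v), memory_order_relaxed)

Relaxed ordering preserves atomicity (no torn reads or writes, hence no
data race for ThreadSanitizer to report) without imposing any inter-thread
ordering; the rwlock's mutex and condition variables provide the ordering
where it matters.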
@@ -35,7 +35,7 @@ struct isc_rwlock {
 	/* Unlocked. */
 	unsigned int magic;
 	isc_mutex_t lock;
-	int32_t spins;
+	atomic_int_fast32_t spins;
 	/*
 	 * When some atomic instructions with hardware assistance are
@@ -61,7 +61,7 @@ struct isc_rwlock {
 	unsigned int readers_waiting;
 	/* Locked by rwlock itself. */
-	unsigned int write_granted;
+	atomic_uint_fast32_t write_granted;
 	/* Unlocked. */
 	unsigned int write_quota;
@@ -83,11 +83,11 @@ print_lock(const char *operation, isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		"write_granted=%u, write_quota=%u\n",
 		rwl, isc_thread_self(), operation,
 		(type == isc_rwlocktype_read ? "read" : "write"),
-		atomic_load_explicit(&rwl->write_requests, memory_order_relaxed),
-		atomic_load_explicit(&rwl->write_completions, memory_order_relaxed),
-		atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed),
+		atomic_load_relaxed(&rwl->write_requests),
+		atomic_load_relaxed(&rwl->write_completions),
+		atomic_load_relaxed(&rwl->cnt_and_flag),
 		rwl->readers_waiting,
-		rwl->write_granted, rwl->write_quota);
+		atomic_load_relaxed(&rwl->write_granted), rwl->write_quota);
 }
 #endif /* ISC_RWLOCK_TRACE */
@@ -103,12 +103,12 @@ isc_rwlock_init(isc_rwlock_t *rwl, unsigned int read_quota,
 	 */
 	rwl->magic = 0;
-	rwl->spins = 0;
+	atomic_init(&rwl->spins, 0);
 	atomic_init(&rwl->write_requests, 0);
 	atomic_init(&rwl->write_completions, 0);
 	atomic_init(&rwl->cnt_and_flag, 0);
 	rwl->readers_waiting = 0;
-	rwl->write_granted = 0;
+	atomic_init(&rwl->write_granted, 0);
 	if (read_quota != 0) {
 		UNEXPECTED_ERROR(__FILE__, __LINE__,
 				 "read quota is not supported");
@@ -131,9 +131,9 @@ void
 isc_rwlock_destroy(isc_rwlock_t *rwl) {
 	REQUIRE(VALID_RWLOCK(rwl));
-	REQUIRE(atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) ==
-		atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) &&
-		atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) == 0 && rwl->readers_waiting == 0);
+	REQUIRE(atomic_load_relaxed(&rwl->write_requests) ==
+		atomic_load_relaxed(&rwl->write_completions) &&
+		atomic_load_relaxed(&rwl->cnt_and_flag) == 0 && rwl->readers_waiting == 0);
 	rwl->magic = 0;
 	(void)isc_condition_destroy(&rwl->readable);
@@ -217,13 +217,13 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 #endif
 	if (type == isc_rwlocktype_read) {
-		if (atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
-		    atomic_load_explicit(&rwl->write_completions, memory_order_relaxed))
+		if (atomic_load_relaxed(&rwl->write_requests) !=
+		    atomic_load_relaxed(&rwl->write_completions))
 		{
 			/* there is a waiting or active writer */
 			LOCK(&rwl->lock);
-			if (atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
-			    atomic_load_explicit(&rwl->write_completions, memory_order_relaxed)) {
+			if (atomic_load_relaxed(&rwl->write_requests) !=
+			    atomic_load_relaxed(&rwl->write_completions)) {
 				rwl->readers_waiting++;
 				WAIT(&rwl->readable, &rwl->lock);
 				rwl->readers_waiting--;
@@ -231,18 +231,22 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 			UNLOCK(&rwl->lock);
 		}
-		cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
-						    READER_INCR,
-						    memory_order_relaxed);
+		cntflag = atomic_fetch_add_relaxed(&rwl->cnt_and_flag,
+						   READER_INCR);
 		POST(cntflag);
 		while (1) {
-			if ((atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & WRITER_ACTIVE) == 0)
+			if ((atomic_load_relaxed(&rwl->cnt_and_flag)
+			     & WRITER_ACTIVE) == 0)
 			{
 				break;
 			}
 			/* A writer is still working */
 			LOCK(&rwl->lock);
 			rwl->readers_waiting++;
-			if ((atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & WRITER_ACTIVE) != 0) {
+			if ((atomic_load_relaxed(&rwl->cnt_and_flag)
+			     & WRITER_ACTIVE) != 0)
+			{
 				WAIT(&rwl->readable, &rwl->lock);
 			}
 			rwl->readers_waiting--;
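
For orientation while reading the loops above: cnt_and_flag packs the
writer flag and the reader count into a single atomic word, so one
fetch_add both registers a reader and reports whether a writer was active.
A sketch of the layout, using constants defined earlier in this file (the
values shown, 0x1 and 0x200, are an assumption about the surrounding
source at the time of this commit):

	/*
	 * cnt_and_flag packs two fields into one atomic word:
	 *   bit 0       - WRITER_ACTIVE: a writer holds the lock
	 *   bits 9..31  - reader count, in units of READER_INCR
	 */
	#define WRITER_ACTIVE	0x1
	#define READER_INCR	0x200

	/* e.g. three active readers and no writer:
	 *   cnt_and_flag == 3 * READER_INCR == 0x600 */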
@@ -279,16 +283,19 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		 * quota, reset the condition (race among readers doesn't
 		 * matter).
 		 */
-		rwl->write_granted = 0;
+		atomic_store_relaxed(&rwl->write_granted, 0);
 	} else {
 		int32_t prev_writer;
 		/* enter the waiting queue, and wait for our turn */
-		prev_writer = atomic_fetch_add_explicit(&rwl->write_requests, 1,
-							memory_order_relaxed);
-		while (atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) != prev_writer) {
+		prev_writer = atomic_fetch_add_relaxed(&rwl->write_requests, 1);
+		while (atomic_load_relaxed(&rwl->write_completions)
+		       != prev_writer)
+		{
 			LOCK(&rwl->lock);
-			if (atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) != prev_writer) {
+			if (atomic_load_relaxed(&rwl->write_completions)
+			    != prev_writer)
+			{
 				WAIT(&rwl->writeable, &rwl->lock);
 				UNLOCK(&rwl->lock);
 				continue;
@@ -299,23 +306,23 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		while (1) {
 			int_fast32_t zero = 0;
-			if (atomic_compare_exchange_strong_explicit
-			    (&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
-			     memory_order_relaxed, memory_order_relaxed))
+			if (atomic_compare_exchange_strong_relaxed(
+				    &rwl->cnt_and_flag, &zero, WRITER_ACTIVE))
 			{
 				break;
 			}
 			/* Another active reader or writer is working. */
 			LOCK(&rwl->lock);
-			if (atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) != 0) {
+			if (atomic_load_relaxed(&rwl->cnt_and_flag) != 0) {
 				WAIT(&rwl->writeable, &rwl->lock);
 			}
 			UNLOCK(&rwl->lock);
 		}
-		INSIST((atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & WRITER_ACTIVE));
-		rwl->write_granted++;
+		INSIST((atomic_load_relaxed(&rwl->cnt_and_flag)
+			& WRITER_ACTIVE));
+		atomic_fetch_add_relaxed(&rwl->write_granted, 1);
 	}
 #ifdef ISC_RWLOCK_TRACE
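
The atomic_compare_exchange_strong_relaxed() used in the writer path above
is presumably the same kind of shorthand as the loads and stores, supplying
memory_order_relaxed for both the success and failure orderings; a sketch
(illustrative, not the verbatim header):

	#define atomic_compare_exchange_strong_relaxed(o, e, d)	\
		atomic_compare_exchange_strong_explicit(	\
			(o), (e), (d),				\
			memory_order_relaxed, memory_order_relaxed)

Note that on failure the C11 CAS writes the observed value back through the
expected pointer, which is why the loop above declares zero afresh on every
iteration.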
@@ -328,12 +335,10 @@ isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 isc_result_t
 isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 	int32_t cnt = 0;
-	int32_t max_cnt = rwl->spins * 2 + 10;
+	int32_t spins = atomic_load_relaxed(&rwl->spins) * 2 + 10;
+	int32_t max_cnt = ISC_MAX(spins, RWLOCK_MAX_ADAPTIVE_COUNT);
 	isc_result_t result = ISC_R_SUCCESS;
-	if (max_cnt > RWLOCK_MAX_ADAPTIVE_COUNT)
-		max_cnt = RWLOCK_MAX_ADAPTIVE_COUNT;
 	do {
 		if (cnt++ >= max_cnt) {
 			result = isc__rwlock_lock(rwl, type);
@@ -342,7 +347,7 @@ isc_rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		isc_rwlock_pause();
 	} while (isc_rwlock_trylock(rwl, type) != ISC_R_SUCCESS);
-	rwl->spins += (cnt - rwl->spins) / 8;
+	atomic_fetch_add_relaxed(&rwl->spins, (cnt - spins) / 8);
 	return (result);
 }
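
A note on the adaptive spinning above: the stored estimate moves one-eighth
of the way toward the cost of each acquisition, an exponentially weighted
moving average.  A self-contained walk-through of the arithmetic (values
are hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void) {
		int32_t stored = 0;		/* rwl->spins */
		int32_t spins = stored * 2 + 10; /* baseline, as above */
		int32_t cnt = 34;	/* attempts this acquisition took */

		stored += (cnt - spins) / 8; /* drift 1/8 toward observed cost */
		printf("new estimate: %d\n", stored); /* prints 3 */
		return 0;
	}

Note also that the update subtracts the scaled local spins (stored * 2 + 10)
rather than the raw stored value, so the estimate converges on a
correspondingly scaled fixed point.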
@@ -359,29 +364,28 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 	if (type == isc_rwlocktype_read) {
 		/* If a writer is waiting or working, we fail. */
-		if (atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
-		    atomic_load_explicit(&rwl->write_completions, memory_order_relaxed))
+		if (atomic_load_relaxed(&rwl->write_requests) !=
+		    atomic_load_relaxed(&rwl->write_completions))
 			return (ISC_R_LOCKBUSY);
 		/* Otherwise, be ready for reading. */
-		cntflag = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
-						    READER_INCR,
-						    memory_order_relaxed);
+		cntflag = atomic_fetch_add_relaxed(&rwl->cnt_and_flag,
						   READER_INCR);
 		if ((cntflag & WRITER_ACTIVE) != 0) {
 			/*
 			 * A writer is working. We lose, and cancel the read
 			 * request.
 			 */
-			cntflag = atomic_fetch_sub_explicit
-				(&rwl->cnt_and_flag, READER_INCR,
-				 memory_order_relaxed);
+			cntflag = atomic_fetch_sub_relaxed(
+				&rwl->cnt_and_flag, READER_INCR);
 			/*
 			 * If no other readers are waiting and we've suspended
 			 * new writers in this short period, wake them up.
 			 */
 			if (cntflag == READER_INCR &&
-			    atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) !=
-			    atomic_load_explicit(&rwl->write_requests, memory_order_relaxed)) {
+			    atomic_load_relaxed(&rwl->write_completions) !=
+			    atomic_load_relaxed(&rwl->write_requests))
+			{
 				LOCK(&rwl->lock);
 				BROADCAST(&rwl->writeable);
 				UNLOCK(&rwl->lock);
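
The read-trylock above is an optimistic register-then-rollback pattern: it
increments the reader count first, and only then checks whether a writer
was active, undoing the increment if so.  A stripped-down sketch of the
same shape (simplified; omits the writer wake-up and uses the constants
assumed earlier):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define WRITER_ACTIVE	0x1
	#define READER_INCR	0x200

	static bool
	try_read(atomic_int_fast32_t *cnt_and_flag) {
		/* Optimistically register as a reader... */
		int_fast32_t prev = atomic_fetch_add_explicit(
			cnt_and_flag, READER_INCR, memory_order_relaxed);
		if ((prev & WRITER_ACTIVE) != 0) {
			/* ...and roll back if a writer beat us to it. */
			atomic_fetch_sub_explicit(cnt_and_flag, READER_INCR,
						  memory_order_relaxed);
			return false;
		}
		return true;
	}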
@@ -392,9 +396,8 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 	} else {
 		/* Try locking without entering the waiting queue. */
 		int_fast32_t zero = 0;
-		if (!atomic_compare_exchange_strong_explicit
-		    (&rwl->cnt_and_flag, &zero, WRITER_ACTIVE,
-		     memory_order_relaxed, memory_order_relaxed))
+		if (!atomic_compare_exchange_strong_relaxed(
+			    &rwl->cnt_and_flag, &zero, WRITER_ACTIVE))
 		{
 			return (ISC_R_LOCKBUSY);
 		}
@@ -403,10 +406,8 @@ isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		 * XXXJT: jump into the queue, possibly breaking the writer
 		 * order.
 		 */
-		atomic_fetch_sub_explicit(&rwl->write_completions, 1,
-					  memory_order_relaxed);
-		rwl->write_granted++;
+		atomic_fetch_sub_relaxed(&rwl->write_completions, 1);
+		atomic_fetch_add_relaxed(&rwl->write_granted, 1);
 	}
 #ifdef ISC_RWLOCK_TRACE
@@ -424,9 +425,8 @@ isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
 	int_fast32_t reader_incr = READER_INCR;
 	/* Try to acquire write access. */
-	atomic_compare_exchange_strong_explicit
-		(&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE,
-		 memory_order_relaxed, memory_order_relaxed);
+	atomic_compare_exchange_strong_relaxed(
+		&rwl->cnt_and_flag, &reader_incr, WRITER_ACTIVE);
 	/*
 	 * There must have been no writer, and there must have
 	 * been at least one reader.
@@ -439,8 +439,7 @@ isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
 		 * We are the only reader and have been upgraded.
 		 * Now jump into the head of the writer waiting queue.
 		 */
-		atomic_fetch_sub_explicit(&rwl->write_completions, 1,
-					  memory_order_relaxed);
+		atomic_fetch_sub_relaxed(&rwl->write_completions, 1);
 	} else
 		return (ISC_R_LOCKBUSY);
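
The upgrade above hinges on the CAS expected value: cnt_and_flag ==
READER_INCR means exactly one reader (the caller) and no writer, the only
state from which an upgrade is legal.  A sketch of the core test, reusing
the includes and constants from the earlier sketch:

	static bool
	try_upgrade(atomic_int_fast32_t *cnt_and_flag) {
		int_fast32_t expected = READER_INCR;
		/* Succeeds only from "one reader, no writer"; on failure,
		 * expected is overwritten with the observed value. */
		return atomic_compare_exchange_strong_explicit(
			cnt_and_flag, &expected, WRITER_ACTIVE,
			memory_order_relaxed, memory_order_relaxed);
	}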
@@ -457,17 +456,14 @@ isc_rwlock_downgrade(isc_rwlock_t *rwl) {
 	{
 		/* Become an active reader. */
-		prev_readers = atomic_fetch_add_explicit(&rwl->cnt_and_flag,
-							 READER_INCR,
-							 memory_order_relaxed);
+		prev_readers = atomic_fetch_add_relaxed(&rwl->cnt_and_flag,
							READER_INCR);
 		/* We must have been a writer. */
 		INSIST((prev_readers & WRITER_ACTIVE) != 0);
 		/* Complete write */
-		atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
-					  memory_order_relaxed);
-		atomic_fetch_add_explicit(&rwl->write_completions, 1,
-					  memory_order_relaxed);
+		atomic_fetch_sub_relaxed(&rwl->cnt_and_flag, WRITER_ACTIVE);
+		atomic_fetch_add_relaxed(&rwl->write_completions, 1);
 	}
 	/* Resume other readers */
@@ -488,17 +484,16 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 #endif
 	if (type == isc_rwlocktype_read) {
-		prev_cnt = atomic_fetch_sub_explicit(&rwl->cnt_and_flag,
-						     READER_INCR,
-						     memory_order_relaxed);
+		prev_cnt = atomic_fetch_sub_relaxed(&rwl->cnt_and_flag,
						    READER_INCR);
 		/*
 		 * If we're the last reader and any writers are waiting, wake
 		 * them up. We need to wake up all of them to ensure the
 		 * FIFO order.
 		 */
 		if (prev_cnt == READER_INCR &&
-		    atomic_load_explicit(&rwl->write_completions, memory_order_relaxed) !=
-		    atomic_load_explicit(&rwl->write_requests, memory_order_relaxed)) {
+		    atomic_load_relaxed(&rwl->write_completions) !=
+		    atomic_load_relaxed(&rwl->write_requests)) {
 			LOCK(&rwl->lock);
 			BROADCAST(&rwl->writeable);
 			UNLOCK(&rwl->lock);
@@ -510,15 +505,16 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		 * Reset the flag, and (implicitly) tell other writers
 		 * we are done.
 		 */
-		atomic_fetch_sub_explicit(&rwl->cnt_and_flag, WRITER_ACTIVE,
-					  memory_order_relaxed);
-		atomic_fetch_add_explicit(&rwl->write_completions, 1,
-					  memory_order_relaxed);
-		if (rwl->write_granted >= rwl->write_quota ||
-		    (atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) ==
-		     atomic_load_explicit(&rwl->write_completions, memory_order_relaxed)) ||
-		    (atomic_load_explicit(&rwl->cnt_and_flag, memory_order_relaxed) & ~WRITER_ACTIVE)) {
+		atomic_fetch_sub_relaxed(&rwl->cnt_and_flag, WRITER_ACTIVE);
+		atomic_fetch_add_relaxed(&rwl->write_completions, 1);
+		if ((atomic_load_relaxed(&rwl->write_granted) >=
+		     rwl->write_quota) ||
+		    (atomic_load_relaxed(&rwl->write_requests) ==
+		     atomic_load_relaxed(&rwl->write_completions)) ||
+		    (atomic_load_relaxed(&rwl->cnt_and_flag)
+		     & ~WRITER_ACTIVE))
+		{
 			/*
 			 * We have passed the write quota, no writer is
 			 * waiting, or some readers are almost ready, pending
@@ -535,8 +531,8 @@ isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
 		UNLOCK(&rwl->lock);
 	}
-	if ((atomic_load_explicit(&rwl->write_requests, memory_order_relaxed) !=
-	     atomic_load_explicit(&rwl->write_completions, memory_order_relaxed)) &&
+	if ((atomic_load_relaxed(&rwl->write_requests) !=
+	     atomic_load_relaxed(&rwl->write_completions)) &&
 	    wakeup_writers) {
 		LOCK(&rwl->lock);
 		BROADCAST(&rwl->writeable);
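
As for how races like the pre-patch rwl->spins and write_granted updates
are found: building with -fsanitize=thread (GCC or Clang) and exercising
the code from several threads makes ThreadSanitizer report each
unsynchronized read/write pair.  A minimal reproduction of the same class
of bug, independent of BIND:

	/* cc -fsanitize=thread -g race.c -o race -lpthread */
	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	static int32_t spins;	/* unprotected, like the pre-patch rwl->spins */

	static void *
	worker(void *arg) {
		(void)arg;
		for (int i = 0; i < 100000; i++) {
			spins += 1;	/* TSan: data race (read-modify-write) */
		}
		return NULL;
	}

	int
	main(void) {
		pthread_t t1, t2;
		pthread_create(&t1, NULL, worker, NULL);
		pthread_create(&t2, NULL, worker, NULL);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		printf("spins = %d\n", (int)spins);
		return 0;
	}

Declaring the counter atomic_int_fast32_t and using the relaxed operations,
as this commit does, silences the report because every access becomes
atomic.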