mirror of https://github.com/tinygo-org/tinygo.git
Authored by Nia Waldvogel 3 years ago; committed by Ron Evans.
5 changed files with 386 additions and 0 deletions
@@ -0,0 +1,83 @@
package sync_test

import (
	"sync"
	"sync/atomic"
	"testing"
)

// TestCondSignal tests waiting on a Cond and notifying it with Signal.
func TestCondSignal(t *testing.T) {
	// Create a Cond with a normal mutex.
	cond := sync.Cond{
		L: &sync.Mutex{},
	}
	cond.L.Lock()

	// Start a goroutine to signal us once we wait.
	var signaled uint32
	go func() {
		// Wait for the test goroutine to wait.
		cond.L.Lock()
		defer cond.L.Unlock()

		// Send a signal to the test goroutine.
		atomic.StoreUint32(&signaled, 1)
		cond.Signal()
	}()

	// Wait for a signal.
	// This will unlock the mutex and allow the spawned goroutine to run.
	cond.Wait()
	if atomic.LoadUint32(&signaled) == 0 {
		t.Error("wait returned before a signal was sent")
	}
}

// TestCondBroadcast tests waking all waiters on a Cond with Broadcast.
func TestCondBroadcast(t *testing.T) {
	// Create a Cond with an RWMutex.
	var mu sync.RWMutex
	cond := sync.Cond{
		L: mu.RLocker(),
	}

	// Start goroutines to wait for the broadcast.
	var wg sync.WaitGroup
	const n = 5
	for i := 0; i < n; i++ {
		wg.Add(1)
		mu.RLock()
		go func() {
			defer wg.Done()

			cond.Wait()
		}()
	}

	// Wait for all goroutines to start waiting.
	mu.Lock()

	// Broadcast to all of the waiting goroutines.
	cond.Broadcast()

	// Wait for all spawned goroutines to process the broadcast.
	mu.Unlock()
	wg.Wait()
}

// TestCondUnlockNotify verifies that a signal is processed even if it happens during the mutex unlock in Wait.
func TestCondUnlockNotify(t *testing.T) {
	// Create a Cond that signals itself when waiting.
	var cond sync.Cond
	cond.L = fakeLocker{cond.Signal}

	cond.Wait()
}

// fakeLocker is a fake sync.Locker whose Unlock calls an arbitrary function.
type fakeLocker struct {
	unlock func()
}

func (l fakeLocker) Lock()   {}
func (l fakeLocker) Unlock() { l.unlock() }
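The tests above drive Signal and Broadcast directly; ordinary Cond code additionally re-checks its predicate in a loop around Wait, since Wait can return while the condition is still false. A minimal sketch of that pattern, assuming an invented queue type that is not part of this patch:

package main

import (
	"fmt"
	"sync"
)

// queue is a hypothetical FIFO guarded by a Cond; the type and
// method names are illustrative, not from the TinyGo change.
type queue struct {
	mu    sync.Mutex
	cond  *sync.Cond
	items []int
}

func newQueue() *queue {
	q := &queue{}
	q.cond = sync.NewCond(&q.mu)
	return q
}

func (q *queue) push(v int) {
	q.mu.Lock()
	q.items = append(q.items, v)
	q.mu.Unlock()
	q.cond.Signal() // wake one waiting pop
}

func (q *queue) pop() int {
	q.mu.Lock()
	defer q.mu.Unlock()
	// Re-check the predicate in a loop: Wait atomically unlocks
	// q.mu, suspends, and re-locks before returning.
	for len(q.items) == 0 {
		q.cond.Wait()
	}
	v := q.items[0]
	q.items = q.items[1:]
	return v
}

func main() {
	q := newQueue()
	go q.push(42)
	fmt.Println(q.pop()) // prints 42
}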
@@ -0,0 +1,205 @@
package sync_test

import (
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
)

// TestMutexUncontended tests locking and unlocking a Mutex that is not shared with any other goroutines.
func TestMutexUncontended(t *testing.T) {
	var mu sync.Mutex

	// Lock and unlock the mutex a few times.
	for i := 0; i < 3; i++ {
		mu.Lock()
		mu.Unlock()
	}
}

// TestMutexConcurrent tests a mutex concurrently from multiple goroutines.
// It will fail if multiple goroutines hold the lock simultaneously.
func TestMutexConcurrent(t *testing.T) {
	var mu sync.Mutex
	var active uint
	var completed uint
	ok := true

	const n = 10
	for i := 0; i < n; i++ {
		j := i
		go func() {
			// Delay a bit.
			for k := j; k > 0; k-- {
				runtime.Gosched()
			}

			mu.Lock()

			// Increment the active counter.
			active++

			if active > 1 {
				// Multiple goroutines are holding the lock at the same time.
				ok = false
			} else {
				// Delay a bit.
				for k := j; k < n; k++ {
					runtime.Gosched()
				}
			}

			// Decrement the active counter.
			active--

			// This goroutine has completed.
			completed++

			mu.Unlock()
		}()
	}

	// Wait for everything to finish.
	var done bool
	for !done {
		// Wait a bit for other things to run.
		runtime.Gosched()

		// Acquire the lock and check whether everything has completed.
		mu.Lock()
		done = completed == n
		mu.Unlock()
	}
	if !ok {
		t.Error("lock held concurrently")
	}
}

// TestRWMutexUncontended tests locking and unlocking an RWMutex that is not shared with any other goroutines.
func TestRWMutexUncontended(t *testing.T) {
	var mu sync.RWMutex

	// Lock the mutex exclusively and then unlock it.
	mu.Lock()
	mu.Unlock()

	// Acquire several read locks.
	const n = 5
	for i := 0; i < n; i++ {
		mu.RLock()
	}

	// Release all of the read locks.
	for i := 0; i < n; i++ {
		mu.RUnlock()
	}

	// Re-acquire the lock exclusively.
	mu.Lock()
	mu.Unlock()
}

// TestRWMutexWriteToRead tests the transition from a write lock to a read lock while contended.
func TestRWMutexWriteToRead(t *testing.T) {
	// Create a new RWMutex and acquire a write lock.
	var mu sync.RWMutex
	mu.Lock()

	const n = 3
	var readAcquires uint32
	var completed uint32
	var unlocked uint32
	var bad uint32
	for i := 0; i < n; i++ {
		go func() {
			// Acquire a read lock.
			mu.RLock()

			// Verify that the write lock is supposed to be released by now.
			if atomic.LoadUint32(&unlocked) == 0 {
				// The write lock is still being held.
				atomic.AddUint32(&bad, 1)
			}

			// Add ourselves to the read lock counter.
			atomic.AddUint32(&readAcquires, 1)

			// Wait for all goroutines to hold the read lock simultaneously.
			for atomic.LoadUint32(&readAcquires) < n {
				runtime.Gosched()
			}

			// Notify of completion.
			atomic.AddUint32(&completed, 1)

			// Release the read lock.
			mu.RUnlock()
		}()
	}

	// Wait a bit for the goroutines to block.
	for i := 0; i < 3*n; i++ {
		runtime.Gosched()
	}

	// Release the write lock so that the goroutines can acquire read locks.
	atomic.StoreUint32(&unlocked, 1)
	mu.Unlock()

	// Wait for everything to complete.
	for atomic.LoadUint32(&completed) < n {
		runtime.Gosched()
	}

	// Acquire another write lock.
	mu.Lock()

	if bad != 0 {
		t.Error("read lock acquired while write-locked")
	}
}

// TestRWMutexReadToWrite tests the transition from a read lock to a write lock while contended.
func TestRWMutexReadToWrite(t *testing.T) {
	// Create a new RWMutex and read-lock it several times.
	const n = 3
	var mu sync.RWMutex
	var readers uint32
	for i := 0; i < n; i++ {
		mu.RLock()
		readers++
	}

	// Start a goroutine to acquire a write lock.
	result := ^uint32(0)
	go func() {
		// Acquire a write lock.
		mu.Lock()

		// Check for active readers.
		readers := atomic.LoadUint32(&readers)

		mu.Unlock()

		// Report the number of active readers.
		atomic.StoreUint32(&result, readers)
	}()

	// Release the read locks.
	for i := 0; i < n; i++ {
		runtime.Gosched()
		atomic.AddUint32(&readers, ^uint32(0))
		mu.RUnlock()
	}

	// Wait for a result.
	var res uint32
	for res == ^uint32(0) {
		runtime.Gosched()
		res = atomic.LoadUint32(&result)
	}
	if res != 0 {
		t.Errorf("write lock acquired while %d readers were active", res)
	}
}
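The write-to-read and read-to-write transitions tested above are exactly what the common read-mostly use of RWMutex depends on: many concurrent readers, writers fully exclusive. A minimal sketch of that usage, with a hypothetical hitCounter type invented for illustration:

package main

import (
	"fmt"
	"sync"
)

// hitCounter is an illustrative read-mostly map guarded by an
// RWMutex; the type and method names are not from the patch.
type hitCounter struct {
	mu     sync.RWMutex
	counts map[string]int
}

func (c *hitCounter) get(key string) int {
	c.mu.RLock() // many readers may hold this simultaneously
	defer c.mu.RUnlock()
	return c.counts[key]
}

func (c *hitCounter) inc(key string) {
	c.mu.Lock() // a writer excludes readers and other writers
	defer c.mu.Unlock()
	c.counts[key]++
}

func main() {
	c := &hitCounter{counts: make(map[string]int)}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.inc("page")
		}()
	}
	wg.Wait()
	fmt.Println(c.get("page")) // prints 4
}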
@@ -0,0 +1,62 @@
package sync_test

import (
	"sync"
	"testing"
)

// TestOnceUncontended tests Once on a single goroutine.
func TestOnceUncontended(t *testing.T) {
	var once sync.Once
	{
		var ran bool
		once.Do(func() {
			ran = true
		})
		if !ran {
			t.Error("first call to Do did not run")
		}
	}
	{
		var ran bool
		once.Do(func() {
			ran = true
		})
		if ran {
			t.Error("second call to Do ran")
		}
	}
}

// TestOnceConcurrent tests multiple concurrent invocations of sync.Once.
func TestOnceConcurrent(t *testing.T) {
	var once sync.Once
	var mu sync.Mutex
	mu.Lock()
	var ran bool
	var ranTwice bool
	once.Do(func() {
		ran = true

		// Start a goroutine and (approximately) wait for it to enter the call to Do.
		var startWait sync.Mutex
		startWait.Lock()
		go func() {
			startWait.Unlock()
			once.Do(func() {
				ranTwice = true
			})
			mu.Unlock()
		}()
		startWait.Lock()
	})
	if !ran {
		t.Error("first call to Do did not run")
	}

	// Wait for the goroutine to finish.
	mu.Lock()
	if ranTwice {
		t.Error("second concurrent call to Once also ran")
	}
}
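TestOnceConcurrent relies on the guarantee that a second Do blocks until the first call's function has returned; that same guarantee is what makes Once suitable for lazy initialization. A minimal sketch under that reading, with an invented config example:

package main

import (
	"fmt"
	"sync"
)

var (
	confOnce sync.Once
	conf     map[string]string // hypothetical lazily-built config
)

// config builds the map exactly once, even with concurrent callers;
// every caller blocks until the first Do has completed, so no caller
// can observe a half-built map.
func config() map[string]string {
	confOnce.Do(func() {
		conf = map[string]string{"mode": "release"}
	})
	return conf
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(config()["mode"])
		}()
	}
	wg.Wait()
}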
@@ -0,0 +1,35 @@
package sync_test

import (
	"sync"
	"testing"
)

// TestWaitGroupUncontended tests the wait group from a single goroutine.
func TestWaitGroupUncontended(t *testing.T) {
	// Check that a single add-and-done works.
	var wg sync.WaitGroup
	wg.Add(1)
	wg.Done()
	wg.Wait()

	// Check that mixing positive and negative counts works.
	wg.Add(10)
	wg.Add(-8)
	wg.Add(-1)
	wg.Add(0)
	wg.Done()
	wg.Wait()
}

// TestWaitGroup tests the typical usage of WaitGroup.
func TestWaitGroup(t *testing.T) {
	const n = 5
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go wg.Done()
	}

	wg.Wait()
}
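TestWaitGroup captures the usual discipline: Add before any goroutine starts, Done inside each one. A minimal sketch of that pattern in ordinary (non-test) code, with placeholder work items:

package main

import (
	"fmt"
	"sync"
)

func main() {
	urls := []string{"a", "b", "c"} // placeholder work items
	var wg sync.WaitGroup
	wg.Add(len(urls)) // set the count before spawning anything

	for _, u := range urls {
		u := u // per-iteration copy (pre-Go 1.22 loop semantics)
		go func() {
			defer wg.Done() // Done runs even if the work panics
			fmt.Println("processing", u)
		}()
	}

	wg.Wait() // block until every Done has been called
}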