import (
	"fmt"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
)
// getGID parses the current goroutine's ID out of the stack trace written by
// runtime.Stack, which starts with "goroutine <id> [running]: ...".
func getGID() int64 {
	b := make([]byte, 64)
	b = b[:runtime.Stack(b, false)]
	// Strip the "goroutine " prefix, then keep the digits up to the next space.
	b = b[10:]
	b = b[:strings.Index(string(b), " ")]
	var gid int64
	fmt.Sscanf(string(b), "%d", &gid)
	return gid
}
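To see what getGID returns, one can print it from two different goroutines; the main function below is added only for illustration:

func main() {
	fmt.Println("main goroutine id:", getGID())
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("child goroutine id:", getGID())
	}()
	wg.Wait()
}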
// RecursiveMutex wraps sync.Mutex to make it reentrant for a single goroutine.
type RecursiveMutex struct {
	sync.Mutex
	owner     int64  // goroutine ID of the current lock holder
	recursion uint32 // how many times the owner has re-entered the lock
}

func (rm *RecursiveMutex) Lock() {
	gid := getGID()
	// If the calling goroutine already holds the lock, just count the re-entry.
	if atomic.LoadInt64(&rm.owner) == gid {
		rm.recursion++
		return
	}
	rm.Mutex.Lock()
	// First acquisition by this goroutine: record the owner and reset the count.
	atomic.StoreInt64(&rm.owner, gid)
	rm.recursion = 1
}

func (rm *RecursiveMutex) Unlock() {
	gid := getGID()
	// Releasing a lock owned by another goroutine is a programming error.
	if atomic.LoadInt64(&rm.owner) != gid {
		panic(fmt.Sprintf("wrong owner(%d): %d!", rm.owner, gid))
	}
	// Undo one level of re-entry; only the outermost Unlock releases the mutex.
	rm.recursion--
	if rm.recursion != 0 {
		return
	}
	atomic.StoreInt64(&rm.owner, -1)
	rm.Mutex.Unlock()
}
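A short sketch of how the reentrant lock behaves; the nested helper is hypothetical and exists only to show the re-entry that would deadlock a plain sync.Mutex:

var rm RecursiveMutex

// doNested re-acquires the lock the caller already holds.
func doNested() {
	rm.Lock()
	defer rm.Unlock()
	fmt.Println("nested critical section")
}

func outer() {
	rm.Lock()
	defer rm.Unlock()
	doNested() // safe here; a plain sync.Mutex would deadlock on this call
}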
// sync.RWMutex, as declared inside the standard library's sync package:
type RWMutex struct {
	w           Mutex        // held if there are pending writers
	writerSem   uint32       // semaphore for writers to wait for completing readers
	readerSem   uint32       // semaphore for readers to wait for completing writers
	readerCount atomic.Int32 // number of pending readers
	readerWait  atomic.Int32 // number of departing readers
}

// rwmutexMaxReaders bounds the number of concurrent readers.
const rwmutexMaxReaders = 1 << 30
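The readerCount and readerWait fields are what let many readers hold the lock while a pending writer waits for them to drain. A usage sketch of the public API those fields back, with a hypothetical Counter type:

// Counter is a read-mostly value guarded by a sync.RWMutex.
type Counter struct {
	mu    sync.RWMutex
	count int64
}

func (c *Counter) Incr() {
	c.mu.Lock() // writers take the lock exclusively
	defer c.mu.Unlock()
	c.count++
}

func (c *Counter) Count() int64 {
	c.mu.RLock() // any number of readers may hold the lock concurrently
	defer c.mu.RUnlock()
	return c.count
}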
// sync.WaitGroup, as declared inside the standard library's sync package:
type WaitGroup struct {
	noCopy noCopy

	state atomic.Uint64 // high 32 bits are counter, low 32 bits are waiter count.
	sema  uint32
}
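Because the task counter and the waiter count share the single state word, Add, Done and Wait can coordinate through one atomic value. A brief usage sketch, with a hypothetical runWorkers driver:

func runWorkers(n int) {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1) // bump the counter in the high 32 bits of state
		go func(id int) {
			defer wg.Done() // decrement the counter when this worker exits
			fmt.Println("worker", id, "finished")
		}(i)
	}
	wg.Wait() // block until the counter drops back to zero
}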
// Typical usage pattern for Cond, from the sync package documentation:
//
//	c.L.Lock()
//	for !condition() {
//		c.Wait()
//	}
//	... make use of condition ...
//	c.L.Unlock()
func (c *Cond) Wait() {
	c.checker.check()
	// Join the notification list before releasing the lock, so a Signal or
	// Broadcast that arrives in between is not missed.
	t := runtime_notifyListAdd(&c.notify)
	c.L.Unlock()
	runtime_notifyListWait(&c.notify, t)
	// Re-acquire the lock before returning; the caller must re-check its condition.
	c.L.Lock()
}
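A self-contained sketch of that pattern using the public sync.Cond API; the one-item queue is hypothetical:

func condExample() {
	var (
		mu    sync.Mutex
		cond  = sync.NewCond(&mu)
		queue []int
	)

	// Producer: append an item, then wake any waiting consumer.
	go func() {
		cond.L.Lock()
		queue = append(queue, 42)
		cond.L.Unlock()
		cond.Signal()
	}()

	// Consumer: wait in a loop until the condition actually holds.
	cond.L.Lock()
	for len(queue) == 0 {
		cond.Wait() // releases mu while blocked, re-acquires before returning
	}
	fmt.Println("got:", queue[0])
	cond.L.Unlock()
}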
func (o *Once) doSlow(f func()) {
	o.m.Lock()
	defer o.m.Unlock()
	// Re-check under the lock: another goroutine may have already run f.
	if o.done.Load() == 0 {
		// Mark done only after f returns, so concurrent callers block until f finishes.
		defer o.done.Store(1)
		f()
	}
}
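doSlow is the slow path behind sync.Once.Do. A usage sketch of that public API, with a hypothetical config loader:

var (
	once   sync.Once
	config map[string]string
)

// loadConfig runs exactly once, no matter how many goroutines call getConfig.
func loadConfig() {
	config = map[string]string{"env": "prod"}
	fmt.Println("config loaded")
}

func getConfig() map[string]string {
	once.Do(loadConfig)
	return config
}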