forked from hmarui66/blink-tree-go
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathlatchmgr.go
205 lines (171 loc) · 4.46 KB
/
latchmgr.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
package blink_tree
import (
"runtime"
"sync"
"sync/atomic"
)
/*
 * There are six lock types for each node in four independent sets:
 * Set 1
 * 1. AccessIntent: Sharable.
 * Going to Read the node. Incompatible with NodeDelete.
 * 2. NodeDelete: Exclusive.
 * About to release the node. Incompatible with AccessIntent.
 * Set 2
 * 3. ReadLock: Sharable.
 * Read the node. Incompatible with WriteLock.
 * 4. WriteLock: Exclusive.
 * Modify the node. Incompatible with ReadLock and other WriteLocks.
 * Set 3
 * 5. ParentModification: Exclusive.
 * Change the node's parent keys. Incompatible with ParentModification.
 * Set 4
 * 6. AtomicModification: Exclusive.
 * Atomic Update including node is underway. Incompatible with AtomicModification.
 */

// BLTLockMode selects one of the node lock types described above.
// Values are distinct bits so modes can be told apart at a glance,
// though each lock lives in its own independent BLTRWLock set.
type BLTLockMode int

const (
	LockNone   BLTLockMode = 0
	LockAccess BLTLockMode = 1  // shared access intent (set 1)
	LockDelete BLTLockMode = 2  // exclusive node delete (set 1)
	LockRead   BLTLockMode = 4  // shared read (set 2)
	LockWrite  BLTLockMode = 8  // exclusive modify (set 2)
	LockParent BLTLockMode = 16 // exclusive parent-key change (set 3)
	//LockAtomic BLTLockMode = 32 // Note: not supported in this golang implementation
)
// Bit layout of the low bits of BLTRWLock.rin, used by the phase-fair
// reader/writer lock implementation below. The two low bits belong to
// the writer; readers are counted in the bits above them.
const (
	PhID = 0x1 // writer phase-id bit (ticket & 1, so it alternates per writer)
	Pres = 0x2 // writer-present bit
	Mask = 0x3 // Pres | PhID: all writer bits
	RInc = 0x4 // per-reader increment, added above the writer bits
)
type (
	// BLTRWLock is definition for phase-fair reader/writer lock implementation.
	// Writers are FIFO-ordered by ticket/serving; readers arrive through rin
	// and depart through rout (each reader adds RInc to both in turn).
	BLTRWLock struct {
		rin     uint32 // reader arrival count plus writer bits (Pres|PhID) in the low bits
		rout    uint32 // reader departure count
		ticket  uint32 // next writer ticket to hand out
		serving uint32 // writer ticket currently being served
	}
	// SpinLatch is a spin latch implementation built on a mutex-guarded
	// reader count with writer priority via the pending flag.
	SpinLatch struct {
		mu        sync.Mutex
		exclusive bool // exclusive is set for write access
		pending   bool // a writer is waiting; new readers hold off
		share     uint16 // share is count of read accessors grant write lock when share == 0
	}
	// HashEntry is hash table entries
	HashEntry struct {
		slot  uint // latch table entry at head of chain
		latch SpinLatch
	}
	// Latchs is latch manager table structure
	Latchs struct {
		pageNo   Uid       // latch set page number
		readWr   BLTRWLock // read / write page lock
		access   BLTRWLock // access intent / page delete
		parent   BLTRWLock // posting of fence key in parent
		atomic   BLTRWLock // atomic update in progress
		split    uint      // right split page atomic insert
		entry    uint      // entry slot in latch table
		next     uint      // next entry in hash table chain
		prev     uint      // prev entry in hash table chain
		pin      uint32    // number of outstanding threads
		dirty    bool      // page in cache is dirty
		atomicID uint      // thread id holding atomic lock
	}
)
// WriteLock acquires the exclusive side of the phase-fair reader/writer
// lock. Each writer takes the next ticket and spins until it is being
// served (FIFO among writers), then publishes its presence and phase bit
// in the low bits of rin and waits for every reader that arrived before
// it to drain through rout.
func (lock *BLTRWLock) WriteLock() {
	// take a ticket; tix is this writer's position in the writer FIFO
	tix := atomic.AddUint32(&lock.ticket, 1) - 1
	// wait for our ticket to come up; serving is advanced concurrently by
	// WriteRelease, so it must be read atomically (plain reads here are a
	// data race with no visibility guarantee under the Go memory model)
	for tix != atomic.LoadUint32(&lock.serving) {
		runtime.Gosched()
	}
	// publish writer-present plus this writer's phase bit, and snapshot
	// the reader arrival count as it was before the bits were set
	w := Pres | (tix & PhID)
	r := atomic.AddUint32(&lock.rin, w) - w
	// wait until every reader that arrived before us has departed;
	// rout is bumped concurrently by ReadRelease, so load it atomically
	for r != atomic.LoadUint32(&lock.rout) {
		runtime.Gosched()
	}
}
// WriteRelease drops the exclusive lock: it clears the writer bits in rin
// so blocked readers may proceed, then advances serving to admit the next
// ticketed writer.
func (lock *BLTRWLock) WriteRelease() {
	// atomically clear the writer-present and phase bits
	FetchAndAndUint32(&lock.rin, ^uint32(Mask))
	// advance the ticket being served; must be atomic because waiting
	// writers poll serving concurrently in WriteLock (the original
	// lock.serving++ was an unsynchronized read-modify-write)
	atomic.AddUint32(&lock.serving, 1)
}
// ReadLock acquires the shared side of the lock. The reader registers its
// arrival in rin; if a writer was present at registration time the reader
// waits only until that writer's phase bits change (phase-fair: it never
// waits for writers that arrive after it).
func (lock *BLTRWLock) ReadLock() {
	// register arrival and snapshot the writer bits as they were then
	w := (atomic.AddUint32(&lock.rin, RInc) - RInc) & Mask
	if w > 0 {
		// spin until the writer bits differ from the snapshot (the writer
		// we saw released, or a new phase began); rin is mutated by other
		// goroutines, so it must be loaded atomically here
		for w == atomic.LoadUint32(&lock.rin)&Mask {
			runtime.Gosched()
		}
	}
}
// ReadRelease departs the shared lock by counting this reader out through
// rout; a writer in WriteLock waits until rout catches up with its
// snapshot of rin.
func (lock *BLTRWLock) ReadRelease() {
	atomic.AddUint32(&lock.rout, RInc)
}
// SpinReadLock waits until no writer holds or is waiting for the latch,
// then adds one to the share count.
func (l *SpinLatch) SpinReadLock() {
	for {
		// obtain latch mutex
		l.mu.Lock()
		// grant the read only if no exclusive holder and no writer is
		// pending (pending writers get priority so they are not starved)
		granted := !(l.exclusive || l.pending)
		if granted {
			l.share++
		}
		l.mu.Unlock()
		if granted {
			return
		}
		// yield before retrying instead of hammering the mutex
		// (the original C source used sched_yield() here)
		runtime.Gosched()
	}
}
// SpinWriteLock waits for all readers and any other writer to relinquish
// the latch, then takes it exclusively. While blocked it raises pending,
// which keeps new readers out so the writer cannot be starved.
func (l *SpinLatch) SpinWriteLock() {
	for {
		// obtain latch mutex
		l.mu.Lock()
		granted := !(l.share > 0 || l.exclusive)
		if granted {
			l.exclusive = true
			l.pending = false
		} else {
			// make new readers wait so this writer eventually gets in
			l.pending = true
		}
		l.mu.Unlock()
		if granted {
			return
		}
		// yield before retrying instead of hammering the mutex
		// (the original C source used sched_yield() here)
		runtime.Gosched()
	}
}
// SpinWriteTry attempts to take the latch exclusively without blocking.
// It reports whether the write latch was obtained. Unlike SpinWriteLock
// it never sets pending — a failed try leaves the latch state untouched.
func (l *SpinLatch) SpinWriteTry() bool {
	// a contended mutex already means someone else is at the latch
	if !l.mu.TryLock() {
		return false
	}
	if l.share > 0 || l.exclusive {
		l.mu.Unlock()
		return false
	}
	l.exclusive = true
	l.mu.Unlock()
	return true
}
// SpinReleaseWrite clears exclusive (write) mode on the latch.
func (l *SpinLatch) SpinReleaseWrite() {
	l.mu.Lock()
	l.exclusive = false
	l.mu.Unlock()
}
// SpinReleaseRead counts one reader off of the latch.
func (l *SpinLatch) SpinReleaseRead() {
	l.mu.Lock()
	l.share--
	l.mu.Unlock()
}