# lock

## futex

```go
// This implementation depends on OS-specific implementations of
//
//    futexsleep(addr *uint32, val uint32, ns int64)
//        Atomically,
//            if *addr == val { sleep }
//        Might be woken up spuriously; that's allowed.
//        Don't sleep longer than ns; ns < 0 means forever.
//
//    futexwakeup(addr *uint32, cnt uint32)
//        If any procs are sleeping on addr, wake up at most cnt.

const (
    mutex_unlocked = 0
    mutex_locked   = 1
    mutex_sleeping = 2

    active_spin     = 4
    active_spin_cnt = 30
    passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.

// We use the uintptr mutex.key and note.key as a uint32.
//go:nosplit
func key32(p *uintptr) *uint32 {
    return (*uint32)(unsafe.Pointer(p))
}

func lock(l *mutex) {
    gp := getg()

    if gp.m.locks < 0 {
        throw("runtime·lock: lock count")
    }
    gp.m.locks++

    // Speculative grab for lock.
    v := atomic.Xchg(key32(&l.key), mutex_locked)
    if v == mutex_unlocked {
        return
    }
    // wait is either mutex_locked or mutex_sleeping
    // depending on whether there is a thread sleeping
    // on this mutex. If we ever change l.key from
    // mutex_sleeping to some other value, we must be
    // careful to change it back to mutex_sleeping before
    // returning, to ensure that the sleeping thread gets
    // its wakeup call.
    wait := v

    // On uniprocessors, no point spinning.
    // On multiprocessors, spin for active_spin attempts.
    spin := 0
    if ncpu > 1 {
        spin = active_spin
    }
    for {
        // Try for lock, spinning.
        for i := 0; i < spin; i++ {
            for l.key == mutex_unlocked {
                if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
                    return
                }
            }
            procyield(active_spin_cnt)
        }

        // Try for lock, rescheduling.
        for i := 0; i < passive_spin; i++ {
            for l.key == mutex_unlocked {
                if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
                    return
                }
            }
            osyield()
        }

        // Sleep.
        v = atomic.Xchg(key32(&l.key), mutex_sleeping)
        if v == mutex_unlocked {
            return
        }
        wait = mutex_sleeping
        futexsleep(key32(&l.key), mutex_sleeping, -1)
    }
}

func unlock(l *mutex) {
    v := atomic.Xchg(key32(&l.key), mutex_unlocked)
    if v == mutex_unlocked {
        throw("unlock of unlocked lock")
    }
    if v == mutex_sleeping {
        futexwakeup(key32(&l.key), 1)
    }

    gp := getg()
    gp.m.locks--
    if gp.m.locks < 0 {
        throw("runtime·unlock: lock count")
    }
    if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
        gp.stackguard0 = stackPreempt
    }
}

// One-time notifications.
func noteclear(n *note) {
    n.key = 0
}

func notewakeup(n *note) {
    old := atomic.Xchg(key32(&n.key), 1)
    if old != 0 {
        print("notewakeup - double wakeup (", old, ")\n")
        throw("notewakeup - double wakeup")
    }
    futexwakeup(key32(&n.key), 1)
}

func notesleep(n *note) {
    gp := getg()
    if gp != gp.m.g0 {
        throw("notesleep not on g0")
    }
    ns := int64(-1)
    if *cgo_yield != nil {
        // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
        ns = 10e6
    }
    for atomic.Load(key32(&n.key)) == 0 {
        gp.m.blocked = true
        futexsleep(key32(&n.key), 0, ns)
        if *cgo_yield != nil {
            asmcgocall(*cgo_yield, nil)
        }
        gp.m.blocked = false
    }
}

// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
    gp := getg()

    if ns < 0 {
        if *cgo_yield != nil {
            // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
            ns = 10e6
        }
        for atomic.Load(key32(&n.key)) == 0 {
            gp.m.blocked = true
            futexsleep(key32(&n.key), 0, ns)
            if *cgo_yield != nil {
                asmcgocall(*cgo_yield, nil)
            }
            gp.m.blocked = false
        }
        return true
    }

    if atomic.Load(key32(&n.key)) != 0 {
        return true
    }

    deadline := nanotime() + ns
    for {
        if *cgo_yield != nil && ns > 10e6 {
            ns = 10e6
        }
        gp.m.blocked = true
        futexsleep(key32(&n.key), 0, ns)
        if *cgo_yield != nil {
            asmcgocall(*cgo_yield, nil)
        }
        gp.m.blocked = false
        if atomic.Load(key32(&n.key)) != 0 {
            break
        }
        now := nanotime()
        if now >= deadline {
            break
        }
        ns = deadline - now
    }
    return atomic.Load(key32(&n.key)) != 0
}

func notetsleep(n *note, ns int64) bool {
    gp := getg()
    if gp != gp.m.g0 && gp.m.preemptoff != "" {
        throw("notetsleep not on g0")
    }

    return notetsleep_internal(n, ns)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
    gp := getg()
    if gp == gp.m.g0 {
        throw("notetsleepg on g0")
    }

    entersyscallblock()
    ok := notetsleep_internal(n, ns)
    exitsyscall()
    return ok
}

func pauseSchedulerUntilCallback() bool {
    return false
}

func checkTimeouts() {}
```
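
The comments at the top specify the futex contract but not an implementation. Below is a minimal user-space sketch of the same protocol, assuming Linux and `golang.org/x/sys/unix`; the `futexsleep`/`futexwakeup` wrappers and the simplified `lock`/`unlock` (which drop the runtime's spinning phases) are this sketch's own, not the runtime's code.

```go
package main

import (
    "sync/atomic"
    "unsafe"

    "golang.org/x/sys/unix"
)

const (
    mutexUnlocked = 0
    mutexLocked   = 1
    mutexSleeping = 2
)

// futexsleep: if *addr == val, sleep until a wakeup (possibly spurious)
// arrives. A nil timeout pointer means sleep forever.
func futexsleep(addr *uint32, val uint32) {
    unix.Syscall6(unix.SYS_FUTEX, uintptr(unsafe.Pointer(addr)),
        uintptr(unix.FUTEX_WAIT|unix.FUTEX_PRIVATE_FLAG), uintptr(val), 0, 0, 0)
}

// futexwakeup: wake at most cnt threads sleeping on addr.
func futexwakeup(addr *uint32, cnt uint32) {
    unix.Syscall6(unix.SYS_FUTEX, uintptr(unsafe.Pointer(addr)),
        uintptr(unix.FUTEX_WAKE|unix.FUTEX_PRIVATE_FLAG), uintptr(cnt), 0, 0, 0)
}

// lock follows the same three-state protocol as the runtime code above,
// minus the active/passive spinning phases.
func lock(key *uint32) {
    // Speculative grab: the uncontended path is one atomic exchange.
    if atomic.SwapUint32(key, mutexLocked) == mutexUnlocked {
        return
    }
    // Contended: advertise a sleeper, then sleep until the key changes.
    for atomic.SwapUint32(key, mutexSleeping) != mutexUnlocked {
        futexsleep(key, mutexSleeping)
    }
}

func unlock(key *uint32) {
    // Only pay for the wakeup syscall if someone may be sleeping.
    if atomic.SwapUint32(key, mutexUnlocked) == mutexSleeping {
        futexwakeup(key, 1)
    }
}

func main() {
    var key uint32
    lock(&key)
    // ... critical section ...
    unlock(&key)
}
```

Note the invariant the runtime comment insists on: the speculative exchange may briefly overwrite `mutex_sleeping` with `mutex_locked`, but the contended loop always writes `mutex_sleeping` back before sleeping, so a wakeup can never be lost, only occasionally wasted.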

## sync.RWMutex
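
A minimal usage sketch of the standard library's `sync.RWMutex`: any number of `RLock` holders may proceed concurrently, while `Lock` is exclusive and waits for in-flight readers to drain.

```go
package main

import (
    "fmt"
    "sync"
)

func main() {
    var (
        mu    sync.RWMutex
        cache = map[string]int{"a": 1}
        wg    sync.WaitGroup
    )

    // Readers share the lock: all three goroutines may hold RLock at once.
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            mu.RLock()
            defer mu.RUnlock()
            fmt.Println(cache["a"])
        }()
    }
    wg.Wait()

    // A writer takes the lock exclusively, blocking readers and writers.
    mu.Lock()
    cache["a"] = 2
    mu.Unlock()
}
```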

## atomic
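
The runtime code above uses its internal atomic package; user code gets the same primitives from `sync/atomic` (`Xchg` corresponds to `SwapUint32`, `Cas` to `CompareAndSwapUint32`). A minimal sketch:

```go
package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func main() {
    var n int64
    var wg sync.WaitGroup

    // Lock-free counter: Add is a single atomic read-modify-write.
    for i := 0; i < 100; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            atomic.AddInt64(&n, 1)
        }()
    }
    wg.Wait()
    fmt.Println(atomic.LoadInt64(&n)) // 100

    // CompareAndSwap is the primitive behind the runtime's lock loop
    // (atomic.Cas above): set 0 -> 1 only if the value is still 0.
    var state uint32
    if atomic.CompareAndSwapUint32(&state, 0, 1) {
        fmt.Println("acquired")
    }
}
```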

## sync.Map
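
`sync.Map` trades the generality of a mutex-guarded map for cheap concurrent reads; it is optimized for keys that are written once and read many times (caches, registries). A minimal usage sketch:

```go
package main

import (
    "fmt"
    "sync"
)

func main() {
    var m sync.Map

    m.Store("a", 1)
    if v, ok := m.Load("a"); ok {
        fmt.Println(v) // 1
    }

    // LoadOrStore is an atomic "get, or insert if absent".
    v, loaded := m.LoadOrStore("b", 2)
    fmt.Println(v, loaded) // 2 false

    m.Range(func(k, v any) bool {
        fmt.Println(k, v)
        return true // keep iterating
    })
}
```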