## Deletion
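
The built-in `delete(m, k)` compiles down to a call to `mapdelete`. It hashes the key to locate the bucket, helps out with any in-progress grow, then walks the bucket and its overflow chain; on a match it clears the key and value slots (zeroing any pointers so the GC can reclaim what they reference), marks the cell as `empty` in the tophash array, and decrements the element count:
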
```go
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
    if h == nil || h.count == 0 {
        return
    }
    if h.flags&hashWriting != 0 {
        throw("concurrent map writes")
    }

    alg := t.key.alg
    hash := alg.hash(key, uintptr(h.hash0))

    // Set hashWriting after calling alg.hash, since alg.hash may panic,
    // in which case we have not actually done a write (delete).
    h.flags |= hashWriting

    bucket := hash & bucketMask(h.B)
    if h.growing() {
        growWork(t, h, bucket)
    }
    b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
    top := tophash(hash)
search:
    for ; b != nil; b = b.overflow(t) {
        for i := uintptr(0); i < bucketCnt; i++ {
            if b.tophash[i] != top {
                continue
            }
            k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
            k2 := k
            if t.indirectkey {
                k2 = *((*unsafe.Pointer)(k2))
            }
            if !alg.equal(key, k2) {
                continue
            }
            // Only clear key if there are pointers in it.
            if t.indirectkey {
                *(*unsafe.Pointer)(k) = nil
            } else if t.key.kind&kindNoPointers == 0 {
                memclrHasPointers(k, t.key.size)
            }
            v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
            if t.indirectvalue {
                *(*unsafe.Pointer)(v) = nil
            } else if t.elem.kind&kindNoPointers == 0 {
                memclrHasPointers(v, t.elem.size)
            } else {
                memclrNoHeapPointers(v, t.elem.size)
            }
            b.tophash[i] = empty
            h.count--
            break search
        }
    }

    if h.flags&hashWriting == 0 {
        throw("concurrent map writes")
    }
    h.flags &^= hashWriting
}
```
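
The early returns at the top make the built-in `delete` a safe no-op on nil maps and missing keys. A minimal sketch:

```go
package main

import "fmt"

func main() {
    var nilMap map[string]int
    delete(nilMap, "a") // no-op: mapdelete returns early when h == nil

    m := map[string]int{"a": 1}
    delete(m, "missing") // no-op: the key is never found in the bucket scan
    delete(m, "a")       // clears the slot, sets tophash[i] = empty, h.count--
    fmt.Println(len(m))  // 0
}
```
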
## Growing
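
Growing is incremental: the runtime never moves all buckets at once. Instead, every write to a growing map (see the `h.growing()` check in `mapdelete` above; `mapassign` does the same) calls `growWork`, which evacuates the old bucket backing the bucket about to be used, plus at most one more to guarantee forward progress:
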
```go
func growWork(t *maptype, h *hmap, bucket uintptr) {
    // make sure we evacuate the oldbucket corresponding
    // to the bucket we're about to use
    evacuate(t, h, bucket&h.oldbucketmask())

    // evacuate one more oldbucket to make progress on growing
    if h.growing() {
        evacuate(t, h, h.nevacuate)
    }
}
```
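
`evacuate` does the actual copying for a single old bucket and its overflow chain. On a size-doubling grow, each old bucket splits between two destinations in the new array: x, at the same index, and y, at the old index plus `newbit` (the old bucket count). A same-size grow only ever uses x:
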
```go
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
    b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
    newbit := h.noldbuckets()
    if !evacuated(b) {
        // TODO: reuse overflow buckets instead of using new ones, if there
        // is no iterator using the old buckets. (If !oldIterator.)

        // xy contains the x and y (low and high) evacuation destinations.
        var xy [2]evacDst
        x := &xy[0]
        x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
        x.k = add(unsafe.Pointer(x.b), dataOffset)
        x.v = add(x.k, bucketCnt*uintptr(t.keysize))

        if !h.sameSizeGrow() {
            // Only calculate y pointers if we're growing bigger.
            // Otherwise GC can see bad pointers.
            y := &xy[1]
            y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
            y.k = add(unsafe.Pointer(y.b), dataOffset)
            y.v = add(y.k, bucketCnt*uintptr(t.keysize))
        }

        for ; b != nil; b = b.overflow(t) {
            k := add(unsafe.Pointer(b), dataOffset)
            v := add(k, bucketCnt*uintptr(t.keysize))
            for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
                top := b.tophash[i]
                if top == empty {
                    b.tophash[i] = evacuatedEmpty
                    continue
                }
                if top < minTopHash {
                    throw("bad map state")
                }
                k2 := k
                if t.indirectkey {
                    k2 = *((*unsafe.Pointer)(k2))
                }
                var useY uint8
                if !h.sameSizeGrow() {
                    // Compute hash to make our evacuation decision (whether we need
                    // to send this key/value to bucket x or bucket y).
                    hash := t.key.alg.hash(k2, uintptr(h.hash0))
                    if h.flags&iterator != 0 && !t.reflexivekey && !t.key.alg.equal(k2, k2) {
                        // If key != key (NaNs), then the hash could be (and probably
                        // will be) entirely different from the old hash. Moreover,
                        // it isn't reproducible. Reproducibility is required in the
                        // presence of iterators, as our evacuation decision must
                        // match whatever decision the iterator made.
                        // Fortunately, we have the freedom to send these keys either
                        // way. Also, tophash is meaningless for these kinds of keys.
                        // We let the low bit of tophash drive the evacuation decision.
                        // We recompute a new random tophash for the next level so
                        // these keys will get evenly distributed across all buckets
                        // after multiple grows.
                        useY = top & 1
                        top = tophash(hash)
                    } else {
                        if hash&newbit != 0 {
                            useY = 1
                        }
                    }
                }

                if evacuatedX+1 != evacuatedY {
                    throw("bad evacuatedN")
                }

                b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
                dst := &xy[useY]                 // evacuation destination

                if dst.i == bucketCnt {
                    dst.b = h.newoverflow(t, dst.b)
                    dst.i = 0
                    dst.k = add(unsafe.Pointer(dst.b), dataOffset)
                    dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
                }
                dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
                if t.indirectkey {
                    *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
                } else {
                    typedmemmove(t.key, dst.k, k) // copy value
                }
                if t.indirectvalue {
                    *(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
                } else {
                    typedmemmove(t.elem, dst.v, v)
                }
                dst.i++
                // These updates might push these pointers past the end of the
                // key or value arrays. That's ok, as we have the overflow pointer
                // at the end of the bucket to protect against pointing past the
                // end of the bucket.
                dst.k = add(dst.k, uintptr(t.keysize))
                dst.v = add(dst.v, uintptr(t.valuesize))
            }
        }
        // Unlink the overflow buckets & clear key/value to help GC.
        if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
            b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
            // Preserve b.tophash because the evacuation
            // state is maintained there.
            ptr := add(b, dataOffset)
            n := uintptr(t.bucketsize) - dataOffset
            memclrHasPointers(ptr, n)
        }
    }

    if oldbucket == h.nevacuate {
        advanceEvacuationMark(h, t, newbit)
    }
}
```
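
The x/y split is just a restatement of how a bucket index changes when B grows by one: a key in bucket `hash & (2^B - 1)` lands at `hash & (2^(B+1) - 1)`, which is either the same index or the old index plus `newbit`, depending on a single extra hash bit. A standalone sketch of that decision (`oldIndex`/`newIndex` are hypothetical helpers, not runtime functions):

```go
package main

import "fmt"

// oldIndex and newIndex illustrate why a doubling grow has only two
// possible destinations per key. (Hypothetical helpers, not runtime code.)
func oldIndex(hash, B uint) uint { return hash & (1<<B - 1) }
func newIndex(hash, B uint) uint { return hash & (1<<(B+1) - 1) }

func main() {
    const B = 3           // 8 old buckets
    const newbit = 1 << B // what h.noldbuckets() would return
    for _, hash := range []uint{0x05, 0x0d, 0x25, 0x2d} { // all in old bucket 5
        oi, ni := oldIndex(hash, B), newIndex(hash, B)
        useY := hash&newbit != 0
        // useY=false keeps the index (x); useY=true moves it to oi+newbit (y).
        fmt.Printf("hash=%#04x old=%d new=%d useY=%v\n", hash, oi, ni, useY)
    }
}
```
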
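`evacuate` calls `advanceEvacuationMark` once the bucket at the `h.nevacuate` watermark has been moved. The helper advances the watermark past any buckets that were already evacuated out of order, and when it reaches the end it releases the old arrays:
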
```go
func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
    h.nevacuate++
    // Experiments suggest that 1024 is overkill by at least an order of magnitude.
    // Put it in there as a safeguard anyway, to ensure O(1) behavior.
    stop := h.nevacuate + 1024
    if stop > newbit {
        stop = newbit
    }
    for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
        h.nevacuate++
    }
    if h.nevacuate == newbit { // newbit == # of oldbuckets
        // Growing is all done. Free old main bucket array.
        h.oldbuckets = nil
        // Can discard old overflow buckets as well.
        // If they are still referenced by an iterator,
        // then the iterator holds a pointer to the slice.
        if h.extra != nil {
            h.extra.oldoverflow = nil
        }
        h.flags &^= sameSizeGrow
    }
}
```
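
Note the 1024-bucket cap on the watermark scan: it keeps each write's share of the grow work O(1) even for very large maps. Only when `h.nevacuate` reaches `newbit` (the number of old buckets) are `h.oldbuckets` and `h.extra.oldoverflow` dropped for the GC to free, and the `sameSizeGrow` flag cleared.
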
## Others

For maps whose keys are 32-bit, 64-bit, or string types, Go has specialized, optimized functions for access, assignment, deletion, and growing. For example, mapaccess1 has the counterparts mapaccess1_fast64, mapaccess1_fast32, and mapaccess1_faststr, and mapassign has mapassign_fast64, mapassign_fast32, and mapassign_faststr.
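
A quick way to see which variant the compiler picks is to dump the generated assembly with `go build -gcflags=-S` and look at the CALL targets. A small sketch of the mapping (the comments reflect what a recent gc compiler emits):

```go
package main

func main() {
    m32 := map[int32]string{}   // writes compile to runtime.mapassign_fast32
    m64 := map[int64]string{}   // runtime.mapassign_fast64
    ms := map[string]string{}   // runtime.mapassign_faststr
    mg := map[[2]int64]string{} // other key types use the generic runtime.mapassign

    m32[1] = "a"
    m64[1] = "b"
    ms["k"] = "c"
    mg[[2]int64{1, 2}] = "d"

    // Lookups follow the same pattern: mapaccess1_fast32/fast64/faststr
    // versus the generic mapaccess1.
    _, _, _, _ = m32[1], m64[1], ms["k"], mg[[2]int64{1, 2}]
}
```
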
