
Commit edd60ad

Merge pull request #10 from s0ders/main

feat: now using the preferred testing.B.Loop instead of testing.B.N for benchmarks.

2 parents 2b9f64a + b7e9616

9 files changed, 40 insertions(+), 63 deletions(-)
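
All nine files make the same mechanical change: the counted benchmark loop over b.N becomes a loop governed by b.Loop(), the iteration helper added to the testing package in Go 1.24. A minimal before/after sketch of the pattern (the benchmark names and payload below are illustrative, not taken from this repository):

package bench

import (
    "strings"
    "testing"
)

// Old pattern: iterate b.N times. A package-level sink is typically needed so
// the compiler cannot discard the measured work as dead code.
var sink string

func BenchmarkUpperOld(b *testing.B) {
    for i := 0; i < b.N; i++ {
        sink = strings.ToUpper("some payload")
    }
}

// New pattern (Go 1.24+): b.Loop() reports whether the benchmark should keep
// running and manages the iteration count itself.
func BenchmarkUpperNew(b *testing.B) {
    for b.Loop() {
        strings.ToUpper("some payload")
    }
}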

docs/01-common-patterns/src/batching-ops_test.go

Lines changed: 10 additions & 11 deletions
@@ -10,7 +10,6 @@ import (
     "testing"
 )
 
-var sink string
 var lines = make([]string, 10000)
 
 func init() {
@@ -22,31 +21,31 @@ func init() {
 // --- 1. No I/O ---
 
 func BenchmarkUnbatchedProcessing(b *testing.B) {
-    for n := 0; n < b.N; n++ {
+    for b.Loop() {
         for _, line := range lines {
-            sink = strings.ToUpper(line)
+            strings.ToUpper(line)
         }
     }
 }
 
 func BenchmarkBatchedProcessing(b *testing.B) {
     batchSize := 100
-    for n := 0; n < b.N; n++ {
+    for b.Loop() {
         for i := 0; i < len(lines); i += batchSize {
             end := i + batchSize
             if end > len(lines) {
                 end = len(lines)
             }
             batch := strings.Join(lines[i:end], "|")
-            sink = strings.ToUpper(batch)
+            strings.ToUpper(batch)
         }
     }
 }
 
 // --- 2. With I/O ---
 
 func BenchmarkUnbatchedIO(b *testing.B) {
-    for n := 0; n < b.N; n++ {
+    for b.Loop() {
         f, err := os.CreateTemp("", "unbatched")
         if err != nil {
             b.Fatal(err)
@@ -61,7 +60,7 @@ func BenchmarkUnbatchedIO(b *testing.B) {
 
 func BenchmarkBatchedIO(b *testing.B) {
     batchSize := 100
-    for n := 0; n < b.N; n++ {
+    for b.Loop() {
         f, err := os.CreateTemp("", "batched")
         if err != nil {
             b.Fatal(err)
@@ -87,23 +86,23 @@ func hash(s string) string {
 }
 
 func BenchmarkUnbatchedCrypto(b *testing.B) {
-    for n := 0; n < b.N; n++ {
+    for b.Loop() {
         for _, line := range lines {
-            sink = hash(line)
+            hash(line)
         }
     }
 }
 
 func BenchmarkBatchedCrypto(b *testing.B) {
     batchSize := 100
-    for n := 0; n < b.N; n++ {
+    for b.Loop() {
         for i := 0; i < len(lines); i += batchSize {
             end := i + batchSize
             if end > len(lines) {
                 end = len(lines)
             }
             joined := strings.Join(lines[i:end], "")
-            sink = hash(joined)
+            hash(joined)
         }
     }
 }
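
Note on the dropped var sink string above: under b.N the sink assignment was the usual guard against dead-code elimination. The testing package documents that arguments and results of function calls inside a b.Loop() body are kept alive, so the bare strings.ToUpper(...) and hash(...) calls are still measured without it. A reduced sketch of the idea (assuming a _test.go file that imports "strings" and "testing"; the input string is illustrative):

func BenchmarkToUpperNoSink(b *testing.B) {
    line := strings.Repeat("payload ", 8) // illustrative input
    for b.Loop() {
        // No sink needed: b.Loop keeps this call's arguments and result
        // alive, so the compiler cannot optimize the measured work away.
        strings.ToUpper(line)
    }
}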

docs/01-common-patterns/src/buffered-io_test.go

Lines changed: 2 additions & 2 deletions
@@ -44,7 +44,7 @@ func writeBuffered(w io.Writer, count int) {
 }
 
 func BenchmarkWriteNotBuffered(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         f, _ := os.CreateTemp("", "nobuf")
         writeNotBuffered(f, N)
         f.Close()
@@ -53,7 +53,7 @@ func BenchmarkWriteNotBuffered(b *testing.B) {
 }
 
 func BenchmarkWriteBuffered(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         f, _ := os.CreateTemp("", "buf")
         writeBuffered(f, N)
         f.Close()

docs/01-common-patterns/src/fields-alignment_test.go

Lines changed: 4 additions & 10 deletions
@@ -19,25 +19,21 @@ type WellAligned struct {
 }
 // types-simple-end
 
-var result int64
-
 // simple-start
 func BenchmarkPoorlyAligned(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         var items = make([]PoorlyAligned, 10_000_000)
         for j := range items {
             items[j].count = int64(j)
-            result += items[j].count
         }
     }
 }
 
 func BenchmarkWellAligned(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         var items = make([]WellAligned, 10_000_000)
         for j := range items {
             items[j].count = int64(j)
-            result += items[j].count
         }
     }
 }
@@ -63,8 +59,7 @@ func BenchmarkFalseSharing(b *testing.B) {
     var c SharedCounterBad // (1)
     var wg sync.WaitGroup
 
-    b.ResetTimer()
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         wg.Add(2)
         go func() {
             for i := 0; i < 1_000_000; i++ {
@@ -87,8 +82,7 @@ func BenchmarkNoFalseSharing(b *testing.B) {
     var c SharedCounterGood
     var wg sync.WaitGroup
 
-    b.ResetTimer()
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         wg.Add(2)
         go func() {
             for i := 0; i < 1_000_000; i++ {
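
This file also drops the b.ResetTimer() calls that preceded the false-sharing loops. Under b.N they excluded the counter and WaitGroup setup from the measurement; b.Loop() is documented to confine timing to the loop iterations, so setup placed before the loop is already excluded. A reduced sketch (assuming a _test.go file that imports "testing"; buildInput is a stand-in for per-benchmark setup, not code from this repository):

func buildInput() []int {
    data := make([]int, 1<<20)
    for i := range data {
        data[i] = i
    }
    return data
}

func BenchmarkSumAfterSetup(b *testing.B) {
    data := buildInput() // runs before the first b.Loop() call, so it is not timed
    sum := 0
    for b.Loop() {
        // Only this body is measured; no explicit b.ResetTimer() is needed.
        sum = 0
        for _, v := range data {
            sum += v
        }
    }
    _ = sum
}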

docs/01-common-patterns/src/interface-boxing_test.go

Lines changed: 6 additions & 10 deletions
@@ -18,49 +18,45 @@ func (LargeJob) Work() {}
 // interface-end
 
 // bench-slice-start
-var sink []Worker
-
 func BenchmarkBoxedLargeSlice(b *testing.B) {
     jobs := make([]Worker, 0, 1000)
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         jobs = jobs[:0]
         for j := 0; j < 1000; j++ {
             var job LargeJob
             jobs = append(jobs, job)
         }
-        sink = jobs
     }
 }
 
 func BenchmarkPointerLargeSlice(b *testing.B) {
     jobs := make([]Worker, 0, 1000)
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         jobs := jobs[:0]
         for j := 0; j < 1000; j++ {
             job := &LargeJob{}
             jobs = append(jobs, job)
         }
-        sink = jobs
     }
 }
 // bench-slice-end
 
 // bench-call-start
-var sinkOne Worker
+var sink Worker
 
 func call(w Worker) {
-    sinkOne = w
+    sink = w
 }
 
 func BenchmarkCallWithValue(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         var j LargeJob
         call(j)
     }
 }
 
 func BenchmarkCallWithPointer(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         j := &LargeJob{}
         call(j)
     }

docs/01-common-patterns/src/mem-prealloc_test.go

Lines changed: 3 additions & 3 deletions
@@ -5,7 +5,7 @@ import (
 )
 
 func BenchmarkAppendNoPrealloc(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         var s []int
         for j := 0; j < 10000; j++ {
             s = append(s, j)
@@ -14,10 +14,10 @@ func BenchmarkAppendNoPrealloc(b *testing.B) {
 }
 
 func BenchmarkAppendWithPrealloc(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         s := make([]int, 0, 10000)
         for j := 0; j < 10000; j++ {
             s = append(s, j)
         }
     }
-}
+}

docs/01-common-patterns/src/object-pooling_test.go

Lines changed: 4 additions & 8 deletions
@@ -10,14 +10,11 @@ type Data struct {
     Values [1024]int
 }
 
-// globalSink prevents compiler optimizations that could remove memory allocations.
-var globalSink *Data
-
 // BenchmarkWithoutPooling measures the performance of direct heap allocations.
 func BenchmarkWithoutPooling(b *testing.B) {
-    for i := 0; i < b.N; i++ {
-        globalSink = &Data{}      // Allocating a new object each time
-        globalSink.Values[0] = 42 // Simulating some memory activity
+    for b.Loop() {
+        data := &Data{}     // Allocating a new object each time
+        data.Values[0] = 42 // Simulating some memory activity
     }
 }
 
@@ -30,10 +27,9 @@ var dataPool = sync.Pool{
 
 // BenchmarkWithPooling measures the performance of using sync.Pool to reuse objects.
 func BenchmarkWithPooling(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         obj := dataPool.Get().(*Data) // Retrieve from pool
         obj.Values[0] = 42            // Simulate memory usage
         dataPool.Put(obj)             // Return object to pool for reuse
-        globalSink = obj // Prevents compiler optimizations from removing pooling logic
     }
 }

docs/01-common-patterns/src/stack-alloc_test.go

Lines changed: 3 additions & 3 deletions
@@ -16,13 +16,13 @@ func HeapAlloc() *Data {
 }
 
 func BenchmarkStackAlloc(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         _ = StackAlloc()
     }
 }
 
 func BenchmarkHeapAlloc(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         _ = HeapAlloc()
     }
 }
@@ -37,7 +37,7 @@ func HeapAllocEscape() {
 }
 
 func BenchmarkHeapAllocEscape(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         HeapAllocEscape()
     }
 }

docs/01-common-patterns/src/worker-pool_test.go

Lines changed: 2 additions & 2 deletions
@@ -21,7 +21,7 @@ func doWork(n int) [32]byte {
 }
 
 func BenchmarkUnboundedGoroutines(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         var wg sync.WaitGroup
         wg.Add(numJobs)
 
@@ -43,7 +43,7 @@ func worker(jobs <-chan int, wg *sync.WaitGroup) {
 }
 
 func BenchmarkWorkerPool(b *testing.B) {
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         var wg sync.WaitGroup
         wg.Add(numJobs)
 

docs/01-common-patterns/src/zero-copy_test.go

Lines changed: 6 additions & 14 deletions
@@ -10,22 +10,18 @@ import (
 )
 
 // bench-start
-var sink []byte
-
 func BenchmarkCopy(b *testing.B) {
     data := make([]byte, 64*1024)
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         buf := make([]byte, len(data))
         copy(buf, data)
-        sink = buf
     }
 }
 
 func BenchmarkSlice(b *testing.B) {
     data := make([]byte, 64*1024)
-    for i := 0; i < b.N; i++ {
-        s := data[:]
-        sink = s
+    for b.Loop() {
+        _ = data[:]
     }
 }
 // bench-end
@@ -39,13 +35,11 @@ func BenchmarkReadWithCopy(b *testing.B) {
     defer f.Close()
 
     buf := make([]byte, 4*1024*1024) // 4MB buffer
-    b.ResetTimer()
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         _, err := f.ReadAt(buf, 0)
         if err != nil && err != io.EOF {
             b.Fatal(err)
         }
-        sink = buf
     }
 }
 
@@ -57,13 +51,11 @@ func BenchmarkReadWithMmap(b *testing.B) {
     defer r.Close()
 
     buf := make([]byte, r.Len())
-    b.ResetTimer()
-    for i := 0; i < b.N; i++ {
+    for b.Loop() {
         _, err := r.ReadAt(buf, 0)
         if err != nil && err != io.EOF {
             b.Fatal(err)
         }
-        sink = buf
     }
 }
-// bench-io-end
+// bench-io-end
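
Since b.Loop requires a Go 1.24 or newer toolchain, these benchmarks are presumably run with something like go test -bench=. -benchmem from docs/01-common-patterns/src once that toolchain is in place.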
