Source file
src/runtime/malloc_test.go
1
2
3
4
5 package runtime_test
6
7 import (
8 "flag"
9 "fmt"
10 "internal/race"
11 "internal/testenv"
12 "os"
13 "os/exec"
14 "reflect"
15 "runtime"
16 . "runtime"
17 "strings"
18 "sync/atomic"
19 "testing"
20 "time"
21 "unsafe"
22 )
23
// testMemStatsCount counts invocations of TestMemStats (e.g. when run
// with -test.count>1); later runs relax the overflow thresholds below.
var testMemStatsCount int
25
// TestMemStats sanity-checks runtime.MemStats after a forced GC:
// fields that must be non-zero are, fields stay below generous
// overflow-detection thresholds, and the cross-field invariants
// (Sys breakdown, heap split, pause ring buffers) hold.
func TestMemStats(t *testing.T) {
	testMemStatsCount++

	// Force at least one GC so that GC-related fields (LastGC,
	// NumGC, NumForcedGC, ...) are non-zero.
	GC()

	st := new(MemStats)
	ReadMemStats(st)

	// nz reports an error unless the value is non-zero.
	nz := func(x any) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	// le(thresh) reports an error if a numeric value reaches thresh,
	// to catch wildly wrong values caused by accounting overflow.
	le := func(thresh float64) func(any) error {
		return func(x any) error {
			// The thresholds are only meaningful on the first run;
			// with -count>1 cumulative stats (e.g. TotalAlloc) can
			// legitimately grow past them, so skip the check.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	// eq(x) reports an error unless the value equals x exactly.
	eq := func(x any) func(any) error {
		return func(y any) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Per-field checks. Note: HeapReleased, HeapIdle, and
	// PauseTotalNs may legitimately be zero, so they get no nz check;
	// PauseNs/PauseEnd/BySize are validated structurally below.
	fields := map[string][]func(any) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	// Walk every MemStats field via reflection so a newly added field
	// without an entry in the map above is flagged as unknown.
	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	// Sys must be exactly the sum of its component *Sys fields.
	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	// The heap is split exactly into idle and in-use memory.
	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	// PauseEnd is a circular buffer indexed by GC count; the most
	// recent entry sits at (NumGC-1) mod len, written here as
	// (NumGC+255)%len to avoid going negative. It must match LastGC.
	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// The buffer hasn't wrapped, so it holds every pause and the
		// sum must match the running total exactly.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		// Slots past NumGC have never been written and must be zero.
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		// The buffer has wrapped and only holds the most recent
		// pauses, so the total is at least their sum.
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	// Forced GCs are a subset of all GCs.
	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}
138
// TestStringConcatenationAllocs verifies that concatenating a string
// literal with a string-converted byte slice performs exactly one
// allocation per operation (the result string itself).
func TestStringConcatenationAllocs(t *testing.T) {
	allocs := testing.AllocsPerRun(1e3, func() {
		buf := make([]byte, 10)
		for i := range buf {
			buf[i] = byte(i) + '0'
		}
		got := "foo" + string(buf)
		if want := "foo0123456789"; got != want {
			t.Fatalf("want %v, got %v", want, got)
		}
	})
	// Only the concatenation result should hit the heap.
	if allocs != 1 {
		t.Fatalf("want 1 allocation, got %v", allocs)
	}
}
155
156 func TestTinyAlloc(t *testing.T) {
157 if runtime.Raceenabled {
158 t.Skip("tinyalloc suppressed when running in race mode")
159 }
160 const N = 16
161 var v [N]unsafe.Pointer
162 for i := range v {
163 v[i] = unsafe.Pointer(new(byte))
164 }
165
166 chunks := make(map[uintptr]bool, N)
167 for _, p := range v {
168 chunks[uintptr(p)&^7] = true
169 }
170
171 if len(chunks) == N {
172 t.Fatal("no bytes allocated within the same 8-byte chunk")
173 }
174 }
175
// Package-level sinks used by TestTinyAllocIssue37262. Storing each
// allocation here keeps it live and prevents the compiler from
// eliminating or stack-allocating it.
var (
	tinyByteSink   *byte
	tinyUint32Sink *uint32
	tinyObj12Sink  *obj12
)
181
// obj12 is a 12-byte object (an 8-byte field followed by a 4-byte
// field): small enough for the tiny allocator, yet requiring 8-byte
// alignment of a for the atomic store in TestTinyAllocIssue37262.
// Do not change the field layout; the size is load-bearing.
type obj12 struct {
	a uint64
	b uint32
}
186
// TestTinyAllocIssue37262 tries to provoke an unaligned 64-bit atomic
// access on a tiny-allocated object (golang.org/issue/37262): it
// steers the tiny allocator to a 4-byte-aligned-but-not-8-byte-aligned
// offset, allocates an obj12 there, and atomically stores to its
// uint64 field, which would fault on 32-bit platforms if misaligned.
func TestTinyAllocIssue37262(t *testing.T) {
	if runtime.Raceenabled {
		t.Skip("tinyalloc suppressed when running in race mode")
	}

	// GC twice to reach a reasonably stable heap state before
	// probing the tiny allocator.
	runtime.GC()
	runtime.GC()

	// Acquirem/Releasem are runtime test hooks; pinning the M keeps
	// us on one P's tiny-allocator state for the whole sequence.
	runtime.Acquirem()

	// Burn 1-byte allocations until one lands at offset 0xf of a
	// 16-byte chunk, i.e. the chunk is exhausted and the next tiny
	// allocation starts a fresh chunk at a known position.
	aligned := false
	for i := 0; i < 16; i++ {
		tinyByteSink = new(byte)
		if uintptr(unsafe.Pointer(tinyByteSink))&0xf == 0xf {
			aligned = true
			break
		}
	}
	if !aligned {
		runtime.Releasem()
		t.Fatal("unable to get a fresh tiny slot")
	}

	// Consume 4 bytes of the fresh chunk so the current tiny offset
	// is 4-byte aligned but not 8-byte aligned.
	tinyUint32Sink = new(uint32)

	// Allocate the 12-byte object. Without the fix for issue 37262
	// the tiny allocator would place it at the current 4-byte-aligned
	// offset, leaving field a unaligned for 64-bit atomics.
	tinyObj12Sink = new(obj12)

	// This store faults on 32-bit platforms if a is not 8-byte
	// aligned (see the sync/atomic 64-bit alignment requirement).
	atomic.StoreUint64(&tinyObj12Sink.a, 10)

	// Drop the sinks so the objects can be reclaimed.
	tinyByteSink = nil
	tinyUint32Sink = nil
	tinyObj12Sink = nil

	runtime.Releasem()
}
240
// TestPageCacheLeak checks that no pages have leaked out of per-P page
// caches (PageCachePagesLeaked is a runtime test hook available via
// the dot-import of runtime).
func TestPageCacheLeak(t *testing.T) {
	// Drop to GOMAXPROCS(1) for the duration of the test; the
	// deferred call restores the previous value on return.
	defer GOMAXPROCS(GOMAXPROCS(1))
	leaked := PageCachePagesLeaked()
	if leaked != 0 {
		t.Fatalf("found %d leaked pages in page caches", leaked)
	}
}
248
249 func TestPhysicalMemoryUtilization(t *testing.T) {
250 got := runTestProg(t, "testprog", "GCPhys")
251 want := "OK\n"
252 if got != want {
253 t.Fatalf("expected %q, but got %q", want, got)
254 }
255 }
256
// TestScavengedBitsCleared checks that in-use memory never has its
// scavenged bits set. CheckScavengedBitsCleared (a runtime test hook)
// fills mismatches with up to len(mismatches) offending ranges and
// reports ok=false if any were found.
func TestScavengedBitsCleared(t *testing.T) {
	var mismatches [128]BitsMismatch
	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
		t.Errorf("uncleared scavenged bits")
		// Log each mismatching range before failing for good.
		for _, m := range mismatches[:n] {
			t.Logf("\t@ address 0x%x", m.Base)
			t.Logf("\t| got: %064b", m.Got)
			t.Logf("\t| want: %064b", m.Want)
		}
		t.FailNow()
	}
}
269
// acLink is a 1 MiB object; TestArenaCollision allocates these in bulk
// to force the heap to grow into new arena memory.
type acLink struct {
	x [1 << 20]byte
}
273
// arenaCollisionSink keeps TestArenaCollision's allocations reachable
// so they cannot be freed (and their addresses reused) mid-test.
var arenaCollisionSink []*acLink
275
// TestArenaCollision checks that the heap still grows correctly when
// the runtime's preferred arena hint addresses are already occupied.
// Because it deliberately burns address space, it re-execs itself in a
// subprocess (guarded by TEST_ARENA_COLLISION) and does the real work
// only there.
func TestArenaCollision(t *testing.T) {
	testenv.MustHaveExec(t)

	// Parent process: spawn the guarded child and inspect its output.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestArenaCollision", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// Under the race detector the heap cannot move outside
			// its expected address range, so the runtime is expected
			// to give up with this message rather than pass.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	disallowed := [][2]uintptr{}
	// Drop all but the next 3 arena hints; exhausting the full
	// 64-bit hint list would take far too much memory.
	KeepNArenaHints(3)
	// Consume those hints (and then some) so the runtime is forced
	// to fall back to new hint addresses.
	for i := 0; i < 5; i++ {
		// Map over the next hint region ourselves so the heap
		// cannot use it, and remember the reserved range.
		start, end := MapNextArenaHint()
		disallowed = append(disallowed, [2]uintptr{start, end})
		// Allocate until the runtime moves past the hint we just
		// blocked (GetNextArenaHint changing means it collided and
		// picked a new hint).
		hint := GetNextArenaHint()
		for GetNextArenaHint() == hint {
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)
			// No allocation may land inside a region we reserved.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}
327
// mallocSink is a package-level sink: publishing each benchmark's
// accumulated result here keeps the compiler from eliminating the
// allocations as dead code.
var mallocSink uintptr

// BenchmarkMalloc8 measures allocation of 8-byte pointer-free objects.
func BenchmarkMalloc8(b *testing.B) {
	var acc uintptr
	for n := b.N; n > 0; n-- {
		obj := new(int64)
		acc ^= uintptr(unsafe.Pointer(obj))
	}
	mallocSink = acc
}
338
339 func BenchmarkMalloc16(b *testing.B) {
340 var x uintptr
341 for i := 0; i < b.N; i++ {
342 p := new([2]int64)
343 x ^= uintptr(unsafe.Pointer(p))
344 }
345 mallocSink = x
346 }
347
348 func BenchmarkMallocTypeInfo8(b *testing.B) {
349 var x uintptr
350 for i := 0; i < b.N; i++ {
351 p := new(struct {
352 p [8 / unsafe.Sizeof(uintptr(0))]*int
353 })
354 x ^= uintptr(unsafe.Pointer(p))
355 }
356 mallocSink = x
357 }
358
359 func BenchmarkMallocTypeInfo16(b *testing.B) {
360 var x uintptr
361 for i := 0; i < b.N; i++ {
362 p := new(struct {
363 p [16 / unsafe.Sizeof(uintptr(0))]*int
364 })
365 x ^= uintptr(unsafe.Pointer(p))
366 }
367 mallocSink = x
368 }
369
// LargeStruct holds 16 slice headers (16 * 24 bytes on 64-bit), giving
// BenchmarkMallocLargeStruct a large, pointer-bearing allocation.
type LargeStruct struct {
	x [16][]byte
}
373
374 func BenchmarkMallocLargeStruct(b *testing.B) {
375 var x uintptr
376 for i := 0; i < b.N; i++ {
377 p := make([]LargeStruct, 2)
378 x ^= uintptr(unsafe.Pointer(&p[0]))
379 }
380 mallocSink = x
381 }
382
// n sets how many goroutines the goroutine benchmarks below spawn.
var n = flag.Int("n", 1000, "number of goroutines")
384
385 func BenchmarkGoroutineSelect(b *testing.B) {
386 quit := make(chan struct{})
387 read := func(ch chan struct{}) {
388 for {
389 select {
390 case _, ok := <-ch:
391 if !ok {
392 return
393 }
394 case <-quit:
395 return
396 }
397 }
398 }
399 benchHelper(b, *n, read)
400 }
401
402 func BenchmarkGoroutineBlocking(b *testing.B) {
403 read := func(ch chan struct{}) {
404 for {
405 if _, ok := <-ch; !ok {
406 return
407 }
408 }
409 }
410 benchHelper(b, *n, read)
411 }
412
413 func BenchmarkGoroutineForRange(b *testing.B) {
414 read := func(ch chan struct{}) {
415 for range ch {
416 }
417 }
418 benchHelper(b, *n, read)
419 }
420
// benchHelper times GC with n goroutines that each block reading from
// their own buffered channel via read. Each benchmark iteration wakes
// every goroutine with one send, sleeps so they can run and block
// again, then times a single GC with the timer enabled only around it.
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		// Buffer of 1 so the wake-up sends below never block.
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		// Wake all goroutines (nil check is defensive; every entry
		// of m was just populated above).
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		// Let the goroutines consume their wake-ups and park again.
		time.Sleep(10 * time.Millisecond)
		// Only the GC itself is inside the timed region.
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	// Tell the readers to exit and give them a moment to do so
	// before the benchmark returns.
	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}
448
449 func BenchmarkGoroutineIdle(b *testing.B) {
450 quit := make(chan struct{})
451 fn := func() {
452 <-quit
453 }
454 for i := 0; i < *n; i++ {
455 go fn()
456 }
457
458 GC()
459 b.ResetTimer()
460
461 for i := 0; i < b.N; i++ {
462 GC()
463 }
464
465 b.StopTimer()
466 close(quit)
467 time.Sleep(10 * time.Millisecond)
468 }
469
View as plain text