// Source: go/src/runtime/runtime1.go (scraped copy; license header,
// comments, and go: directives were stripped by the extraction).
1
2
3
4
5 package runtime
6
7 import (
8 "internal/bytealg"
9 "internal/goarch"
10 "runtime/internal/atomic"
11 "unsafe"
12 )
13
14
15
16
17
18
// Bit layout of the traceback settings word (traceback_cache /
// traceback_env): the two low bits are flags, and the remaining high
// bits hold the numeric traceback level (see gotraceback and
// setTraceback).
const (
	tracebackCrash = 1 << iota // crash (e.g. core dump) after printing tracebacks
	tracebackAll               // print stacks of all goroutines, not just the current one
	tracebackShift = iota      // number of flag bits below the level field
)

// traceback_cache is the current encoded traceback setting. It starts
// at level 2 until setTraceback (called from parsedebugvars) stores the
// value parsed from GOTRACEBACK.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env preserves the bits that came from the GOTRACEBACK
// environment variable; setTraceback ORs it back in so later calls
// (e.g. from debug.SetTraceback) cannot lower the environment setting.
var traceback_env uint32
27
28
29
30
31
32
33
34
35
36
37 func gotraceback() (level int32, all, crash bool) {
38 _g_ := getg()
39 t := atomic.Load(&traceback_cache)
40 crash = t&tracebackCrash != 0
41 all = _g_.m.throwing > 0 || t&tracebackAll != 0
42 if _g_.m.traceback != 0 {
43 level = int32(_g_.m.traceback)
44 } else {
45 level = int32(t >> tracebackShift)
46 }
47 return
48 }
49
// Raw C command-line data saved by args during early startup.
// The environment vector follows argv (one slot past its terminating
// NULL); see goenvs_unix.
var (
	argc int32
	argv **byte
)
54
55
56
57 func argv_index(argv **byte, i int32) *byte {
58 return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
59 }
60
// args records the raw C argc/argv during startup and then lets the
// OS-specific sysargs hook examine the data that follows them.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
66
67 func goargs() {
68 if GOOS == "windows" {
69 return
70 }
71 argslice = make([]string, argc)
72 for i := int32(0); i < argc; i++ {
73 argslice[i] = gostringnocopy(argv_index(argv, i))
74 }
75 }
76
77 func goenvs_unix() {
78
79
80
81 n := int32(0)
82 for argv_index(argv, argc+1+n) != nil {
83 n++
84 }
85
86 envs = make([]string, n)
87 for i := int32(0); i < n; i++ {
88 envs[i] = gostring(argv_index(argv, argc+1+i))
89 }
90 }
91
// environ returns the runtime's copy of the process environment, as
// populated during startup (see goenvs_unix).
func environ() []string {
	return envs
}
95
96
97
// Scratch words for testAtomic64; package-level so the atomics operate
// on memory with the runtime's normal alignment guarantees.
var test_z64, test_x64 uint64

// testAtomic64 sanity-checks the 64-bit atomic primitives (Cas64,
// Load64, Store64, Xadd64, Xchg64) during startup. Any failure throws,
// since the runtime cannot function with broken atomics.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Must fail: current value 42 != expected old value 0.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	// Must succeed: current value 42 == expected old value 42.
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// Values above 2^32 catch accidental truncation to 32 bits.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	// Xchg64 returns the old value and installs the new one.
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
136
137 func check() {
138 var (
139 a int8
140 b uint8
141 c int16
142 d uint16
143 e int32
144 f uint32
145 g int64
146 h uint64
147 i, i1 float32
148 j, j1 float64
149 k unsafe.Pointer
150 l *uint16
151 m [4]byte
152 )
153 type x1t struct {
154 x uint8
155 }
156 type y1t struct {
157 x1 x1t
158 y uint8
159 }
160 var x1 x1t
161 var y1 y1t
162
163 if unsafe.Sizeof(a) != 1 {
164 throw("bad a")
165 }
166 if unsafe.Sizeof(b) != 1 {
167 throw("bad b")
168 }
169 if unsafe.Sizeof(c) != 2 {
170 throw("bad c")
171 }
172 if unsafe.Sizeof(d) != 2 {
173 throw("bad d")
174 }
175 if unsafe.Sizeof(e) != 4 {
176 throw("bad e")
177 }
178 if unsafe.Sizeof(f) != 4 {
179 throw("bad f")
180 }
181 if unsafe.Sizeof(g) != 8 {
182 throw("bad g")
183 }
184 if unsafe.Sizeof(h) != 8 {
185 throw("bad h")
186 }
187 if unsafe.Sizeof(i) != 4 {
188 throw("bad i")
189 }
190 if unsafe.Sizeof(j) != 8 {
191 throw("bad j")
192 }
193 if unsafe.Sizeof(k) != goarch.PtrSize {
194 throw("bad k")
195 }
196 if unsafe.Sizeof(l) != goarch.PtrSize {
197 throw("bad l")
198 }
199 if unsafe.Sizeof(x1) != 1 {
200 throw("bad unsafe.Sizeof x1")
201 }
202 if unsafe.Offsetof(y1.y) != 1 {
203 throw("bad offsetof y1.y")
204 }
205 if unsafe.Sizeof(y1) != 2 {
206 throw("bad unsafe.Sizeof y1")
207 }
208
209 if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
210 throw("bad timediv")
211 }
212
213 var z uint32
214 z = 1
215 if !atomic.Cas(&z, 1, 2) {
216 throw("cas1")
217 }
218 if z != 2 {
219 throw("cas2")
220 }
221
222 z = 4
223 if atomic.Cas(&z, 5, 6) {
224 throw("cas3")
225 }
226 if z != 4 {
227 throw("cas4")
228 }
229
230 z = 0xffffffff
231 if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
232 throw("cas5")
233 }
234 if z != 0xfffffffe {
235 throw("cas6")
236 }
237
238 m = [4]byte{1, 1, 1, 1}
239 atomic.Or8(&m[1], 0xf0)
240 if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
241 throw("atomicor8")
242 }
243
244 m = [4]byte{0xff, 0xff, 0xff, 0xff}
245 atomic.And8(&m[1], 0x1)
246 if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
247 throw("atomicand8")
248 }
249
250 *(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
251 if j == j {
252 throw("float64nan")
253 }
254 if !(j != j) {
255 throw("float64nan1")
256 }
257
258 *(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
259 if j == j1 {
260 throw("float64nan2")
261 }
262 if !(j != j1) {
263 throw("float64nan3")
264 }
265
266 *(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
267 if i == i {
268 throw("float32nan")
269 }
270 if i == i {
271 throw("float32nan1")
272 }
273
274 *(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
275 if i == i1 {
276 throw("float32nan2")
277 }
278 if i == i1 {
279 throw("float32nan3")
280 }
281
282 testAtomic64()
283
284 if _FixedStack != round2(_FixedStack) {
285 throw("FixedStack is not power-of-2")
286 }
287
288 if !checkASM() {
289 throw("assembly checks failed")
290 }
291 }
292
// dbgVar associates a GODEBUG key name with the debug-struct field
// that stores its parsed int32 value (see dbgvars and parsedebugvars).
type dbgVar struct {
	name  string
	value *int32
}
297
298
299
300
301
// debug holds the parsed values of the GODEBUG environment variable.
// Each int32 field is filled in by parsedebugvars from the GODEBUG key
// of the same name, via the dbgvars table; see the runtime package
// documentation for the meaning of the individual options.
var debug struct {
	cgocheck           int32
	clobberfree        int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	madvdontneed       int32
	scavtrace          int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
	asyncpreemptoff    int32
	harddecommit       int32

	// malloc is derived in parsedebugvars: true if any of the fields
	// below is nonzero. It serves as a single combined check on the
	// malloc hot path.
	malloc         bool
	allocfreetrace int32
	inittrace      int32
	sbrk           int32
}
328
// dbgvars lists every GODEBUG key that parsedebugvars handles
// generically, paired with the debug-struct field it populates.
// (memprofilerate is handled specially and is not in this table.)
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"clobberfree", &debug.clobberfree},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"madvdontneed", &debug.madvdontneed},
	{"sbrk", &debug.sbrk},
	{"scavtrace", &debug.scavtrace},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
	{"asyncpreemptoff", &debug.asyncpreemptoff},
	{"inittrace", &debug.inittrace},
	{"harddecommit", &debug.harddecommit},
}
350
351 func parsedebugvars() {
352
353 debug.cgocheck = 1
354 debug.invalidptr = 1
355 if GOOS == "linux" {
356
357
358
359
360
361
362
363
364 debug.madvdontneed = 1
365 }
366
367 for p := gogetenv("GODEBUG"); p != ""; {
368 field := ""
369 i := bytealg.IndexByteString(p, ',')
370 if i < 0 {
371 field, p = p, ""
372 } else {
373 field, p = p[:i], p[i+1:]
374 }
375 i = bytealg.IndexByteString(field, '=')
376 if i < 0 {
377 continue
378 }
379 key, value := field[:i], field[i+1:]
380
381
382
383
384 if key == "memprofilerate" {
385 if n, ok := atoi(value); ok {
386 MemProfileRate = n
387 }
388 } else {
389 for _, v := range dbgvars {
390 if v.name == key {
391 if n, ok := atoi32(value); ok {
392 *v.value = n
393 }
394 }
395 }
396 }
397 }
398
399 debug.malloc = (debug.allocfreetrace | debug.inittrace | debug.sbrk) != 0
400
401 setTraceback(gogetenv("GOTRACEBACK"))
402 traceback_env = traceback_cache
403 }
404
405
// setTraceback parses a GOTRACEBACK-style setting and stores the
// encoded result in traceback_cache (level in the high bits, flag bits
// below; see the tracebackCrash/tracebackAll constants).
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		// Unrecognized values: treat a decimal number as an explicit
		// level (with all-goroutines output); anything else ends up
		// as level 0 with tracebackAll set.
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}

	// In c-shared/c-archive builds, force crash-on-fatal-error so the
	// embedding C program observes the failure.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	// Never drop below what the GOTRACEBACK environment variable set.
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
435
436
437
438
439
440
441
// timediv divides v (typically nanoseconds) by div, returning the
// 32-bit quotient and storing the remainder through rem (if non-nil).
// It avoids a hardware 64-bit divide, which some 32-bit targets lack,
// by doing binary long division over the 31 quotient bits. If the
// quotient does not fit in 31 bits, it returns 0x7fffffff with a
// remainder of 0.
func timediv(v int64, div int32, rem *int32) int32 {
	quotient := int32(0)
	for bit := 30; bit >= 0; bit-- {
		chunk := int64(div) << uint(bit)
		if v >= chunk {
			v -= chunk
			// Cannot overflow: bit 31 is never set.
			quotient |= 1 << uint(bit)
		}
	}
	// Leftover >= div means the true quotient needed bit 31 or more.
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return quotient
}
463
464
465
466
467 func acquirem() *m {
468 _g_ := getg()
469 _g_.m.locks++
470 return _g_.m
471 }
472
473
474 func releasem(mp *m) {
475 _g_ := getg()
476 mp.locks--
477 if mp.locks == 0 && _g_.preempt {
478
479 _g_.stackguard0 = stackPreempt
480 }
481 }
482
483
484 func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
485 modules := activeModules()
486 sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
487 ret := [][]int32{modules[0].typelinks}
488 for _, md := range modules[1:] {
489 sections = append(sections, unsafe.Pointer(md.types))
490 ret = append(ret, md.typelinks)
491 }
492 return sections, ret
493 }
494
495
496
// reflect_resolveNameOff resolves a name offset relative to a module
// base pointer and returns a pointer to the name's bytes. Exposed to
// package reflect (presumably via go:linkname — not visible in this view).
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}
500
501
502
// reflect_resolveTypeOff resolves an *_type offset from a base type.
// Exposed to package reflect (presumably via go:linkname — not visible
// in this view).
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}
506
507
508
// reflect_resolveTextOff resolves a function pointer offset from a base
// type. Exposed to package reflect (presumably via go:linkname — not
// visible in this view).
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}
513
514
515
// reflectlite_resolveNameOff is the internal/reflectlite counterpart of
// reflect_resolveNameOff: it resolves a name offset relative to a
// module base pointer and returns a pointer to the name's bytes.
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}
519
520
521
// reflectlite_resolveTypeOff is the internal/reflectlite counterpart of
// reflect_resolveTypeOff: it resolves an *_type offset from a base type.
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}
525
526
527
// reflect_addReflectOff registers a pointer in the runtime's table of
// reflect-created offsets and returns its id. Ids are negative and
// assigned in descending order, so they never collide with real
// (non-negative) section offsets. Idempotent: registering the same
// pointer twice returns the same id.
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	// Lazily initialize the registry on first use.
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		// New pointer: consume the next id and record both directions
		// of the mapping.
		id = reflectOffs.next
		reflectOffs.next--
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
545
// end of src/runtime/runtime1.go (web-viewer footer removed)