Source file src/runtime/mcache.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// Per-thread (in Go, per-P) cache for small objects.
// This includes a small object cache and local allocation stats.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
//
//go:notinheap
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	nextSample uintptr // trigger heap sample after allocating this many bytes
	scanAlloc  uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	//
	// tinyAllocs is the number of tiny allocations performed
	// by the P that owns this mcache.
	tiny       uintptr
	tinyoffset uintptr
	tinyAllocs uintptr

	// The rest is not accessed on every malloc.

	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass

	stackcache [_NumStackOrders]stackfreelist

	// flushGen indicates the sweepgen during which this mcache
	// was last flushed. If flushGen != mheap_.sweepgen, the spans
	// in this mcache are stale and need to be flushed so they
	// can be swept. This is done in acquirep.
	flushGen uint32
}

// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr
}

// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr

// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}

type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}

// dummy mspan that contains no free objects.
var emptymspan mspan

func allocmcache() *mcache {
	var c *mcache
	systemstack(func() {
		lock(&mheap_.lock)
		c = (*mcache)(mheap_.cachealloc.alloc())
		c.flushGen = mheap_.sweepgen
		unlock(&mheap_.lock)
	})
	for i := range c.alloc {
		c.alloc[i] = &emptymspan
	}
	c.nextSample = nextSample()
	return c
}
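
// For illustration: allocmcache returns a cache whose alloc slots all
// point at emptymspan, the dummy span above with no free objects, so
// the very first allocation of each span class falls through to refill.
// A hypothetical sketch (not runtime code) of that first use:
//
//	c := allocmcache()
//	// c.alloc[spc] == &emptymspan, and emptymspan has
//	// allocCount == nelems == 0, so refill's "current span must be
//	// full" precondition holds trivially.
//	c.refill(spc) // installs a span with at least one free object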

// freemcache releases resources associated with this
// mcache and puts the object onto a free list.
//
// In some cases there is no way to simply release
// resources, such as statistics, so donate them to
// a different mcache (the recipient).
func freemcache(c *mcache) {
	systemstack(func() {
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}

// getMCache is a convenience function which tries to obtain an mcache.
//
// Returns nil if the caller has no P and bootstrapping has completed
// (mcache0 has already been cleared by procresize). The caller's
// P must not change, so we must be in a non-preemptible state.
func getMCache(mp *m) *mcache {
	// Grab the mcache, since that's where stats live.
	pp := mp.p.ptr()
	var c *mcache
	if pp == nil {
		// We will be called without a P while bootstrapping,
		// in which case we use mcache0, which is set in mallocinit.
		// mcache0 is cleared when bootstrapping is complete,
		// by procresize.
		c = mcache0
	} else {
		c = pp.mcache
	}
	return c
}

// refill acquires a new span of span class spc for c. This span will
// have at least one free object. The current span in c must be full.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) refill(spc spanClass) {
	// Return the current cached span to the central lists.
	s := c.alloc[spc]

	if uintptr(s.allocCount) != s.nelems {
		throw("refill of span with free space remaining")
	}
	if s != &emptymspan {
		// Mark this span as no longer cached.
		if s.sweepgen != mheap_.sweepgen+3 {
			throw("bad sweepgen in refill")
		}
		mheap_.central[spc].mcentral.uncacheSpan(s)
	}

	// Get a new cached span from the central lists.
	s = mheap_.central[spc].mcentral.cacheSpan()
	if s == nil {
		throw("out of memory")
	}

	if uintptr(s.allocCount) == s.nelems {
		throw("span has no free space")
	}

	// Indicate that this span is cached and prevent asynchronous
	// sweeping in the next sweep phase.
	s.sweepgen = mheap_.sweepgen + 3

	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	stats := memstats.heapStats.acquire()
	atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))

	// Flush tinyAllocs.
	if spc == tinySpanClass {
		atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
		c.tinyAllocs = 0
	}
	memstats.heapStats.release()

	// Update heapLive with the same assumption.
	// While we're here, flush scanAlloc, since we have to call
	// revise anyway.
	usedBytes := uintptr(s.allocCount) * s.elemsize
	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
	c.scanAlloc = 0

	c.alloc[spc] = s
}

// allocLarge allocates a span for a large object.
func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npages pages.
	deductSweepCredit(npages*_PageSize, npages)

	spc := makeSpanClass(0, noscan)
	s := mheap_.alloc(npages, spc)
	if s == nil {
		throw("out of memory")
	}
	stats := memstats.heapStats.acquire()
	atomic.Xadduintptr(&stats.largeAlloc, npages*pageSize)
	atomic.Xadduintptr(&stats.largeAllocCount, 1)
	memstats.heapStats.release()

	// Update heapLive.
	gcController.update(int64(s.npages*pageSize), 0)

	// Put the large span in the mcentral swept list so that it's
	// visible to the background sweeper.
	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
	s.limit = s.base() + size
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}
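
// A worked example of the page rounding in allocLarge above, assuming
// the usual 8 KiB runtime page (_PageShift == 13, _PageSize == 8192,
// _PageMask == 8191); the concrete size is made up for illustration:
//
//	size := uintptr(100 << 10)   // 102400 bytes requested
//	npages := size >> _PageShift // 102400 >> 13 == 12 whole pages
//	if size&_PageMask != 0 {     // 102400 & 8191 == 4096, so round up
//		npages++             // npages == 13 (106496 bytes of pages)
//	}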

func (c *mcache) releaseAll() {
	// Take this opportunity to flush scanAlloc.
	scanAlloc := int64(c.scanAlloc)
	c.scanAlloc = 0

	sg := mheap_.sweepgen
	dHeapLive := int64(0)
	for i := range c.alloc {
		s := c.alloc[i]
		if s != &emptymspan {
			// Adjust smallAllocCount in case the span wasn't fully allocated.
			n := uintptr(s.nelems) - uintptr(s.allocCount)
			stats := memstats.heapStats.acquire()
			atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
			memstats.heapStats.release()
			if s.sweepgen != sg+1 {
				// refill conservatively counted unallocated slots in
				// gcController.heapLive. Undo this.
				//
				// If this span was cached before sweep, then
				// gcController.heapLive was totally recomputed since
				// caching this span, so we don't do this for
				// stale spans.
				dHeapLive -= int64(n) * int64(s.elemsize)
			}
			// Release the span to the mcentral.
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan
		}
	}
	// Clear tinyalloc pool.
	c.tiny = 0
	c.tinyoffset = 0

	// Flush tinyAllocs.
	stats := memstats.heapStats.acquire()
	atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
	c.tinyAllocs = 0
	memstats.heapStats.release()

	// Update heapScan and heapLive.
	gcController.update(dHeapLive, scanAlloc)
}

// prepareForSweep flushes c if the system has entered a new sweep phase
// since c was populated. This must happen between the sweep phase
// starting and the first allocation from c.
func (c *mcache) prepareForSweep() {
	// Alternatively, instead of making sure we do this on every P
	// between starting the world and allocating on that P, we
	// could leave allocate-black on, allow allocation to continue
	// as usual, use a ragged barrier at the beginning of sweep to
	// ensure all cached spans are swept, and then disable
	// allocate-black. However, with this approach it's difficult
	// to avoid spilling mark bits into the *next* GC cycle.
	sg := mheap_.sweepgen
	if c.flushGen == sg {
		return
	} else if c.flushGen != sg-2 {
		println("bad flushGen", c.flushGen, "in prepareForSweep; sweepgen", sg)
		throw("bad flushGen")
	}
	c.releaseAll()
	stackcache_clear(c)
	atomic.Store(&c.flushGen, mheap_.sweepgen) // Synchronizes with gcStart
}
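
// A concrete reading of the flushGen check in prepareForSweep above,
// assuming the convention documented in mheap.go that mheap_.sweepgen
// advances by 2 at the start of each sweep cycle:
//
//	flushGen == sg     // already flushed during this cycle; nothing to do
//	flushGen == sg - 2 // last flushed exactly one cycle ago; flush now
//	anything else      // the mcache somehow skipped a cycle, so throw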