Source file src/runtime/mcentral.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral is two lists of mspans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	spanclass spanClass

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
	lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
}

// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	return &c.partial[1-sweepgen/2%2]
}

// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	return &c.partial[sweepgen/2%2]
}

// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	return &c.full[1-sweepgen/2%2]
}

// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	return &c.full[sweepgen/2%2]
}

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from. By
	// setting this to 100, we limit the space overhead to 1%.
	//
	// TODO(austin,mknyszek): This still has bad worst-case
	// throughput. For example, this could find just one free slot
	// on the 100th swept span. That limits allocation latency, but
	// still has very poor throughput. We could instead keep a
	// running free-to-used budget and switch to fresh span
	// allocation if the budget runs low.
	spanBudget := 100

	var s *mspan
	var sl sweepLocker

	// Try partial swept spans first.
	sg := mheap_.sweepgen
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	sl = sweep.active.begin()
	if sl.valid {
		// Now try partial unswept spans.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.partialUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it and use it.
				s.sweep(true)
				sweep.active.end(sl)
				goto havespan
			}
			// We failed to get ownership of the span, which means it's being or
			// has been swept by an asynchronous sweeper that just couldn't remove it
			// from the unswept list. That sweeper took ownership of the span and
			// responsibility for either freeing it to the heap or putting it on the
			// right swept list. Either way, we should just ignore it (and it's unsafe
			// for us to do anything else).
		}
		// Now try full unswept spans, sweeping them and putting them into the
		// right list if we fail to get a span.
		for ; spanBudget >= 0; spanBudget-- {
			s = c.fullUnswept(sg).pop()
			if s == nil {
				break
			}
			if s, ok := sl.tryAcquire(s); ok {
				// We got ownership of the span, so let's sweep it.
				s.sweep(true)
				// Check if there's any free space.
				freeIndex := s.nextFreeIndex()
				if freeIndex != s.nelems {
					s.freeindex = freeIndex
					sweep.active.end(sl)
					goto havespan
				}
				// Add it to the swept list, because sweeping didn't give us any free space.
				c.fullSwept(sg).push(s.mspan)
			}
			// See comment for partial unswept spans.
		}
		sweep.active.end(sl)
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}

// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it on
		// the right list.
		//
		// We don't use a sweepLocker here. Stale cached spans
		// aren't in the global sweep lists, so mark termination
		// itself holds up sweep completion until all mcaches
		// have been swept.
		ss := sweepLocked{s}
		ss.sweep(false)
	} else {
		if int(s.nelems)-int(s.allocCount) > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s := mheap_.alloc(npages, c.spanclass)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
	n := s.divideByElemSize(npages << _PageShift)
	s.limit = s.base() + size*n
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}
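
For reference, a minimal standalone sketch of the index arithmetic described in the mcentral struct comment and used by partialSwept/partialUnswept and fullSwept/fullUnswept above. It is not part of mcentral.go; the helper names sweptIndex and unsweptIndex are invented here purely for illustration.

// Illustrative sketch (not runtime code): because sweepgen advances by 2 each
// GC cycle, the two spanSet slots alternate between the "swept" and "unswept"
// roles every cycle.
package main

import "fmt"

// sweptIndex mirrors the sweepgen/2%2 selection in partialSwept and fullSwept.
func sweptIndex(sweepgen uint32) uint32 { return sweepgen / 2 % 2 }

// unsweptIndex mirrors the 1-sweepgen/2%2 selection in partialUnswept and fullUnswept.
func unsweptIndex(sweepgen uint32) uint32 { return 1 - sweepgen/2%2 }

func main() {
	// Each GC cycle bumps sweepgen by 2, so last cycle's swept set
	// becomes this cycle's unswept set, and vice versa.
	for sweepgen := uint32(2); sweepgen <= 8; sweepgen += 2 {
		fmt.Printf("sweepgen=%d swept=partial[%d] unswept=partial[%d]\n",
			sweepgen, sweptIndex(sweepgen), unsweptIndex(sweepgen))
	}
}

Running this prints the indices flipping between 0 and 1 on successive sweepgen values, which is why only two spanSet slots per list are needed.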