Source file src/runtime/mpagealloc_64bit.go
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x

package runtime

import "unsafe"

const (
	// The number of levels in the radix tree.
	summaryLevels = 5

	// Constants for testing.
	pageAlloc32Bit = 0
	pageAlloc64Bit = 1

	// Number of bits needed to represent all indices into the L1 of the
	// chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this number change.
	pallocChunksL1Bits = 13
)

// levelBits is the number of bits in the radix for a given level in the super summary
// structure.
//
// The sum of all the entries of levelBits should equal heapAddrBits.
var levelBits = [summaryLevels]uint{
	summaryL0Bits,
	summaryLevelBits,
	summaryLevelBits,
	summaryLevelBits,
	summaryLevelBits,
}

// levelShift is the number of bits to shift to acquire the radix for a given level
// in the super summary structure.
//
// With levelShift, one can compute the index of the summary at level l related to a
// pointer p by doing:
//
//	p >> levelShift[l]
var levelShift = [summaryLevels]uint{
	heapAddrBits - summaryL0Bits,
	heapAddrBits - summaryL0Bits - 1*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 2*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 3*summaryLevelBits,
	heapAddrBits - summaryL0Bits - 4*summaryLevelBits,
}

// levelLogPages is log2 the maximum number of runtime pages in the address space
// a summary in the given level represents.
//
// The leaf level always represents exactly log2 of 1 chunk's worth of pages.
var levelLogPages = [summaryLevels]uint{
	logPallocChunkPages + 4*summaryLevelBits,
	logPallocChunkPages + 3*summaryLevelBits,
	logPallocChunkPages + 2*summaryLevelBits,
	logPallocChunkPages + 1*summaryLevelBits,
	logPallocChunkPages,
}

// sysInit performs architecture-dependent initialization of fields
// in pageAlloc. pageAlloc should be uninitialized except for sysStat
// if any runtime statistic should be updated.
func (p *pageAlloc) sysInit() {
	// Reserve memory for each level. This will get mapped in
	// as R/W by setArenas.
	for l, shift := range levelShift {
		entries := 1 << (heapAddrBits - shift)

		// Reserve b bytes of memory anywhere in the address space.
		b := alignUp(uintptr(entries)*pallocSumBytes, physPageSize)
		r := sysReserve(nil, b)
		if r == nil {
			throw("failed to reserve page summary memory")
		}

		// Put this reservation into a slice.
		sl := notInHeapSlice{(*notInHeap)(r), 0, entries}
		p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))
	}
}
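
// summaryLevelIndexes is an illustrative sketch, not part of the original
// file: it makes the comment on levelShift concrete by computing, for an
// address p, the index of the summary covering p at each level, which is
// simply p >> levelShift[l]. Assuming heapAddrBits = 48 (the common case on
// these platforms), summaryL0Bits = 14, and summaryLevelBits = 3, levelShift
// works out to [34, 31, 28, 25, 22], so the leaf-level index is p's chunk
// index. The allocator itself uses addrsToSummaryRange and related helpers
// rather than this function.
func summaryLevelIndexes(p uintptr) [summaryLevels]int {
	var idx [summaryLevels]int
	for l, shift := range levelShift {
		idx[l] = int(p >> shift)
	}
	return idx
}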

// sysGrow performs architecture-dependent operations on heap
// growth for the page allocator, such as mapping in new memory
// for summaries. It also updates the length of the slices in
// p.summary.
//
// base is the base of the newly-added heap memory and limit is
// the first address past the end of the newly-added heap memory.
// Both must be aligned to pallocChunkBytes.
//
// The caller must update p.start and p.end after calling sysGrow.
func (p *pageAlloc) sysGrow(base, limit uintptr) {
	if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
		print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
		throw("sysGrow bounds not aligned to pallocChunkBytes")
	}

	// addrRangeToSummaryRange converts a range of addresses into a range
	// of summary indices which must be mapped to support those addresses
	// in the summary range.
	addrRangeToSummaryRange := func(level int, r addrRange) (int, int) {
		sumIdxBase, sumIdxLimit := addrsToSummaryRange(level, r.base.addr(), r.limit.addr())
		return blockAlignSummaryRange(level, sumIdxBase, sumIdxLimit)
	}

	// summaryRangeToSumAddrRange converts a range of indices in any
	// level of p.summary into page-aligned addresses which cover that
	// range of indices.
	summaryRangeToSumAddrRange := func(level, sumIdxBase, sumIdxLimit int) addrRange {
		baseOffset := alignDown(uintptr(sumIdxBase)*pallocSumBytes, physPageSize)
		limitOffset := alignUp(uintptr(sumIdxLimit)*pallocSumBytes, physPageSize)
		base := unsafe.Pointer(&p.summary[level][0])
		return addrRange{
			offAddr{uintptr(add(base, baseOffset))},
			offAddr{uintptr(add(base, limitOffset))},
		}
	}

	// addrRangeToSumAddrRange is a convenience function that converts
	// an address range r to the address range of the given summary level
	// that stores the summaries for r.
	addrRangeToSumAddrRange := func(level int, r addrRange) addrRange {
		sumIdxBase, sumIdxLimit := addrRangeToSummaryRange(level, r)
		return summaryRangeToSumAddrRange(level, sumIdxBase, sumIdxLimit)
	}

	// Find the first inUse index which is strictly greater than base.
	//
	// Because this function will never be asked to remap the same memory
	// twice, this index is effectively the index at which we would insert
	// this new growth, and base will never overlap/be contained within
	// any existing range.
	//
	// This will be used to look at what memory in the summary array is already
	// mapped before and after this new range.
	inUseIndex := p.inUse.findSucc(base)

	// Walk up the radix tree and map summaries in as needed.
	for l := range p.summary {
		// Figure out what part of the summary array this new address space needs.
		needIdxBase, needIdxLimit := addrRangeToSummaryRange(l, makeAddrRange(base, limit))

		// Update the summary slices with a new upper-bound. This ensures
		// we get tight bounds checks on at least the top bound.
		//
		// We must do this regardless of whether we map new memory.
		if needIdxLimit > len(p.summary[l]) {
			p.summary[l] = p.summary[l][:needIdxLimit]
		}

		// Compute the needed address range in the summary array for level l.
		need := summaryRangeToSumAddrRange(l, needIdxBase, needIdxLimit)

		// Prune need down to what needs to be newly mapped. Some parts of it may
		// already be mapped by what inUse describes due to page alignment requirements
		// for mapping. prune's invariants are guaranteed by the fact that this
		// function will never be asked to remap the same memory twice.
		if inUseIndex > 0 {
			need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex-1]))
		}
		if inUseIndex < len(p.inUse.ranges) {
			need = need.subtract(addrRangeToSumAddrRange(l, p.inUse.ranges[inUseIndex]))
		}
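
		// Note: because this growth never overlaps an existing range, any
		// already-mapped summary memory that intersects need lies within the
		// page-rounded summary ranges of the inUse ranges immediately before
		// and after the insertion point, so subtracting those two neighbors
		// is enough.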

		// It's possible that after our pruning above, there's nothing new to map.
		if need.size() == 0 {
			continue
		}

		// Map and commit need.
		sysMap(unsafe.Pointer(need.base.addr()), need.size(), p.sysStat)
		sysUsed(unsafe.Pointer(need.base.addr()), need.size())
	}
}
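
// summaryReservationBytes is an illustrative sketch, not part of the original
// file: it mirrors the arithmetic in sysInit to show how much address space
// is reserved per summary level. Level l holds 1<<(heapAddrBits-levelShift[l])
// entries of pallocSumBytes bytes each, rounded up to a multiple of the
// physical page size; the leaf level is by far the largest reservation.
func summaryReservationBytes() [summaryLevels]uintptr {
	var sizes [summaryLevels]uintptr
	for l, shift := range levelShift {
		entries := uintptr(1) << (heapAddrBits - shift)
		sizes[l] = alignUp(entries*pallocSumBytes, physPageSize)
	}
	return sizes
}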