Source file src/runtime/mcache.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// Per-thread (in Go, per-P) cache for small objects.
// This includes a small object cache and local allocation stats.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
//
//go:notinheap
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	nextSample uintptr // trigger heap sample after allocating this many bytes
	scanAlloc  uintptr // bytes of scannable heap allocated

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	//
	// tinyAllocs is the number of tiny allocations performed
	// by the P that owns this mcache.
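	//
	// tinyoffset is the offset of the next free byte in the
	// current tiny block.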
	tiny       uintptr
	tinyoffset uintptr
	tinyAllocs uintptr

	// The rest is not accessed on every malloc.

	alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass

	stackcache [_NumStackOrders]stackfreelist

	// flushGen indicates the sweepgen during which this mcache
	// was last flushed. If flushGen != mheap_.sweepgen, the spans
	// in this mcache are stale and need to be flushed so they
	// can be swept. This is done in acquirep.
	flushGen uint32
}

// A gclink is a node in a linked list of blocks, like mlink,
// but it is opaque to the garbage collector.
// The GC does not trace the pointers during collection,
// and the compiler does not emit write barriers for assignments
// of gclinkptr values. Code should store references to gclinks
// as gclinkptr, not as *gclink.
type gclink struct {
	next gclinkptr
}

// A gclinkptr is a pointer to a gclink, but it is opaque
// to the garbage collector.
type gclinkptr uintptr

// ptr returns the *gclink form of p.
// The result should be used for accessing fields, not stored
// in other data structures.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}

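// A stackfreelist is a cache of free stacks of a single size order,
// used by the per-P stackcache above.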
type stackfreelist struct {
	list gclinkptr // linked list of free stacks
	size uintptr   // total size of stacks in list
}

// dummy mspan that contains no free objects.
var emptymspan mspan

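// allocmcache returns a new, empty mcache. The mcache itself is
// allocated from mheap_.cachealloc under mheap_.lock, every span
// class starts out pointing at the shared empty span, and
// nextSample is primed for heap profiling.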
func allocmcache() *mcache {
	var c *mcache
	systemstack(func() {
		lock(&mheap_.lock)
		c = (*mcache)(mheap_.cachealloc.alloc())
		c.flushGen = mheap_.sweepgen
		unlock(&mheap_.lock)
	})
	for i := range c.alloc {
		c.alloc[i] = &emptymspan
	}
	c.nextSample = nextSample()
	return c
}

// freemcache releases resources associated with this
// mcache and puts the object back onto a free list.
//
// Cached spans, stack segments, and local statistics are
// flushed to the global state before the mcache itself is freed.
func freemcache(c *mcache) {
	systemstack(func() {
		c.releaseAll()
		stackcache_clear(c)

		// NOTE(rsc,rlh): If gcworkbuffree comes back, we need to coordinate
		// with the stealing of gcworkbufs during garbage collection to avoid
		// a race where the workbuf is double-freed.
		// gcworkbuffree(c.gcworkbuf)

		lock(&mheap_.lock)
		mheap_.cachealloc.free(unsafe.Pointer(c))
		unlock(&mheap_.lock)
	})
}

// getMCache is a convenience function which tries to obtain an mcache.
//
// Returns nil only if we don't have a P and bootstrapping is complete
// (mcache0 has already been cleared by procresize). The caller's
// P must not change, so we must be in a non-preemptible state.
func getMCache() *mcache {
	// Grab the mcache, since that's where stats live.
	pp := getg().m.p.ptr()
	var c *mcache
	if pp == nil {
		// We will be called without a P while bootstrapping,
		// in which case we use mcache0, which is set in mallocinit.
		// mcache0 is cleared when bootstrapping is complete,
		// by procresize.
		c = mcache0
	} else {
		c = pp.mcache
	}
	return c
}

// refill acquires a new span of span class spc for c. This span will
// have at least one free object. The current span in c must be full.
//
// Must run in a non-preemptible context since otherwise the owner of
// c could change.
func (c *mcache) refill(spc spanClass) {
	// Return the current cached span to the central lists.
	s := c.alloc[spc]

	if uintptr(s.allocCount) != s.nelems {
		throw("refill of span with free space remaining")
	}
	if s != &emptymspan {
		// Mark this span as no longer cached.
		if s.sweepgen != mheap_.sweepgen+3 {
			throw("bad sweepgen in refill")
		}
		mheap_.central[spc].mcentral.uncacheSpan(s)
	}

	// Get a new cached span from the central lists.
	s = mheap_.central[spc].mcentral.cacheSpan()
	if s == nil {
		throw("out of memory")
	}

	if uintptr(s.allocCount) == s.nelems {
		throw("span has no free space")
	}

	// Indicate that this span is cached and prevent asynchronous
	// sweeping in the next sweep phase.
	s.sweepgen = mheap_.sweepgen + 3

	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	stats := memstats.heapStats.acquire()
	atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], uintptr(s.nelems)-uintptr(s.allocCount))

	// Flush tinyAllocs.
	if spc == tinySpanClass {
		atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
		c.tinyAllocs = 0
	}
	memstats.heapStats.release()

	// Update gcController.heapLive with the same assumption.
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&gcController.heapLive, int64(s.npages*pageSize)-int64(usedBytes))

	// While we're here, flush scanAlloc, since we have to call
	// revise anyway.
	atomic.Xadd64(&gcController.heapScan, int64(c.scanAlloc))
	c.scanAlloc = 0

	if trace.enabled {
		// gcController.heapLive changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// gcController.heapLive and heapScan changed.
		gcController.revise()
	}

	c.alloc[spc] = s
}

// allocLarge allocates a span for a large object.
// The boolean result indicates whether the span is known-zeroed.
// If it did not need to be zeroed, it may not have been zeroed;
// but if it came directly from the OS, it is already zeroed.
func (c *mcache) allocLarge(size uintptr, needzero bool, noscan bool) (*mspan, bool) {
	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mheap_.alloc will also sweep npages, so this only
	// pays the debt down to npages pages.
	deductSweepCredit(npages*_PageSize, npages)

	spc := makeSpanClass(0, noscan)
	s, isZeroed := mheap_.alloc(npages, spc, needzero)
	if s == nil {
		throw("out of memory")
	}
	stats := memstats.heapStats.acquire()
	atomic.Xadduintptr(&stats.largeAlloc, npages*pageSize)
	atomic.Xadduintptr(&stats.largeAllocCount, 1)
	memstats.heapStats.release()

	// Update gcController.heapLive and revise pacing if needed.
	atomic.Xadd64(&gcController.heapLive, int64(npages*pageSize))
	if trace.enabled {
		// Trace that a heap alloc occurred because gcController.heapLive changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		gcController.revise()
	}

	// Put the large span in the mcentral swept list so that it's
	// visible to the background sweeper.
	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
	s.limit = s.base() + size
	heapBitsForAddr(s.base()).initSpan(s)
	return s, isZeroed
}

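// releaseAll returns all of c's cached spans to their mcentrals and
// flushes the tiny allocator state and local statistics back to the
// global accounting.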
func (c *mcache) releaseAll() {
	// Take this opportunity to flush scanAlloc.
	atomic.Xadd64(&gcController.heapScan, int64(c.scanAlloc))
	c.scanAlloc = 0

	sg := mheap_.sweepgen
	for i := range c.alloc {
		s := c.alloc[i]
		if s != &emptymspan {
			// Adjust smallAllocCount in case the span wasn't fully allocated.
			n := uintptr(s.nelems) - uintptr(s.allocCount)
			stats := memstats.heapStats.acquire()
			atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], -n)
			memstats.heapStats.release()
			if s.sweepgen != sg+1 {
				// refill conservatively counted unallocated slots in gcController.heapLive.
				// Undo this.
				//
				// If this span was cached before sweep, then
				// gcController.heapLive was totally recomputed since
				// caching this span, so we don't do this for
				// stale spans.
				atomic.Xadd64(&gcController.heapLive, -int64(n)*int64(s.elemsize))
			}
			// Release the span to the mcentral.
			mheap_.central[i].mcentral.uncacheSpan(s)
			c.alloc[i] = &emptymspan
		}
	}
	// Clear tinyalloc pool.
	c.tiny = 0
	c.tinyoffset = 0

	// Flush tinyAllocs.
	stats := memstats.heapStats.acquire()
	atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
	c.tinyAllocs = 0
	memstats.heapStats.release()

	// Updated heapScan and possibly gcController.heapLive.
	if gcBlackenEnabled != 0 {
		gcController.revise()
	}
}

// prepareForSweep flushes c if the system has entered a new sweep phase
// since c was populated. This must happen between the sweep phase
// starting and the first allocation from c.
func (c *mcache) prepareForSweep() {
	// Alternatively, instead of making sure we do this on every P
	// between starting the world and allocating on that P, we
	// could leave allocate-black on, allow allocation to continue
	// as usual, use a ragged barrier at the beginning of sweep to
	// ensure all cached spans are swept, and then disable
	// allocate-black. However, with this approach it's difficult
	// to avoid spilling mark bits into the *next* GC cycle.
	sg := mheap_.sweepgen
	if c.flushGen == sg {
		return
	} else if c.flushGen != sg-2 {
		println("bad flushGen", c.flushGen, "in prepareForSweep; sweepgen", sg)
		throw("bad flushGen")
	}
	c.releaseAll()
	stackcache_clear(c)
	atomic.Store(&c.flushGen, mheap_.sweepgen) // Synchronizes with gcStart
}
