
Commit 4c75672

mknyszek authored and gopherbot committed
runtime: set mspan limit field early and eagerly
Currently the mspan limit field is set after allocSpan returns, *after* the span has already been published to the GC (including the conservative scanner). But the limit field is load-bearing, because it's checked to filter out invalid pointers. A stale limit value could cause a crash by having the conservative scanner access allocBits out of bounds.

Fix this by setting the mspan limit field before publishing the span. For large objects and arena chunks, we adjust the limit down after allocSpan because we don't have access to the true object's size from allocSpan. However, this is safe, since we first initialize the limit to something definitely safe (the actual span bounds) and only adjust it down after. Adjusting it down has the benefit of more precise debug output, but the window in which it's imprecise is also fine because a single object (logically, with arena chunks) occupies the whole span, so the 'invalid' part of the memory will just safely point back to that object. We can't do this for smaller objects because the limit will include space that does *not* contain any valid objects.

Fixes #74288.

Change-Id: I0a22e5b9bccc1bfdf51d2b73ea7130f1b99c0c7c
Reviewed-on: https://go-review.googlesource.com/c/go/+/682655
Reviewed-by: Keith Randall <khr@google.com>
Auto-Submit: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@golang.org>
1 parent c6ac736 commit 4c75672
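
To see why a stale limit is dangerous, here is a minimal, self-contained sketch of the kind of pointer filter the commit message describes. This is not the runtime's real code; the `span` type and `objIndex` method below are made up for illustration. The point is that a candidate address is only treated as a heap object if it falls below the span's limit, so a limit that still reflects older, larger bounds lets the scanner compute a slot index past the end of allocBits.

```go
package main

import "fmt"

// span is a toy stand-in for the runtime's mspan, reduced to the fields
// needed to show the pointer filter; the names and layout are hypothetical.
type span struct {
	base     uintptr // first byte of the span
	limit    uintptr // one past the last byte that may hold an object
	elemsize uintptr // object slot size
	nelems   uintptr // number of slots tracked by allocBits
}

// objIndex mimics the shape of a conservative "is p a heap object?" check:
// p is rejected unless it lies in [base, limit). If limit is stale and too
// large, p passes the filter and the computed index runs past nelems, i.e.
// past the end of the allocation bitmap.
func (s *span) objIndex(p uintptr) (uintptr, bool) {
	if p < s.base || p >= s.limit {
		return 0, false // filtered out as an invalid pointer
	}
	return (p - s.base) / s.elemsize, true
}

func main() {
	// Four 1 KiB slots occupy the first 4 KiB of an 8 KiB span. Imagine the
	// limit was left covering the whole 8 KiB: the filter lets through a
	// pointer into the object-free tail.
	s := &span{base: 0x1000, limit: 0x1000 + 8192, elemsize: 1024, nelems: 4}
	if i, ok := s.objIndex(0x1000 + 5000); ok {
		fmt.Printf("slot %d accepted, but only %d slots are tracked\n", i, s.nelems)
	}
}
```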


4 files changed: +21 -5 lines changed


src/runtime/arena.go
Lines changed: 9 additions & 1 deletion

```diff
@@ -1052,10 +1052,18 @@ func (h *mheap) allocUserArenaChunk() *mspan {
 	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
 	s.isUserArenaChunk = true
 	s.elemsize -= userArenaChunkReserveBytes()
-	s.limit = s.base() + s.elemsize
 	s.freeindex = 1
 	s.allocCount = 1
 
+	// Adjust s.limit down to the object-containing part of the span.
+	//
+	// This is just to create a slightly tighter bound on the limit.
+	// It's totally OK if the garbage collector, in particular
+	// conservative scanning, can temporarily observe an inflated
+	// limit. It will simply mark the whole chunk or just skip it
+	// since we're in the mark phase anyway.
+	s.limit = s.base() + s.elemsize
+
 	// Adjust size to include redzone.
 	if asanenabled {
 		s.elemsize -= redZoneSize(s.elemsize)
```
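
Below is a rough illustration of the window this hunk's comment describes, using made-up constants rather than the runtime's real chunk size and reserve (stand-ins for userArenaChunkBytes and userArenaChunkReserveBytes()): between initSpan and the adjustment above, the limit briefly covers the chunk's reserved tail, and any conservative pointer landing there still resolves to the single chunk object.

```go
package main

import "fmt"

// Illustrative numbers only; they do not match the runtime's constants.
const (
	pageSize      = 8 << 10 // 8 KiB pages
	chunkPages    = 1024    // pretend a user arena chunk spans 8 MiB
	reservedBytes = 4 << 10 // stand-in for the reserved tail of the chunk
)

func main() {
	base := uintptr(0x100000)
	spanBytes := uintptr(chunkPages * pageSize)

	// Step 1 (initSpan): the limit covers the whole span. Loose, but safe.
	loose := base + spanBytes

	// Step 2 (allocUserArenaChunk): elemsize excludes the reserved tail and
	// the limit is tightened to match.
	elemsize := spanBytes - reservedBytes
	tight := base + elemsize

	// A conservative pointer into the reserved tail during the window
	// between the two steps still resolves to the single chunk object, so
	// marking the chunk (or skipping it) is harmless.
	p := tight + 100
	fmt.Printf("p in bounds under loose limit: %v, under tight limit: %v\n",
		p < loose, p < tight)
}
```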

src/runtime/mcache.go
Lines changed: 8 additions & 0 deletions

```diff
@@ -253,6 +253,14 @@ func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
 	// Put the large span in the mcentral swept list so that it's
 	// visible to the background sweeper.
 	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
+
+	// Adjust s.limit down to the object-containing part of the span.
+	//
+	// This is just to create a slightly tighter bound on the limit.
+	// It's totally OK if the garbage collector, in particular
+	// conservative scanning, can temporarily observe an inflated
+	// limit. It will simply mark the whole object or just skip it
+	// since we're in the mark phase anyway.
 	s.limit = s.base() + size
 	s.initHeapBits()
 	return s
```
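
The large-object case follows the same safe-then-tighten pattern. Here is a small sketch with assumed numbers; the pagesFor helper is hypothetical, not the runtime's page-rounding code. The span handed back by the heap is rounded up to whole pages, so until allocLarge tightens the limit to base+size, the limit also covers the rounding slack; that slack belongs to the span's only object, so a pointer into it is harmless.

```go
package main

import "fmt"

const pageSize = 8 << 10 // illustrative 8 KiB page

// pagesFor is a hypothetical helper mirroring how a large allocation's byte
// size is rounded up to a whole number of pages.
func pagesFor(size uintptr) uintptr {
	return (size + pageSize - 1) / pageSize
}

func main() {
	base := uintptr(0x200000)
	size := uintptr(100 << 10) // one 100 KiB object

	npages := pagesFor(size)
	loose := base + npages*pageSize // limit as initSpan leaves it: whole span
	tight := base + size            // limit after allocLarge tightens it

	// Before the tightening, a conservative pointer into the rounding slack
	// [tight, loose) still falls inside the span's single object, so at
	// worst that one object gets marked; nothing past it is ever touched.
	fmt.Printf("rounding slack: %d bytes (loose %#x, tight %#x)\n",
		loose-tight, loose, tight)
}
```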

src/runtime/mcentral.go
Lines changed: 0 additions & 3 deletions

```diff
@@ -250,13 +250,10 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 // grow allocates a new empty span from the heap and initializes it for c's size class.
 func (c *mcentral) grow() *mspan {
 	npages := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()])
-	size := uintptr(gc.SizeClassToSize[c.spanclass.sizeclass()])
-
 	s := mheap_.alloc(npages, c.spanclass)
 	if s == nil {
 		return nil
 	}
-	s.limit = s.base() + size*uintptr(s.nelems)
 	s.initHeapBits()
 	return s
 }
```

src/runtime/mheap.go
Lines changed: 4 additions & 1 deletion

```diff
@@ -1445,7 +1445,6 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
 	if typ.manual() {
 		s.manualFreeList = 0
 		s.nelems = 0
-		s.limit = s.base() + s.npages*pageSize
 		s.state.set(mSpanManual)
 	} else {
 		// We must set span properties before the span is published anywhere
@@ -1486,6 +1485,9 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
 	s.gcmarkBits = newMarkBits(uintptr(s.nelems))
 	s.allocBits = newAllocBits(uintptr(s.nelems))
 
+	// Adjust s.limit down to the object-containing part of the span.
+	s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems)
+
 	// It's safe to access h.sweepgen without the heap lock because it's
 	// only ever updated with the world stopped and we run on the
 	// systemstack which blocks a STW transition.
@@ -1785,6 +1787,7 @@ func (span *mspan) init(base uintptr, npages uintptr) {
 	span.list = nil
 	span.startAddr = base
 	span.npages = npages
+	span.limit = base + npages*gc.PageSize // see go.dev/issue/74288; adjusted later for heap spans
 	span.allocCount = 0
 	span.spanclass = 0
 	span.elemsize = 0
```
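
For small size classes, by contrast, the tightening has to happen before the span is published, which is what the initSpan hunk above does. Below is a sketch with an assumed 48-byte size class (all numbers illustrative, not taken from the runtime's tables): the span tail past elemsize*nelems holds no objects, so a limit still covering it would let a conservative pointer produce an out-of-range allocBits index.

```go
package main

import "fmt"

const pageSize = 8 << 10 // illustrative; also the runtime's 8 KiB page size

func main() {
	// A hypothetical small size class: 48-byte objects in a one-page span.
	base := uintptr(0x400000)
	elemsize := uintptr(48)
	npages := uintptr(1)

	// Step 1 (span.init): seed the limit with the page-rounded span bounds,
	// so the field is never stale even before the span's class is known.
	seeded := base + npages*pageSize

	// Step 2 (initSpan, before the span is published): tighten the limit to
	// the object-covered prefix. The tail beyond it holds no objects at all.
	nelems := npages * pageSize / elemsize
	tight := base + elemsize*nelems

	// A pointer into that tail must be rejected by the tight limit; with
	// only the seeded limit it would yield an object index >= nelems and
	// run off the end of allocBits.
	p := tight + 8
	fmt.Printf("tail waste: %d bytes; index %d with only %d slots\n",
		seeded-tight, (p-base)/elemsize, nelems)
}
```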
