Commit 3ea94ae

mknyszek authored and gopherbot committed
runtime: help the race detector detect possible concurrent cleanups
This change makes it so that cleanup goroutines, in race mode, create a fake race context and switch to it, emulating cleanups running on new goroutines. This helps in catching races between cleanups that might run concurrently.

Change-Id: I4c4e33054313798d4ac4e5d91ff2487ea3eb4b16
Reviewed-on: https://go-review.googlesource.com/c/go/+/652635
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Michael Knyszek <mknyszek@google.com>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
1 parent b30fa1b commit 3ea94ae
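
To illustrate the user-visible effect, here is a minimal, hypothetical sketch (not part of this change; it is adapted from the TestRaceBetweenCleanups test added below). Two cleanups registered with runtime.AddCleanup write the same variable without synchronization. Even if both cleanups happen to run back-to-back on the same cleanup goroutine, the fake race context makes the race detector treat them as if they ran on separate goroutines, so a build with -race can report the conflicting writes.

package main

import (
	"runtime"
	"time"
	"unsafe"
)

func main() {
	// Use a struct with a pointer field so the allocations don't go
	// through the tiny allocator and become unreachable predictably
	// (the same trick the test below uses).
	type T struct {
		v int
		p unsafe.Pointer
	}
	shared := new(int)
	a := new(T)
	b := new(T)
	cleanup := func(x int) { *shared = x } // both cleanups write *shared
	runtime.AddCleanup(a, cleanup, 1)
	runtime.AddCleanup(b, cleanup, 2)
	a, b = nil, nil

	runtime.GC()
	time.Sleep(100 * time.Millisecond) // give the cleanups time to run
}

Run with the race detector enabled (go run -race), the two cleanup writes to *shared may now be reported as a data race; previously they went unnoticed whenever both cleanups executed sequentially on the same cleanup goroutine.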

4 files changed: +125 -0 lines changed

src/runtime/mcleanup.go

Lines changed: 76 additions & 0 deletions
@@ -575,15 +575,41 @@ func runCleanups() {
 	for {
 		b := gcCleanups.dequeue()
 		if raceenabled {
+			// Approximately: adds a happens-before edge between the cleanup
+			// argument being mutated and the call to the cleanup below.
 			racefingo()
 		}

 		gcCleanups.beginRunningCleanups()
 		for i := 0; i < int(b.n); i++ {
 			fn := b.cleanups[i]
+
+			var racectx uintptr
+			if raceenabled {
+				// Enter a new race context so the race detector can catch
+				// potential races between cleanups, even if they execute on
+				// the same goroutine.
+				//
+				// Synchronize on fn. This would fail to find races on the
+				// closed-over values in fn (suppose fn is passed to multiple
+				// AddCleanup calls) if fn was not unique, but it is. Update
+				// the synchronization on fn if you intend to optimize it
+				// and store the cleanup function and cleanup argument on the
+				// queue directly.
+				racerelease(unsafe.Pointer(fn))
+				racectx = raceEnterNewCtx()
+				raceacquire(unsafe.Pointer(fn))
+			}
+
+			// Execute the next cleanup.
 			cleanup := *(*func())(unsafe.Pointer(&fn))
 			cleanup()
 			b.cleanups[i] = nil
+
+			if raceenabled {
+				// Restore the old context.
+				raceRestoreCtx(racectx)
+			}
 		}
 		gcCleanups.endRunningCleanups()

@@ -621,3 +647,53 @@ func unique_runtime_blockUntilEmptyCleanupQueue(timeout int64) bool {
 func sync_test_runtime_blockUntilEmptyCleanupQueue(timeout int64) bool {
 	return gcCleanups.blockUntilEmpty(timeout)
 }
+
+// raceEnterNewCtx creates a new racectx and switches the current
+// goroutine to it. Returns the old racectx.
+//
+// Must be running on a user goroutine. nosplit to match other race
+// instrumentation.
+//
+//go:nosplit
+func raceEnterNewCtx() uintptr {
+	// We use the existing ctx as the spawn context, but gp.gopc
+	// as the spawn PC to make the error output a little nicer
+	// (pointing to AddCleanup, where the goroutines are created).
+	//
+	// We also need to carefully indicate to the race detector
+	// that the goroutine stack will only be accessed by the new
+	// race context, to avoid false positives on stack locations.
+	// We do this by marking the stack as free in the first context
+	// and then re-marking it as allocated in the second. Crucially,
+	// there must be (1) no race operations and (2) no stack changes
+	// in between. (1) is easy to avoid because we're in the runtime
+	// so there's no implicit race instrumentation. To avoid (2) we
+	// defensively become non-preemptible so the GC can't stop us,
+	// and rely on the fact that racemalloc, racefree, and racectxstart
+	// are nosplit.
+	mp := acquirem()
+	gp := getg()
+	ctx := getg().racectx
+	racefree(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+	getg().racectx = racectxstart(gp.gopc, ctx)
+	racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+	releasem(mp)
+	return ctx
+}
+
+// raceRestoreCtx restores ctx on the goroutine. It is the inverse of
+// raceEnterNewCtx and must be called with its result.
+//
+// Must be running on a user goroutine. nosplit to match other race
+// instrumentation.
+//
+//go:nosplit
+func raceRestoreCtx(ctx uintptr) {
+	mp := acquirem()
+	gp := getg()
+	racefree(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+	racectxend(getg().racectx)
+	racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+	getg().racectx = ctx
+	releasem(mp)
+}
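
For contrast, the "happens-before edge" noted in the first hunk above means that an ordinary write to a cleanup's argument before the object becomes unreachable is not reported as a race with the cleanup itself. A hypothetical sketch (not part of this change; it mirrors the TestNoRaceCleanup test added below):

package main

import (
	"runtime"
	"time"
)

func main() {
	done := make(chan bool)
	go func() {
		x := new(string)
		y := new(string)
		// The cleanup will write *y once x becomes unreachable.
		runtime.AddCleanup(x, func(y *string) { *y = "from cleanup" }, y)
		// This write to *y happens before x is dropped, so the edge
		// established via racefingo keeps -race quiet here.
		*y = "from goroutine"
		runtime.KeepAlive(x)
		done <- true
	}()
	<-done
	runtime.GC()
	time.Sleep(100 * time.Millisecond) // give the cleanup time to run
}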

src/runtime/race.go

Lines changed: 7 additions & 0 deletions
@@ -566,6 +566,13 @@ func racegoend() {
 	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
 }

+//go:nosplit
+func racectxstart(pc, spawnctx uintptr) uintptr {
+	var racectx uintptr
+	racecall(&__tsan_go_start, spawnctx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
+	return racectx
+}
+
 //go:nosplit
 func racectxend(racectx uintptr) {
 	racecall(&__tsan_go_end, racectx, 0, 0, 0)

src/runtime/race/testdata/finalizer_test.go

Lines changed: 41 additions & 0 deletions
@@ -9,6 +9,7 @@ import (
 	"sync"
 	"testing"
 	"time"
+	"unsafe"
 )

 func TestNoRaceFin(t *testing.T) {
@@ -66,3 +67,43 @@ func TestRaceFin(t *testing.T) {
 	time.Sleep(100 * time.Millisecond)
 	y = 66
 }
+
+func TestNoRaceCleanup(t *testing.T) {
+	c := make(chan bool)
+	go func() {
+		x := new(string)
+		y := new(string)
+		runtime.AddCleanup(x, func(y *string) {
+			*y = "foo"
+		}, y)
+		*y = "bar"
+		runtime.KeepAlive(x)
+		c <- true
+	}()
+	<-c
+	runtime.GC()
+	time.Sleep(100 * time.Millisecond)
+}
+
+func TestRaceBetweenCleanups(t *testing.T) {
+	// Allocate struct with pointer to avoid hitting tinyalloc.
+	// Otherwise we can't be sure when the allocation will
+	// be freed.
+	type T struct {
+		v int
+		p unsafe.Pointer
+	}
+	sharedVar := new(int)
+	v0 := new(T)
+	v1 := new(T)
+	cleanup := func(x int) {
+		*sharedVar = x
+	}
+	runtime.AddCleanup(v0, cleanup, 0)
+	runtime.AddCleanup(v1, cleanup, 0)
+	v0 = nil
+	v1 = nil
+
+	runtime.GC()
+	time.Sleep(100 * time.Millisecond)
+}

src/runtime/race0.go

Lines changed: 1 addition & 0 deletions
@@ -41,4 +41,5 @@ func racemalloc(p unsafe.Pointer, sz uintptr) { th
 func racefree(p unsafe.Pointer, sz uintptr) { throw("race") }
 func racegostart(pc uintptr) uintptr { throw("race"); return 0 }
 func racegoend() { throw("race") }
+func racectxstart(spawnctx, racectx uintptr) uintptr { throw("race"); return 0 }
 func racectxend(racectx uintptr) { throw("race") }
