@@ -575,15 +575,41 @@ func runCleanups() {
 	for {
 		b := gcCleanups.dequeue()
 		if raceenabled {
+			// Approximately: adds a happens-before edge between the cleanup
+			// argument being mutated and the call to the cleanup below.
 			racefingo()
 		}

 		gcCleanups.beginRunningCleanups()
 		for i := 0; i < int(b.n); i++ {
 			fn := b.cleanups[i]
+
+			var racectx uintptr
+			if raceenabled {
+				// Enter a new race context so the race detector can catch
+				// potential races between cleanups, even if they execute on
+				// the same goroutine.
+				//
+				// Synchronize on fn. This would fail to find races on the
+				// closed-over values in fn (suppose fn is passed to multiple
+				// AddCleanup calls) if fn was not unique, but it is. Update
+				// the synchronization on fn if you intend to optimize it
+				// and store the cleanup function and cleanup argument on the
+				// queue directly.
+				racerelease(unsafe.Pointer(fn))
+				racectx = raceEnterNewCtx()
+				raceacquire(unsafe.Pointer(fn))
+			}
+
+			// Execute the next cleanup.
 			cleanup := *(*func())(unsafe.Pointer(&fn))
 			cleanup()
 			b.cleanups[i] = nil
+
+			if raceenabled {
+				// Restore the old context.
+				raceRestoreCtx(racectx)
+			}
 		}
 		gcCleanups.endRunningCleanups()

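For reference, here is a minimal user-level sketch of the happens-before edge the comment above describes (the types and names below are illustrative, not part of this change): writes made to the cleanup argument before the object becomes unreachable should be visible to the cleanup without `-race` reporting them as a conflict.

```go
package main

import (
	"runtime"
	"time"
)

// Hypothetical types, used only for this sketch.
type conn struct{ id int }
type connMeta struct{ lastID int }

func main() {
	c := &conn{id: 7}
	meta := &connMeta{}
	meta.lastID = c.id // mutate the cleanup argument...
	runtime.AddCleanup(c, func(m *connMeta) {
		println("cleanup saw id", m.lastID) // ...read later by the cleanup.
	}, meta)

	c = nil                            // drop the last reference
	runtime.GC()                       // let the cleanup get queued
	time.Sleep(100 * time.Millisecond) // crude wait for the cleanup goroutine
}
```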
@@ -621,3 +647,53 @@ func unique_runtime_blockUntilEmptyCleanupQueue(timeout int64) bool {
 func sync_test_runtime_blockUntilEmptyCleanupQueue(timeout int64) bool {
 	return gcCleanups.blockUntilEmpty(timeout)
 }
+
+// raceEnterNewCtx creates a new racectx and switches the current
+// goroutine to it. Returns the old racectx.
+//
+// Must be running on a user goroutine. nosplit to match other race
+// instrumentation.
+//
+//go:nosplit
+func raceEnterNewCtx() uintptr {
+	// We use the existing ctx as the spawn context, but gp.gopc
+	// as the spawn PC to make the error output a little nicer
+	// (pointing to AddCleanup, where the goroutines are created).
+	//
+	// We also need to carefully indicate to the race detector
+	// that the goroutine stack will only be accessed by the new
+	// race context, to avoid false positives on stack locations.
+	// We do this by marking the stack as free in the first context
+	// and then re-marking it as allocated in the second. Crucially,
+	// there must be (1) no race operations and (2) no stack changes
+	// in between. (1) is easy to avoid because we're in the runtime
+	// so there's no implicit race instrumentation. To avoid (2) we
+	// defensively become non-preemptible so the GC can't stop us,
+	// and rely on the fact that racemalloc, racefree, and racectxstart
+	// are nosplit.
+	mp := acquirem()
+	gp := getg()
+	ctx := getg().racectx
+	racefree(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+	getg().racectx = racectxstart(gp.gopc, ctx)
+	racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+	releasem(mp)
+	return ctx
+}
+
+// raceRestoreCtx restores ctx on the goroutine. It is the inverse of
+// raceEnterNewCtx and must be called with its result.
+//
+// Must be running on a user goroutine. nosplit to match other race
+// instrumentation.
+//
+//go:nosplit
+func raceRestoreCtx(ctx uintptr) {
+	mp := acquirem()
+	gp := getg()
+	racefree(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+	racectxend(getg().racectx)
+	racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
+	getg().racectx = ctx
+	releasem(mp)
+}
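And a hedged sketch of what the per-cleanup race contexts above are for (again, everything below is illustrative and not part of the change): two cleanups both write an unsynchronized package variable. Even though the cleanup queue may run them back-to-back on the same goroutine, each cleanup now executes in its own race context, so running this under `-race` can report the conflict.

```go
package main

import (
	"runtime"
	"time"
)

var hits int // shared and unsynchronized on purpose

func main() {
	a, b := new(int), new(int)
	// Each queued fn is a unique value (as the comment in the diff notes),
	// so the racerelease/raceacquire pair on fn ties each cleanup only to
	// its own registration, not to the other cleanup.
	runtime.AddCleanup(a, func(int) { hits++ }, 0)
	runtime.AddCleanup(b, func(int) { hits++ }, 0)

	a, b = nil, nil                    // drop the last references
	runtime.GC()                       // queue both cleanups
	time.Sleep(100 * time.Millisecond) // crude wait for them to run
}
```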