Commit 550dfc8

runtime: eliminate work.markrootdone and second root marking pass
Before STW and concurrent GC were unified, there could be either one or two root marking passes per GC cycle. There were several tasks we had to make sure happened once and only once (whether that was at the beginning of concurrent mark for concurrent GC or during mark termination for STW GC). We kept track of this in work.markrootdone.

Now that STW and concurrent GC both use the concurrent marking code and we've eliminated all work done by the second root marking pass, we only ever need a single root marking pass. Hence, we can eliminate work.markrootdone and all of the code that's conditional on it.

Updates #26903.

Change-Id: I654a0f5e21b9322279525560a31e64b8d33b790f
Reviewed-on: https://go-review.googlesource.com/c/134784
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
1 parent 873bd47 commit 550dfc8
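For readers skimming the diff below, here is a minimal, self-contained Go sketch (not the runtime's code) of the single-pass model this change settles on: root jobs are counted once per cycle, as gcMarkRootPrepare does; workers claim them by atomically advancing a cursor, as the runtime does with markrootNext and markrootJobs; and mark termination only verifies that the queue was drained, so no "roots already marked" flag is needed. The rootQueue type and the job and worker counts are illustrative.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// rootQueue is a toy stand-in for the runtime's markrootNext/markrootJobs pair.
type rootQueue struct {
	next uint32 // next root job index to claim
	jobs uint32 // total root jobs for this cycle
}

// prepare fixes the job count for the cycle, the way gcMarkRootPrepare
// does exactly once per cycle in the real runtime.
func (q *rootQueue) prepare(n uint32) {
	q.next = 0
	q.jobs = n
}

// drain has a worker claim root jobs by atomically advancing the cursor,
// so every job index is handed out exactly once across all workers.
func (q *rootQueue) drain(mark func(i uint32)) {
	for {
		i := atomic.AddUint32(&q.next, 1) - 1
		if i >= q.jobs {
			return
		}
		mark(i)
	}
}

func main() {
	var q rootQueue
	q.prepare(16) // pretend this cycle has 16 root jobs

	var wg sync.WaitGroup
	for w := 0; w < 4; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			q.drain(func(i uint32) { _ = i /* scan root shard i */ })
		}()
	}
	wg.Wait()

	// Analogue of gcMark's new check: no root job may remain unclaimed.
	if atomic.LoadUint32(&q.next) < q.jobs {
		panic("root jobs left after marking")
	}
	fmt.Println("all", q.jobs, "root jobs claimed exactly once")
}

In the runtime itself the cursor and job count live in the work struct as markrootNext and markrootJobs, and markroot does the per-index scanning.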

File tree

src/runtime/mgc.go
src/runtime/mgcmark.go

2 files changed: +41 -87 lines

src/runtime/mgc.go (+6 -20)

@@ -948,14 +948,6 @@ var work struct {
 	nFlushCacheRoots int
 	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
 
-	// markrootDone indicates that roots have been marked at least
-	// once during the current GC cycle. This is checked by root
-	// marking operations that have to happen only during the
-	// first root marking pass, whether that's during the
-	// concurrent mark phase in current GC or mark termination in
-	// STW GC.
-	markrootDone bool
-
 	// Each type of GC state transition is protected by a lock.
 	// Since multiple threads can simultaneously detect the state
 	// transition condition, any thread that detects a transition
@@ -1456,9 +1448,6 @@ top:
 	// below. The important thing is that the wb remains active until
 	// all marking is complete. This includes writes made by the GC.
 
-	// Record that one root marking pass has completed.
-	work.markrootDone = true
-
 	// Disable assists and background workers. We must do
 	// this before waking blocked assists.
 	atomic.Store(&gcBlackenEnabled, 0)
@@ -1909,19 +1898,20 @@ func gcMark(start_time int64) {
 	}
 	work.tstart = start_time
 
-	// Queue root marking jobs.
-	gcMarkRootPrepare()
-
 	work.nwait = 0
 	work.ndone = 0
 	work.nproc = uint32(gcprocs())
 
 	// Check that there's no marking work remaining.
-	if work.full != 0 || work.nDataRoots+work.nBSSRoots+work.nSpanRoots+work.nStackRoots != 0 {
-		print("runtime: full=", hex(work.full), " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
+	if work.full != 0 || work.markrootNext < work.markrootJobs {
+		print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, "\n")
 		panic("non-empty mark queue after concurrent mark")
 	}
 
+	// Clear root marking queue.
+	work.markrootNext = 0
+	work.markrootJobs = 0
+
 	if work.nproc > 1 {
 		noteclear(&work.alldone)
 		helpgc(int32(work.nproc))
@@ -1945,9 +1935,6 @@ func gcMark(start_time int64) {
 		notesleep(&work.alldone)
 	}
 
-	// Record that at least one root marking pass has completed.
-	work.markrootDone = true
-
 	// Clear out buffers and double-check that all gcWork caches
 	// are empty. This should be ensured by gcMarkDone before we
 	// enter mark termination.
@@ -2061,7 +2048,6 @@ func gcResetMarkState() {
 
 	work.bytesMarked = 0
 	work.initialHeapLive = atomic.Load64(&memstats.heap_live)
-	work.markrootDone = false
 }
 
 // Hooks for other packages
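Taken together, the mgc.go changes above swap flag-driven bookkeeping for a direct check of the shared root job queue. As a hedged illustration (the helper name and signature are hypothetical, not runtime API), the precondition gcMark now enforces before mark termination proceeds amounts to:

// markWorkDrained reports whether concurrent mark left nothing behind:
// no full work buffers and no unclaimed root marking jobs.
func markWorkDrained(full uintptr, markrootNext, markrootJobs uint32) bool {
	return full == 0 && markrootNext >= markrootJobs
}

Resetting work.markrootNext and work.markrootJobs right after this check, as the new code does, means any drain loop that runs during mark termination sees an empty root queue rather than re-running the root jobs.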

src/runtime/mgcmark.go (+35 -67)

@@ -62,57 +62,41 @@ func gcMarkRootPrepare() {
 	work.nDataRoots = 0
 	work.nBSSRoots = 0
 
-	// Only scan globals once per cycle; preferably concurrently.
-	if !work.markrootDone {
-		for _, datap := range activeModules() {
-			nDataRoots := nBlocks(datap.edata - datap.data)
-			if nDataRoots > work.nDataRoots {
-				work.nDataRoots = nDataRoots
-			}
+	// Scan globals.
+	for _, datap := range activeModules() {
+		nDataRoots := nBlocks(datap.edata - datap.data)
+		if nDataRoots > work.nDataRoots {
+			work.nDataRoots = nDataRoots
 		}
+	}
 
-		for _, datap := range activeModules() {
-			nBSSRoots := nBlocks(datap.ebss - datap.bss)
-			if nBSSRoots > work.nBSSRoots {
-				work.nBSSRoots = nBSSRoots
-			}
+	for _, datap := range activeModules() {
+		nBSSRoots := nBlocks(datap.ebss - datap.bss)
+		if nBSSRoots > work.nBSSRoots {
+			work.nBSSRoots = nBSSRoots
 		}
 	}
 
-	if !work.markrootDone {
-		// On the first markroot, we need to scan span roots.
-		// In concurrent GC, this happens during concurrent
-		// mark and we depend on addfinalizer to ensure the
-		// above invariants for objects that get finalizers
-		// after concurrent mark. In STW GC, this will happen
-		// during mark termination.
-		//
-		// We're only interested in scanning the in-use spans,
-		// which will all be swept at this point. More spans
-		// may be added to this list during concurrent GC, but
-		// we only care about spans that were allocated before
-		// this mark phase.
-		work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()
-
-		// On the first markroot, we need to scan all Gs. Gs
-		// may be created after this point, but it's okay that
-		// we ignore them because they begin life without any
-		// roots, so there's nothing to scan, and any roots
-		// they create during the concurrent phase will be
-		// scanned during mark termination. During mark
-		// termination, allglen isn't changing, so we'll scan
-		// all Gs.
-		work.nStackRoots = int(atomic.Loaduintptr(&allglen))
-	} else {
-		// We've already scanned span roots and kept the scan
-		// up-to-date during concurrent mark.
-		work.nSpanRoots = 0
-
-		// The hybrid barrier ensures that stacks can't
-		// contain pointers to unmarked objects, so on the
-		// second markroot, there's no need to scan stacks.
-		work.nStackRoots = 0
-	}
+	// Scan span roots for finalizer specials.
+	//
+	// We depend on addfinalizer to mark objects that get
+	// finalizers after root marking.
+	//
+	// We're only interested in scanning the in-use spans,
+	// which will all be swept at this point. More spans
+	// may be added to this list during concurrent GC, but
+	// we only care about spans that were allocated before
+	// this mark phase.
+	work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()
+
+	// Scan stacks.
+	//
+	// Gs may be created after this point, but it's okay that we
+	// ignore them because they begin life without any roots, so
+	// there's nothing to scan, and any roots they create during
+	// the concurrent phase will be scanned during mark
+	// termination.
+	work.nStackRoots = int(atomic.Loaduintptr(&allglen))
 
 	work.markrootNext = 0
 	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
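For context on the counts prepared above: global data and BSS segments are scanned in fixed-size shards, and the per-module maximum is kept so that a single shard index range covers every module's segment. A small sketch of that rounding, assuming a 256 KiB shard size to mirror the runtime's rootBlockBytes constant (the helper and the segment sizes here are illustrative):

package main

import "fmt"

// Assumed shard size; the runtime's rootBlockBytes constant plays this role.
const rootBlockBytes = 256 << 10

// nBlocks reports how many fixed-size root-scanning shards are needed to
// cover a segment of the given length, rounding up.
func nBlocks(bytes uintptr) int {
	return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
}

func main() {
	// Hypothetical data segment sizes for two loaded modules.
	dataSegments := []uintptr{1 << 20, 300 << 10}
	nDataRoots := 0
	for _, size := range dataSegments {
		if n := nBlocks(size); n > nDataRoots {
			// Keep the maximum across modules, as the loops above do.
			nDataRoots = n
		}
	}
	fmt.Println("data root jobs:", nDataRoots) // 4 shards of 256 KiB cover 1 MiB
}

The same shard counts, together with the span and stack root counts, are what feed the work.markrootJobs total at the end of the hunk.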
@@ -183,24 +167,15 @@ func markroot(gcw *gcWork, i uint32) {
 		}
 
 	case i == fixedRootFinalizers:
-		// Only do this once per GC cycle since we don't call
-		// queuefinalizer during marking.
-		if work.markrootDone {
-			break
-		}
 		for fb := allfin; fb != nil; fb = fb.alllink {
 			cnt := uintptr(atomic.Load(&fb.cnt))
 			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
 		}
 
 	case i == fixedRootFreeGStacks:
-		// Only do this once per GC cycle; preferably
-		// concurrently.
-		if !work.markrootDone {
-			// Switch to the system stack so we can call
-			// stackfree.
-			systemstack(markrootFreeGStacks)
-		}
+		// Switch to the system stack so we can call
+		// stackfree.
+		systemstack(markrootFreeGStacks)
 
 	case baseSpans <= i && i < baseStacks:
 		// mark MSpan.specials
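The markroot cases above are selected purely by the job index claimed from the shared counter, so removing the markrootDone guards leaves each case with a single, unconditional body. A purely hypothetical sketch of that range-based dispatch (the runtime computes real baseSpans/baseStacks boundaries in gcMarkRootPrepare; the function and labels below are illustrative):

// describeRootJob maps a claimed job index onto the kind of root work it
// selects, mirroring how markroot switches on i: fixed singleton jobs and
// global shards first, then contiguous ranges of span shards and stacks.
func describeRootJob(i, baseSpans, baseStacks, end uint32) string {
	switch {
	case i < baseSpans:
		return "fixed or global root (finalizers, free g stacks, data, BSS)"
	case i < baseStacks:
		return "span root shard (scan finalizer specials in one span block)"
	case i < end:
		return "goroutine stack (scan one G's stack)"
	default:
		return "no such job this cycle"
	}
}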
@@ -324,10 +299,6 @@ func markrootSpans(gcw *gcWork, shard int) {
 	// TODO(austin): There are several ideas for making this more
 	// efficient in issue #11485.
 
-	if work.markrootDone {
-		throw("markrootSpans during second markroot")
-	}
-
 	sg := mheap_.sweepgen
 	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
 	// Note that work.spans may not include spans that were
@@ -719,11 +690,8 @@ func scanstack(gp *g, gcw *gcWork) {
 		throw("can't scan gchelper stack")
 	}
 
-	// Shrink the stack if not much of it is being used. During
-	// concurrent GC, we can do this during concurrent mark.
-	if !work.markrootDone {
-		shrinkstack(gp)
-	}
+	// Shrink the stack if not much of it is being used.
+	shrinkstack(gp)
 
 	// Scan the saved context register. This is effectively a live
 	// register that gets moved back and forth between the
