
Commit 457c8f4

runtime: eliminate blocking GC work drains
Now work.helperDrainBlock is always false, so we can remove it and the
code paths that only ran when it was true. That means we no longer use
the gcDrainBlock mode of gcDrain, so we can eliminate that. That means
we no longer use gcWork.get, so we can eliminate that. That means we no
longer use getfull, so we can eliminate that.

Updates #26903. This is a follow-up to unifying STW GC and concurrent GC.

Change-Id: I8dbcf8ce24861df0a6149e0b7c5cd0eadb5c13f6
Reviewed-on: https://go-review.googlesource.com/c/134782
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
1 parent 143b13a commit 457c8f4
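
Before the per-file diffs, a minimal, self-contained Go sketch of the non-blocking drain shape this commit standardizes on: try a fast local get, then a slower get, flush a buffer that may surface more work, and return rather than block. The workQueue type and its flushPending method are hypothetical stand-ins invented for this illustration; only tryGetFast and tryGet echo real gcWork method names, and none of this is runtime code.

// Hypothetical sketch (not runtime code): work acquisition never blocks;
// when nothing can be found, the drain returns and leaves termination to
// the caller.
package main

import "fmt"

// workQueue stands in for the runtime's gcWork. Only the shape of the
// calls matters here; flushPending loosely plays the role of wbBufFlush.
type workQueue struct {
	local   []uintptr // per-worker fast-path cache
	shared  []uintptr // slower shared source
	pending []uintptr // work hidden in a "write barrier buffer"
}

func pop(s *[]uintptr) uintptr {
	if len(*s) == 0 {
		return 0
	}
	v := (*s)[len(*s)-1]
	*s = (*s)[:len(*s)-1]
	return v
}

func (w *workQueue) tryGetFast() uintptr { return pop(&w.local) }
func (w *workQueue) tryGet() uintptr     { return pop(&w.shared) }

// flushPending may make more work visible, the way flushing the write
// barrier buffer can in the real gcDrain.
func (w *workQueue) flushPending() {
	w.shared = append(w.shared, w.pending...)
	w.pending = nil
}

func drain(w *workQueue) {
	for {
		b := w.tryGetFast()
		if b == 0 {
			b = w.tryGet()
			if b == 0 {
				// Flushing may create more work; retry once.
				w.flushPending()
				b = w.tryGet()
			}
		}
		if b == 0 {
			// Unable to get work: return instead of blocking in a
			// getfull-style barrier.
			return
		}
		fmt.Println("scan object", b)
	}
}

func main() {
	drain(&workQueue{
		local:   []uintptr{1, 2},
		shared:  []uintptr{3},
		pending: []uintptr{4},
	})
}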

File tree

3 files changed: +18 −139 lines changed


src/runtime/mgc.go

+4 −21
@@ -944,14 +944,6 @@ var work struct {
 	ndone uint32
 	alldone note
 
-	// helperDrainBlock indicates that GC mark termination helpers
-	// should pass gcDrainBlock to gcDrain to block in the
-	// getfull() barrier. Otherwise, they should pass gcDrainNoBlock.
-	//
-	// TODO: This is a temporary fallback to work around races
-	// that cause early mark termination.
-	helperDrainBlock bool
-
 	// Number of roots of various root types. Set by gcMarkRootPrepare.
 	nFlushCacheRoots int
 	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
@@ -1528,7 +1520,7 @@ func gcMarkTermination(nextTriggerRatio float64) {
 		gcResetMarkState()
 		initCheckmarks()
 		gcw := &getg().m.p.ptr().gcw
-		gcDrain(gcw, gcDrainNoBlock)
+		gcDrain(gcw, 0)
 		wbBufFlush1(getg().m.p.ptr())
 		gcw.dispose()
 		clearCheckmarks()
@@ -1814,7 +1806,7 @@ func gcBgMarkWorker(_p_ *p) {
 			}
 			// Go back to draining, this time
 			// without preemption.
-			gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit)
+			gcDrain(&_p_.gcw, gcDrainFlushBgCredit)
 		case gcMarkWorkerFractionalMode:
 			gcDrain(&_p_.gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
 		case gcMarkWorkerIdleMode:
@@ -1905,7 +1897,6 @@ func gcMark(start_time int64) {
 	work.nwait = 0
 	work.ndone = 0
 	work.nproc = uint32(gcprocs())
-	work.helperDrainBlock = false
 
 	// Check that there's no marking work remaining.
 	if work.full != 0 || work.nDataRoots+work.nBSSRoots+work.nSpanRoots+work.nStackRoots != 0 {
@@ -1921,11 +1912,7 @@ func gcMark(start_time int64) {
 	gchelperstart()
 
 	gcw := &getg().m.p.ptr().gcw
-	if work.helperDrainBlock {
-		gcDrain(gcw, gcDrainBlock)
-	} else {
-		gcDrain(gcw, gcDrainNoBlock)
-	}
+	gcDrain(gcw, 0)
 
 	if debug.gccheckmark > 0 {
 		// This is expensive when there's a large number of
@@ -2119,11 +2106,7 @@ func gchelper() {
 	// Parallel mark over GC roots and heap
 	if gcphase == _GCmarktermination {
 		gcw := &_g_.m.p.ptr().gcw
-		if work.helperDrainBlock {
-			gcDrain(gcw, gcDrainBlock) // blocks in getfull
-		} else {
-			gcDrain(gcw, gcDrainNoBlock)
-		}
+		gcDrain(gcw, 0)
 	}
 
 	nproc := atomic.Load(&work.nproc) // work.nproc can change right after we increment work.ndone

src/runtime/mgcmark.go

+13 −31
@@ -771,34 +771,26 @@ type gcDrainFlags int
 
 const (
 	gcDrainUntilPreempt gcDrainFlags = 1 << iota
-	gcDrainNoBlock
 	gcDrainFlushBgCredit
 	gcDrainIdle
 	gcDrainFractional
-
-	// gcDrainBlock means neither gcDrainUntilPreempt or
-	// gcDrainNoBlock. It is the default, but callers should use
-	// the constant for documentation purposes.
-	gcDrainBlock gcDrainFlags = 0
 )
 
 // gcDrain scans roots and objects in work buffers, blackening grey
-// objects until all roots and work buffers have been drained.
+// objects until it is unable to get more work. It may return before
+// GC is done; it's the caller's responsibility to balance work from
+// other Ps.
 //
 // If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
-// is set. This implies gcDrainNoBlock.
+// is set.
 //
 // If flags&gcDrainIdle != 0, gcDrain returns when there is other work
-// to do. This implies gcDrainNoBlock.
+// to do.
 //
 // If flags&gcDrainFractional != 0, gcDrain self-preempts when
 // pollFractionalWorkerExit() returns true. This implies
 // gcDrainNoBlock.
 //
-// If flags&gcDrainNoBlock != 0, gcDrain returns as soon as it is
-// unable to get more work. Otherwise, it will block until all
-// blocking calls are blocked in gcDrain.
-//
 // If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
 // credit to gcController.bgScanCredit every gcCreditSlack units of
 // scan work.
@@ -811,7 +803,6 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 
 	gp := getg().m.curg
 	preemptible := flags&gcDrainUntilPreempt != 0
-	blocking := flags&(gcDrainUntilPreempt|gcDrainIdle|gcDrainFractional|gcDrainNoBlock) == 0
 	flushBgCredit := flags&gcDrainFlushBgCredit != 0
 	idle := flags&gcDrainIdle != 0
 
@@ -855,24 +846,19 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 			gcw.balance()
 		}
 
-		var b uintptr
-		if blocking {
-			b = gcw.get()
-		} else {
-			b = gcw.tryGetFast()
+		b := gcw.tryGetFast()
+		if b == 0 {
+			b = gcw.tryGet()
 			if b == 0 {
+				// Flush the write barrier
+				// buffer; this may create
+				// more work.
+				wbBufFlush(nil, 0)
 				b = gcw.tryGet()
-				if b == 0 {
-					// Flush the write barrier
-					// buffer; this may create
-					// more work.
-					wbBufFlush(nil, 0)
-					b = gcw.tryGet()
-				}
 			}
 		}
 		if b == 0 {
-			// work barrier reached or tryGet failed.
+			// Unable to get work.
 			break
 		}
 		scanobject(b, gcw)
@@ -898,10 +884,6 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 		}
 	}
 
-	// In blocking mode, write barriers are not allowed after this
-	// point because we must preserve the condition that the work
-	// buffers are empty.
-
done:
 	// Flush remaining scan work credit.
 	if gcw.scanWork > 0 {
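
The new doc comment above shifts responsibility: gcDrain may now return while marking is still incomplete, so its callers have to detect overall termination. Purely as an illustration of that kind of coordination, here is a small self-contained sketch in which workers drain without blocking and a shared idle counter, loosely modeled on the work.nwait/work.nproc condition described in the removed getfull comment, signals completion. It is a toy under those assumptions, not the runtime's actual mark-completion protocol.

// Hypothetical sketch: workers drain a queue without ever blocking on
// it, and a shared idle counter tells the group when no more work can
// appear (all workers idle, nothing left to create work).
package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

func main() {
	const nproc = 4
	queue := make(chan int, 64) // stand-in for the work.full list
	for i := 0; i < 12; i++ {
		queue <- i
	}

	var nwait int32 // stand-in for work.nwait
	var wg sync.WaitGroup
	wg.Add(nproc)
	for id := 0; id < nproc; id++ {
		go func(id int) {
			defer wg.Done()
			idle := false
			for {
				select {
				case job := <-queue:
					if idle {
						// Found work again: leave the idle pool.
						idle = false
						atomic.AddInt32(&nwait, -1)
					}
					fmt.Printf("worker %d marked object %d\n", id, job)
				default:
					if !idle {
						// Out of work: join the idle pool.
						idle = true
						atomic.AddInt32(&nwait, 1)
					}
					if atomic.LoadInt32(&nwait) == nproc {
						// Everyone is idle and found nothing: done.
						return
					}
					runtime.Gosched()
				}
			}
		}(id)
	}
	wg.Wait()
	fmt.Println("mark phase complete")
}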

src/runtime/mgcwork.go

+1 −87
@@ -46,7 +46,7 @@ func init() {
 //
 // (preemption must be disabled)
 // gcw := &getg().m.p.ptr().gcw
-// .. call gcw.put() to produce and gcw.get() to consume ..
+// .. call gcw.put() to produce and gcw.tryGet() to consume ..
 //
 // It's important that any use of gcWork during the mark phase prevent
 // the garbage collector from transitioning to mark termination since
@@ -236,37 +236,6 @@ func (w *gcWork) tryGetFast() uintptr {
 	return wbuf.obj[wbuf.nobj]
 }
 
-// get dequeues a pointer for the garbage collector to trace, blocking
-// if necessary to ensure all pointers from all queues and caches have
-// been retrieved. get returns 0 if there are no pointers remaining.
-//go:nowritebarrierrec
-func (w *gcWork) get() uintptr {
-	wbuf := w.wbuf1
-	if wbuf == nil {
-		w.init()
-		wbuf = w.wbuf1
-		// wbuf is empty at this point.
-	}
-	if wbuf.nobj == 0 {
-		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
-		wbuf = w.wbuf1
-		if wbuf.nobj == 0 {
-			owbuf := wbuf
-			wbuf = getfull()
-			if wbuf == nil {
-				return 0
-			}
-			putempty(owbuf)
-			w.wbuf1 = wbuf
-		}
-	}
-
-	// TODO: This might be a good place to add prefetch code
-
-	wbuf.nobj--
-	return wbuf.obj[wbuf.nobj]
-}
-
 // dispose returns any cached pointers to the global queue.
 // The buffers are being put on the full queue so that the
 // write barriers will not simply reacquire them before the
@@ -449,61 +418,6 @@ func trygetfull() *workbuf {
 	return b
 }
 
-// Get a full work buffer off the work.full list.
-// If nothing is available wait until all the other gc helpers have
-// finished and then return nil.
-// getfull acts as a barrier for work.nproc helpers. As long as one
-// gchelper is actively marking objects it
-// may create a workbuffer that the other helpers can work on.
-// The for loop either exits when a work buffer is found
-// or when _all_ of the work.nproc GC helpers are in the loop
-// looking for work and thus not capable of creating new work.
-// This is in fact the termination condition for the STW mark
-// phase.
-//go:nowritebarrier
-func getfull() *workbuf {
-	b := (*workbuf)(work.full.pop())
-	if b != nil {
-		b.checknonempty()
-		return b
-	}
-
-	incnwait := atomic.Xadd(&work.nwait, +1)
-	if incnwait > work.nproc {
-		println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
-		throw("work.nwait > work.nproc")
-	}
-	for i := 0; ; i++ {
-		if work.full != 0 {
-			decnwait := atomic.Xadd(&work.nwait, -1)
-			if decnwait == work.nproc {
-				println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
-				throw("work.nwait > work.nproc")
-			}
-			b = (*workbuf)(work.full.pop())
-			if b != nil {
-				b.checknonempty()
-				return b
-			}
-			incnwait := atomic.Xadd(&work.nwait, +1)
-			if incnwait > work.nproc {
-				println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
-				throw("work.nwait > work.nproc")
-			}
-		}
-		if work.nwait == work.nproc && work.markrootNext >= work.markrootJobs {
-			return nil
-		}
-		if i < 10 {
-			procyield(20)
-		} else if i < 20 {
-			osyield()
-		} else {
-			usleep(100)
-		}
-	}
-}
-
 //go:nowritebarrier
 func handoff(b *workbuf) *workbuf {
 	// Make new buffer with half of b's pointers.
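
The usage comment updated at the top of this file (put to produce, tryGet to consume, with a dispose at the end) suggests the lifecycle sketched below. This is a hypothetical, self-contained illustration with a toy cache type; it is not the runtime's gcWork implementation, and the globalFull list and flush threshold are invented for the example.

// Hypothetical sketch of the put/tryGet/dispose lifecycle described in
// the updated gcWork usage comment. The cache type, globalFull list,
// and flush threshold exist only for illustration.
package main

import "fmt"

var globalFull [][]uintptr // stand-in for the global work.full list

// cache is a toy stand-in for a per-P gcWork.
type cache struct {
	buf []uintptr
}

// put records a pointer locally, spilling a full buffer to the global
// list so other workers could pick it up.
func (c *cache) put(p uintptr) {
	c.buf = append(c.buf, p)
	if len(c.buf) >= 4 {
		globalFull = append(globalFull, c.buf)
		c.buf = nil
	}
}

// tryGet returns a locally cached pointer, or 0 if there is none; it
// never blocks waiting for other workers.
func (c *cache) tryGet() uintptr {
	if len(c.buf) == 0 {
		return 0
	}
	p := c.buf[len(c.buf)-1]
	c.buf = c.buf[:len(c.buf)-1]
	return p
}

// dispose publishes whatever is still cached to the global list.
func (c *cache) dispose() {
	if len(c.buf) > 0 {
		globalFull = append(globalFull, c.buf)
		c.buf = nil
	}
}

func main() {
	var gcw cache
	for i := uintptr(1); i <= 5; i++ {
		gcw.put(i) // produce
	}
	for p := gcw.tryGet(); p != 0; p = gcw.tryGet() {
		fmt.Println("consume", p) // consume until the local cache is empty
	}
	gcw.dispose()
	fmt.Println("buffers published to global list:", len(globalFull))
}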
