@@ -62,57 +62,41 @@ func gcMarkRootPrepare() {
 	work.nDataRoots = 0
 	work.nBSSRoots = 0

-	// Only scan globals once per cycle; preferably concurrently.
-	if !work.markrootDone {
-		for _, datap := range activeModules() {
-			nDataRoots := nBlocks(datap.edata - datap.data)
-			if nDataRoots > work.nDataRoots {
-				work.nDataRoots = nDataRoots
-			}
+	// Scan globals.
+	for _, datap := range activeModules() {
+		nDataRoots := nBlocks(datap.edata - datap.data)
+		if nDataRoots > work.nDataRoots {
+			work.nDataRoots = nDataRoots
 		}
+	}

-		for _, datap := range activeModules() {
-			nBSSRoots := nBlocks(datap.ebss - datap.bss)
-			if nBSSRoots > work.nBSSRoots {
-				work.nBSSRoots = nBSSRoots
-			}
+	for _, datap := range activeModules() {
+		nBSSRoots := nBlocks(datap.ebss - datap.bss)
+		if nBSSRoots > work.nBSSRoots {
+			work.nBSSRoots = nBSSRoots
 		}
 	}

-	if !work.markrootDone {
-		// On the first markroot, we need to scan span roots.
-		// In concurrent GC, this happens during concurrent
-		// mark and we depend on addfinalizer to ensure the
-		// above invariants for objects that get finalizers
-		// after concurrent mark. In STW GC, this will happen
-		// during mark termination.
-		//
-		// We're only interested in scanning the in-use spans,
-		// which will all be swept at this point. More spans
-		// may be added to this list during concurrent GC, but
-		// we only care about spans that were allocated before
-		// this mark phase.
-		work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()
-
-		// On the first markroot, we need to scan all Gs. Gs
-		// may be created after this point, but it's okay that
-		// we ignore them because they begin life without any
-		// roots, so there's nothing to scan, and any roots
-		// they create during the concurrent phase will be
-		// scanned during mark termination. During mark
-		// termination, allglen isn't changing, so we'll scan
-		// all Gs.
-		work.nStackRoots = int(atomic.Loaduintptr(&allglen))
-	} else {
-		// We've already scanned span roots and kept the scan
-		// up-to-date during concurrent mark.
-		work.nSpanRoots = 0
-
-		// The hybrid barrier ensures that stacks can't
-		// contain pointers to unmarked objects, so on the
-		// second markroot, there's no need to scan stacks.
-		work.nStackRoots = 0
-	}
+	// Scan span roots for finalizer specials.
+	//
+	// We depend on addfinalizer to mark objects that get
+	// finalizers after root marking.
+	//
+	// We're only interested in scanning the in-use spans,
+	// which will all be swept at this point. More spans
+	// may be added to this list during concurrent GC, but
+	// we only care about spans that were allocated before
+	// this mark phase.
+	work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()
+
+	// Scan stacks.
+	//
+	// Gs may be created after this point, but it's okay that we
+	// ignore them because they begin life without any roots, so
+	// there's nothing to scan, and any roots they create during
+	// the concurrent phase will be scanned during mark
+	// termination.
+	work.nStackRoots = int(atomic.Loaduintptr(&allglen))

 	work.markrootNext = 0
 	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
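The root-job counts above come from chopping each root set into fixed-size scan blocks, so any GC worker can claim one block at a time. Below is a minimal runnable sketch of that arithmetic, assuming the runtime's 256 KiB rootBlockBytes block size; the segment sizes in main are made up for illustration.

package main

import "fmt"

// rootBlockBytes mirrors the runtime constant: global data and BSS
// are scanned in 256 KiB blocks so each markroot job stays small.
const rootBlockBytes = 256 << 10

// nBlocks is the round-up division the runtime uses to turn a
// segment size in bytes into a number of scan blocks.
func nBlocks(n uintptr) int {
	return int((n + rootBlockBytes - 1) / rootBlockBytes)
}

func main() {
	// A hypothetical module with a 1.5 MiB data segment and a
	// 100 KiB BSS: 6 data-root jobs and 1 BSS-root job.
	fmt.Println(nBlocks(1536<<10), nBlocks(100<<10)) // 6 1
}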
@@ -183,24 +167,15 @@ func markroot(gcw *gcWork, i uint32) {
 		}

 	case i == fixedRootFinalizers:
-		// Only do this once per GC cycle since we don't call
-		// queuefinalizer during marking.
-		if work.markrootDone {
-			break
-		}
 		for fb := allfin; fb != nil; fb = fb.alllink {
 			cnt := uintptr(atomic.Load(&fb.cnt))
 			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
 		}

 	case i == fixedRootFreeGStacks:
-		// Only do this once per GC cycle; preferably
-		// concurrently.
-		if !work.markrootDone {
-			// Switch to the system stack so we can call
-			// stackfree.
-			systemstack(markrootFreeGStacks)
-		}
+		// Switch to the system stack so we can call
+		// stackfree.
+		systemstack(markrootFreeGStacks)

 	case baseSpans <= i && i < baseStacks:
 		// mark MSpan.specials
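For orientation, markroot's switch maps a flat job index onto the root sets counted by gcMarkRootPrepare. Here is a simplified, self-contained sketch of that index layout; flush-cache roots are omitted for brevity, and describe is a hypothetical helper, not runtime code (the real base offsets are computed in gcMarkRootPrepare).

package main

import "fmt"

// Fixed roots come first, mirroring the runtime's fixedRoot* constants.
const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount
)

// describe maps a job index to a root set the way markroot's switch
// does. The counts would come from gcMarkRootPrepare.
func describe(i, nData, nBSS, nSpans, nStacks int) string {
	baseData := fixedRootCount
	baseBSS := baseData + nData
	baseSpans := baseBSS + nBSS
	baseStacks := baseSpans + nSpans
	switch {
	case i == fixedRootFinalizers:
		return "scan finalizer queue"
	case i == fixedRootFreeGStacks:
		return "free dead G stacks"
	case i < baseBSS:
		return fmt.Sprintf("data block %d", i-baseData)
	case i < baseSpans:
		return fmt.Sprintf("BSS block %d", i-baseBSS)
	case i < baseStacks:
		return fmt.Sprintf("span shard %d", i-baseSpans)
	case i < baseStacks+nStacks:
		return fmt.Sprintf("goroutine stack %d", i-baseStacks)
	}
	return "no job"
}

func main() {
	for i := 0; i < 8; i++ {
		fmt.Println(i, describe(i, 2, 1, 2, 1))
	}
}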
@@ -324,10 +299,6 @@ func markrootSpans(gcw *gcWork, shard int) {
 	// TODO(austin): There are several ideas for making this more
 	// efficient in issue #11485.

-	if work.markrootDone {
-		throw("markrootSpans during second markroot")
-	}
-
 	sg := mheap_.sweepgen
 	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
 	// Note that work.spans may not include spans that were
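The sweepSpans[mheap_.sweepgen/2%2] indexing works because sweepgen advances by two each GC cycle: sweepgen/2 counts cycles, and its parity selects the buffer holding the swept, in-use spans while the other buffer holds the unswept ones. A small runnable sketch of that arithmetic, with sweptBucket as an illustrative helper:

package main

import "fmt"

// sweptBucket picks the sweepSpans buffer holding spans already
// swept this cycle; the other buffer holds unswept spans. The two
// trade roles each cycle because sweepgen advances by 2.
func sweptBucket(sweepgen uint32) uint32 {
	return sweepgen / 2 % 2
}

func main() {
	for cycle := uint32(0); cycle < 4; cycle++ {
		sg := 2 * cycle // sweepgen after this many cycles
		fmt.Printf("cycle %d: swept=%d unswept=%d\n",
			cycle, sweptBucket(sg), 1-sweptBucket(sg))
	}
}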
@@ -719,11 +690,8 @@ func scanstack(gp *g, gcw *gcWork) {
 		throw("can't scan gchelper stack")
 	}

-	// Shrink the stack if not much of it is being used. During
-	// concurrent GC, we can do this during concurrent mark.
-	if !work.markrootDone {
-		shrinkstack(gp)
-	}
+	// Shrink the stack if not much of it is being used.
+	shrinkstack(gp)

 	// Scan the saved context register. This is effectively a live
 	// register that gets moved back and forth between the
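With the markrootDone check gone, shrinkstack runs on every stack scan rather than only during the first markroot pass. The sketch below models the shrink policy only loosely; shouldShrink and its constants are illustrative assumptions, not the runtime's exact code. The idea is that a stack becomes a candidate for halving when well under a quarter of it is in use.

package main

import "fmt"

// shouldShrink is an illustrative model of a stack-shrink heuristic:
// halve the stack only if less than a quarter of it is in use and the
// result would not go below an assumed minimum stack size.
func shouldShrink(used, size uintptr) bool {
	const fixedStack = 2048 // assumed minimum stack size
	return size/2 >= fixedStack && used < size/4
}

func main() {
	fmt.Println(shouldShrink(512, 8192))  // true: mostly idle stack
	fmt.Println(shouldShrink(4096, 8192)) // false: half the stack in use
}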