@@ -19,6 +19,9 @@ rustc_index::newtype_index! {
 }
 
 bitflags::bitflags! {
+    /// Whether and how this goal has been used as the root of a
+    /// cycle. We track the kind of cycle as we're otherwise forced
+    /// to always rerun at least once.
     #[derive(Debug, Clone, Copy, PartialEq, Eq)]
     struct HasBeenUsed: u8 {
         const INDUCTIVE_CYCLE = 1 << 0;
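
The new doc comment explains that `HasBeenUsed` records *how* a goal was used as a cycle root, so the solver is not forced to always rerun at least once. A small standalone illustration of those flag semantics: usage kinds accumulate as a union, and an exact `==` check (unlike `contains`) tells us a root was only ever used in one kind of cycle. The `COINDUCTIVE_CYCLE` value, the `main`, and the assertions are illustrative assumptions here, not taken from the compiler; the snippet uses the bitflags 2.x syntax shown in the hunk.

```rust
use bitflags::bitflags;

bitflags! {
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct HasBeenUsed: u8 {
        const INDUCTIVE_CYCLE = 1 << 0;
        // Assumed second flag, for illustration only.
        const COINDUCTIVE_CYCLE = 1 << 1;
    }
}

fn main() {
    // A goal starts out unused as a cycle root.
    let mut used = HasBeenUsed::empty();

    // It is later reached through both an inductive and a coinductive cycle;
    // the usage kinds accumulate as a union.
    used |= HasBeenUsed::INDUCTIVE_CYCLE;
    used |= HasBeenUsed::COINDUCTIVE_CYCLE;

    // `contains` only checks membership, while `==` checks that this is
    // *exactly* the recorded usage, i.e. the goal was used purely in one
    // kind of cycle.
    assert!(used.contains(HasBeenUsed::INDUCTIVE_CYCLE));
    assert!(used != HasBeenUsed::INDUCTIVE_CYCLE);
    assert_eq!(used, HasBeenUsed::all());
}
```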
@@ -29,23 +32,30 @@ bitflags::bitflags! {
 #[derive(Debug)]
 struct StackEntry<'tcx> {
     input: CanonicalInput<'tcx>,
+
     available_depth: Limit,
+
     /// The maximum depth reached by this stack entry, only up-to date
     /// for the top of the stack and lazily updated for the rest.
     reached_depth: StackDepth,
-    /// Whether this entry is a cycle participant which is not a root.
+
+    /// Whether this entry is a non-root cycle participant.
     ///
-    /// If so, it must not be moved to the global cache. See
-    /// [SearchGraph::cycle_participants] for more details.
+    /// We must not move the result of non-root cycle participants to the
+    /// global cache. See [SearchGraph::cycle_participants] for more details.
+    /// We store the highest stack depth of a cycle head this goal is involved
+    /// in. This is necessary to soundly cache its provisional result.
     non_root_cycle_participant: Option<StackDepth>,
 
     encountered_overflow: bool,
+
     has_been_used: HasBeenUsed,
     /// Starts out as `None` and gets set when rerunning this
     /// goal in case we encounter a cycle.
     provisional_result: Option<QueryResult<'tcx>>,
 }
 
+/// The provisional result for a goal which is not on the stack.
 struct DetachedEntry<'tcx> {
     /// The head of the smallest non-trivial cycle involving this entry.
     ///
@@ -59,6 +69,16 @@ struct DetachedEntry<'tcx> {
     result: QueryResult<'tcx>,
 }
 
+/// Stores the stack depth of a currently evaluated goal *and* already
+/// computed results for goals which depend on other goals still on the stack.
+///
+/// The provisional result may depend on whether the stack above it is inductive
+/// or coinductive. Because of this, we store separate provisional results for
+/// each case. If a provisional entry is not applicable, it may be the case
+/// that we already have a provisional result while computing a goal. In this
+/// case we prefer the provisional result to potentially avoid fixpoint iterations.
+///
+/// See tests/ui/traits/next-solver/cycles/mixed-cycle-2.rs for an example.
 #[derive(Default)]
 struct ProvisionalCacheEntry<'tcx> {
     stack_depth: Option<StackDepth>,
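
To make the `ProvisionalCacheEntry` doc comment above concrete, here is a heavily simplified, self-contained sketch of the same shape: one slot per path kind plus the stack depth while the goal is still being computed, and a lookup that only returns a slot whose path kind matches the current path to the cycle head. All types and the `applicable` helper are illustrative stand-ins, not the compiler's API.

```rust
// Illustrative stand-ins for the compiler's types.
type StackDepth = usize;
type QueryResult = &'static str;

/// Result computed for a goal whose evaluation still depends on goals on the stack.
struct DetachedEntry {
    head: StackDepth,
    result: QueryResult,
}

/// One cached result per path kind, plus the depth of the goal while it is
/// still on the stack.
#[derive(Default)]
struct ProvisionalCacheEntry {
    stack_depth: Option<StackDepth>,
    with_coinductive_stack: Option<DetachedEntry>,
    with_inductive_stack: Option<DetachedEntry>,
}

impl ProvisionalCacheEntry {
    /// Pick the stored result whose path kind matches the path the current
    /// goal takes to the cycle head; otherwise the entry is not usable.
    fn applicable(&self, path_to_head_is_coinductive: bool) -> Option<&DetachedEntry> {
        if path_to_head_is_coinductive {
            self.with_coinductive_stack.as_ref()
        } else {
            self.with_inductive_stack.as_ref()
        }
    }
}

fn main() {
    let mut entry = ProvisionalCacheEntry::default();
    entry.with_inductive_stack = Some(DetachedEntry { head: 0, result: "ambiguous" });

    // A result computed on an inductive path must not be reused on a
    // coinductive one, and vice versa.
    assert!(entry.applicable(true).is_none());
    assert_eq!(entry.applicable(false).unwrap().result, "ambiguous");
    assert!(entry.stack_depth.is_none());
}
```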
@@ -200,6 +220,16 @@ impl<'tcx> SearchGraph<'tcx> {
             .all(|entry| entry.input.value.goal.predicate.is_coinductive(tcx))
     }
 
+    // When encountering a solver cycle, the result of the current goal
+    // depends on goals lower on the stack.
+    //
+    // We therefore have to be careful when caching goals. Only the final result
+    // of the cycle root, i.e. the lowest goal on the stack involved in this cycle,
+    // is moved to the global cache while all others are stored in a provisional cache.
+    //
+    // We update both the head of this cycle to rerun its evaluation until
+    // we reach a fixpoint and all other cycle participants to make sure that
+    // their result does not get moved to the global cache.
     fn tag_cycle_participants(
         stack: &mut IndexVec<StackDepth, StackEntry<'tcx>>,
         cycle_participants: &mut FxHashSet<CanonicalInput<'tcx>>,
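
A simplified sketch of the tagging step the comment above describes: the cycle head is marked with the kind of cycle so it knows it must iterate to a fixpoint, and every goal above the head is recorded as a non-root participant (keeping the highest head it depends on, as the `StackEntry` docs require) so its result never reaches the global cache. Plain `Vec`/`HashSet` and booleans stand in for the compiler's `IndexVec`, `FxHashSet`, and bitflags; this is a reading aid under those assumptions, not the real `tag_cycle_participants`.

```rust
use std::collections::HashSet;

type StackDepth = usize;
type Input = u32;

#[derive(Default)]
struct Entry {
    input: Input,
    // Which cycle kinds this goal has been used as the root of.
    used_as_inductive_root: bool,
    used_as_coinductive_root: bool,
    // Highest cycle head this goal depends on, if it is not a root itself.
    non_root_cycle_participant: Option<StackDepth>,
}

fn tag_cycle_participants(
    stack: &mut [Entry],
    cycle_participants: &mut HashSet<Input>,
    is_coinductive: bool,
    head: StackDepth,
) {
    // The head has to rerun until its result stops changing.
    if is_coinductive {
        stack[head].used_as_coinductive_root = true;
    } else {
        stack[head].used_as_inductive_root = true;
    }
    // Everything above the head is a non-root participant: remember the
    // highest head it is involved with and make sure its result is never
    // moved to the global cache.
    for entry in &mut stack[head + 1..] {
        entry.non_root_cycle_participant = entry.non_root_cycle_participant.max(Some(head));
        cycle_participants.insert(entry.input);
    }
}

fn main() {
    let mut stack: Vec<Entry> =
        (0..3).map(|i| Entry { input: i as Input, ..Default::default() }).collect();
    let mut participants = HashSet::new();

    // The goal at depth 2 encountered a coinductive cycle with the goal at depth 0.
    tag_cycle_participants(&mut stack, &mut participants, true, 0);

    assert!(stack[0].used_as_coinductive_root);
    assert_eq!(stack[1].non_root_cycle_participant, Some(0));
    assert!(participants.contains(&1u32) && participants.contains(&2u32));
}
```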
@@ -281,24 +311,20 @@ impl<'tcx> SearchGraph<'tcx> {
         }
 
         // Check whether the goal is in the provisional cache.
+        // The provisional result may rely on the path to its cycle roots,
+        // so we have to check that the path of the current goal matches that
+        // of the cache entry.
         let cache_entry = self.provisional_cache.entry(input).or_default();
-        if let Some(with_coinductive_stack) = &cache_entry.with_coinductive_stack
-            && Self::stack_coinductive_from(tcx, &self.stack, with_coinductive_stack.head)
-        {
-            // We have a nested goal which is already in the provisional cache, use
-            // its result. We do not provide any usage kind as that should have been
-            // already set correctly while computing the cache entry.
-            inspect
-                .goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::ProvisionalCacheHit);
-            Self::tag_cycle_participants(
-                &mut self.stack,
-                &mut self.cycle_participants,
-                HasBeenUsed::empty(),
-                with_coinductive_stack.head,
-            );
-            return with_coinductive_stack.result;
-        } else if let Some(with_inductive_stack) = &cache_entry.with_inductive_stack
-            && !Self::stack_coinductive_from(tcx, &self.stack, with_inductive_stack.head)
+        if let Some(entry) = cache_entry
+            .with_coinductive_stack
+            .as_ref()
+            .filter(|p| Self::stack_coinductive_from(tcx, &self.stack, p.head))
+            .or_else(|| {
+                cache_entry
+                    .with_inductive_stack
+                    .as_ref()
+                    .filter(|p| !Self::stack_coinductive_from(tcx, &self.stack, p.head))
+            })
         {
             // We have a nested goal which is already in the provisional cache, use
             // its result. We do not provide any usage kind as that should have been
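
The rewritten lookup above folds two nearly identical `if let … &&` branches into a single `Option` chain, so the cache-hit body only needs to be written once and works for whichever slot matched. A tiny, self-contained example of the same `as_ref().filter().or_else()` shape, with plain integers instead of cache entries (all names here are made up for illustration):

```rust
/// Prefer the first slot when the path is coinductive, the second otherwise,
/// without duplicating the code that consumes the hit.
fn pick<'a>(
    coinductive: &'a Option<i32>,
    inductive: &'a Option<i32>,
    path_is_coinductive: bool,
) -> Option<&'a i32> {
    coinductive
        .as_ref()
        .filter(|_| path_is_coinductive)
        .or_else(|| inductive.as_ref().filter(|_| !path_is_coinductive))
}

fn main() {
    assert_eq!(pick(&Some(1), &None, true), Some(&1));
    assert_eq!(pick(&Some(1), &None, false), None);
    assert_eq!(pick(&None, &Some(2), false), Some(&2));
}
```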
@@ -309,20 +335,17 @@ impl<'tcx> SearchGraph<'tcx> {
                 &mut self.stack,
                 &mut self.cycle_participants,
                 HasBeenUsed::empty(),
-                with_inductive_stack.head,
+                entry.head,
             );
-            return with_inductive_stack.result;
+            return entry.result;
         } else if let Some(stack_depth) = cache_entry.stack_depth {
             debug!("encountered cycle with depth {stack_depth:?}");
-            // We have a nested goal which relies on a goal `root` deeper in the stack.
+            // We have a nested goal which directly relies on a goal deeper in the stack.
             //
-            // We first store that we may have to reprove `root` in case the provisional
-            // response is not equal to the final response. We also update the depth of all
-            // goals which recursively depend on our current goal to depend on `root`
-            // instead.
+            // We start by tagging all cycle participants, as that's necessary for caching.
             //
-            // Finally we can return either the provisional response for that goal if we have a
-            // coinductive cycle or an ambiguous result if the cycle is inductive.
+            // Finally we can return either the provisional response or the initial response
+            // in case we're in the first fixpoint iteration for this goal.
             inspect.goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::CycleInStack);
             let is_coinductive_cycle = Self::stack_coinductive_from(tcx, &self.stack, stack_depth);
             let usage_kind = if is_coinductive_cycle {
@@ -410,10 +433,10 @@ impl<'tcx> SearchGraph<'tcx> {
                 false
             };
 
+            // If we did not reach a fixpoint, update the provisional result and reevaluate.
             if reached_fixpoint {
                 return (stack_entry, result);
             } else {
-                // Did not reach a fixpoint, update the provisional result and reevaluate.
                 let depth = self.stack.push(StackEntry {
                     has_been_used: HasBeenUsed::empty(),
                     provisional_result: Some(result),
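
This hunk sits inside the fixpoint loop for cycle roots: when the freshly computed result still differs from the provisional result the cycle participants were using, the goal is pushed back onto the stack with `provisional_result: Some(result)` and evaluated again. A heavily simplified standalone sketch of such a loop; the `fixpoint` function, its iteration limit, and the toy `evaluate` closure are illustrative assumptions, not the solver's code.

```rust
/// Toy fixpoint driver: keep re-evaluating while the result still changes,
/// up to an iteration limit (the solver likewise bounds its reruns).
fn fixpoint(initial_assumption: u32, limit: usize, evaluate: impl Fn(u32) -> u32) -> Option<u32> {
    let mut provisional = initial_assumption;
    for _ in 0..limit {
        // Re-evaluate the cycle root; participants observe `provisional`
        // wherever they hit the cycle head.
        let result = evaluate(provisional);
        if result == provisional {
            // Reached a fixpoint: the result no longer depends on the
            // assumption used inside the cycle, so it is safe to cache.
            return Some(result);
        }
        // Did not reach a fixpoint: update the provisional result and rerun.
        provisional = result;
    }
    // Treat hitting the limit as overflow: no usable result.
    None
}

fn main() {
    // This toy evaluation converges to 10 regardless of the starting assumption.
    let eval = |prev: u32| (prev / 2).max(5) + 5;
    assert_eq!(fixpoint(0, 8, eval), Some(10));
}
```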
@@ -435,9 +458,6 @@ impl<'tcx> SearchGraph<'tcx> {
         // We're now done with this goal. In case this goal is involved in a larger cycle
         // do not remove it from the provisional cache and update its provisional result.
         // We only add the root of cycles to the global cache.
-        //
-        // It is not possible for any nested goal to depend on something deeper on the
-        // stack, as this would have also updated the depth of the current goal.
         if let Some(head) = final_entry.non_root_cycle_participant {
             let coinductive_stack = Self::stack_coinductive_from(tcx, &self.stack, head);
 
@@ -449,6 +469,9 @@ impl<'tcx> SearchGraph<'tcx> {
                 entry.with_inductive_stack = Some(DetachedEntry { head, result });
             }
         } else {
+            self.provisional_cache.remove(&input);
+            let reached_depth = final_entry.reached_depth.as_usize() - self.stack.len();
+            let cycle_participants = mem::take(&mut self.cycle_participants);
             // When encountering a cycle, both inductive and coinductive, we only
             // move the root into the global cache. We also store all other cycle
             // participants involved.
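
Taken together, the last few hunks implement the caching rule spelled out in the comments: a non-root cycle participant only ever gets a provisional, path-dependent entry, while the cycle root alone is moved to the global cache along with the participants collected during the cycle. A condensed, self-contained sketch of that decision; every type and the `finish_goal` helper are illustrative stand-ins rather than the compiler's code.

```rust
use std::collections::{HashMap, HashSet};

type Input = u32;
type StackDepth = usize;
type QueryResult = &'static str;

struct Detached {
    head: StackDepth,
    result: QueryResult,
}

#[derive(Default)]
struct ProvisionalEntry {
    with_coinductive_stack: Option<Detached>,
    with_inductive_stack: Option<Detached>,
}

#[derive(Default)]
struct Caches {
    provisional: HashMap<Input, ProvisionalEntry>,
    global: HashMap<Input, (QueryResult, HashSet<Input>)>,
}

impl Caches {
    /// Decide where the result of a finished goal is stored.
    fn finish_goal(
        &mut self,
        input: Input,
        result: QueryResult,
        // `Some(head)` if this goal was a non-root cycle participant.
        non_root_cycle_participant: Option<StackDepth>,
        path_to_head_is_coinductive: bool,
        cycle_participants: &mut HashSet<Input>,
    ) {
        if let Some(head) = non_root_cycle_participant {
            // Non-root participants only get a provisional, path-dependent entry.
            let entry = self.provisional.entry(input).or_default();
            let detached = Detached { head, result };
            if path_to_head_is_coinductive {
                entry.with_coinductive_stack = Some(detached);
            } else {
                entry.with_inductive_stack = Some(detached);
            }
        } else {
            // Only the cycle root reaches the global cache, together with the
            // participants whose provisional results it subsumes.
            self.provisional.remove(&input);
            let participants = std::mem::take(cycle_participants);
            self.global.insert(input, (result, participants));
        }
    }
}

fn main() {
    let mut caches = Caches::default();
    let mut participants: HashSet<Input> = [1].into_iter().collect();

    // Goal 1 depended on goal 0 (the cycle head) through an inductive path.
    caches.finish_goal(1, "provisional", Some(0), false, &mut participants);
    // Goal 0 is the root of the cycle and is cached globally.
    caches.finish_goal(0, "final", None, false, &mut participants);

    assert!(caches.provisional.contains_key(&1u32));
    assert!(caches.global.contains_key(&0u32) && !caches.global.contains_key(&1u32));
}
```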
@@ -457,9 +480,6 @@ impl<'tcx> SearchGraph<'tcx> {
             // participant is on the stack. This is necessary to prevent unstable
             // results. See the comment of `SearchGraph::cycle_participants` for
             // more details.
-            self.provisional_cache.remove(&input);
-            let reached_depth = final_entry.reached_depth.as_usize() - self.stack.len();
-            let cycle_participants = mem::take(&mut self.cycle_participants);
             self.global_cache(tcx).insert(
                 tcx,
                 input,