@@ -651,24 +651,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock {
         // If we are emitting a `drop` statement, we need to have the cached
         // diverge cleanup pads ready in case that drop panics.
-        let scope = self.scopes.scopes.last().expect("exit_top_scope called with no scopes");
+        let needs_cleanup = self.scopes.scopes.last().map_or(false, |scope| scope.needs_cleanup());
         let is_generator = self.is_generator;
-        let needs_cleanup = scope.needs_cleanup();
-
         let unwind_to = if needs_cleanup {
-            let mut drops = self.scopes.scopes.iter()
-                .flat_map(|scope| &scope.drops)
-                .filter(|drop| is_generator || drop.kind == DropKind::Value);
-            let mut next_drop = ROOT_NODE;
-            let mut drop_info = drops.next().unwrap();
-            for previous_drop_info in drops {
-                next_drop = self.scopes.unwind_drops.add_drop(*drop_info, next_drop);
-                drop_info = previous_drop_info;
-            }
-            next_drop
+            self.diverge_cleanup()
         } else {
             DropIdx::MAX
         };
+
+        let scope = self.scopes.scopes.last().expect("exit_top_scope called with no scopes");
         unpack!(build_scope_drops(
             &mut self.cfg,
             &mut self.scopes.unwind_drops,
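
A minimal standalone sketch of the unwind drop tree that the removed loop threaded by hand and that `diverge_cleanup()` now builds. The names `DropTree`, `DropData`, `add_drop`, `add_entry`, and `ROOT_NODE` mirror the diff, but plain `Vec`/`usize` stand in for rustc's `IndexVec`, `DropIdx`, and `BasicBlock`; this shows the shape of the data structure, not the real implementation.

```rust
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum DropKind {
    Value,
    Storage,
}

#[derive(Clone, Copy, Debug)]
struct DropData {
    local: usize, // stand-in for `mir::Local`
    kind: DropKind,
}

const ROOT_NODE: usize = 0;

/// Each node stores the drop to perform plus the index of the next drop to
/// run afterwards when unwinding (its parent in the tree).
struct DropTree {
    drops: Vec<(DropData, usize)>,
    /// `(drop index, block)` pairs: blocks whose unwind path enters the tree there.
    entry_points: Vec<(usize, usize)>,
}

impl DropTree {
    fn new() -> Self {
        // The root node terminates every unwind chain; it points at itself.
        let root = DropData { local: 0, kind: DropKind::Storage };
        DropTree { drops: vec![(root, ROOT_NODE)], entry_points: Vec::new() }
    }

    /// Add a drop whose continuation is `next`, returning the new node's index.
    fn add_drop(&mut self, data: DropData, next: usize) -> usize {
        self.drops.push((data, next));
        self.drops.len() - 1
    }

    /// Record that unwinding out of `block` should start executing drops at `to`.
    fn add_entry(&mut self, block: usize, to: usize) {
        self.entry_points.push((to, block));
    }
}

fn main() {
    // Thread two value drops into a chain rooted at ROOT_NODE, which is what
    // a scope's drops become on the unwind path.
    let mut tree = DropTree::new();
    let a = tree.add_drop(DropData { local: 1, kind: DropKind::Value }, ROOT_NODE);
    let b = tree.add_drop(DropData { local: 2, kind: DropKind::Value }, a);
    tree.add_entry(7, b); // hypothetical block 7 unwinds through `b`, then `a`
    assert_eq!(tree.drops[b].1, a);
}
```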
@@ -1098,16 +1089,18 @@ fn build_scope_drops<'tcx>(
 
         match drop_data.kind {
             DropKind::Value => {
+                debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
+                debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
+                unwind_to = unwind_drops.drops[unwind_to].1;
                 // If the operand has been moved, and we are not on an unwind
                 // path, then don't generate the drop. (We only take this into
                 // account for non-unwind paths so as not to disturb the
                 // caching mechanism.)
                 if scope.moved_locals.iter().any(|&o| o == local) {
-                    unwind_to = unwind_drops.drops[unwind_to].1;
                     continue;
                 }
 
-                unwind_drops.entry_points.push((unwind_to, block));
+                unwind_drops.add_entry(block, unwind_to);
 
                 let next = cfg.start_new_block();
                 cfg.terminate(block, source_info, TerminatorKind::Drop {
@@ -1119,6 +1112,8 @@ fn build_scope_drops<'tcx>(
             }
             DropKind::Storage => {
                 if storage_dead_on_unwind {
+                    debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
+                    debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
                     unwind_to = unwind_drops.drops[unwind_to].1;
                 }
                 // Only temps and vars need their storage dead.
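
Reusing the simplified `DropTree`/`DropData` model from the sketch above, this illustrates the lockstep invariant the new `debug_assert_eq!` calls enforce in `build_scope_drops`: while walking a scope's drops in reverse, the `unwind_to` cursor must describe the same local and kind as the current drop, and it advances to the node's parent before the moved-local check so a skipped drop cannot desynchronize the cursor. `walk_scope_drops`, `scope_drops`, and `moved` are hypothetical names for illustration only.

```rust
fn walk_scope_drops(
    scope_drops: &[DropData], // the scope's drops, in declaration order
    tree: &DropTree,          // unwind drop tree built beforehand
    mut unwind_to: usize,     // cursor into `tree`, starting at the innermost drop
    moved: &[usize],          // locals whose values were moved out
) {
    for drop_data in scope_drops.iter().rev() {
        if let DropKind::Value = drop_data.kind {
            // The cursor and the scope's drop list must stay in sync.
            debug_assert_eq!(tree.drops[unwind_to].0.local, drop_data.local);
            debug_assert_eq!(tree.drops[unwind_to].0.kind, drop_data.kind);
            // Advance unconditionally, mirroring the hoisted assignment in the diff.
            unwind_to = tree.drops[unwind_to].1;
            if moved.contains(&drop_data.local) {
                continue; // moved value: no drop is emitted, but the cursor moved on
            }
            // ...the real builder emits `TerminatorKind::Drop` here and calls
            // `unwind_drops.add_entry(block, unwind_to)`...
        }
    }
}
```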
@@ -1224,6 +1219,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         // optimization is, but it is here.
         for (drop_idx, drop_data) in drops.drops.iter_enumerated() {
             if let DropKind::Value = drop_data.0.kind {
+                debug_assert!(drop_data.1 < drops.drops.next_index());
                 drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap()));
             }
         }
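
In terms of the simplified model above, `drops.len()` plays the role of `IndexVec::next_index()`, so the new `debug_assert!` in the last hunk is a bounds check that a value drop's unwind target names an existing node before it is pushed as an entry point. `check_parents_in_bounds` is a hypothetical helper for illustration.

```rust
fn check_parents_in_bounds(tree: &DropTree) {
    for (idx, (_data, parent)) in tree.drops.iter().enumerate() {
        // Every parent index recorded by `add_drop` refers to a node that
        // already exists, so it is always strictly below the next index.
        debug_assert!(*parent < tree.drops.len(), "node {idx} has an out-of-bounds parent");
    }
}
```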