@@ -21,7 +21,7 @@ use rustc_data_structures::fx::FxHashMap;
 
 use super::{
     Immediate, Operand, MemPlace, MPlaceTy, Place, PlaceTy, ScalarMaybeUndef,
-    Memory, Machine
+    Memory, Machine, PointerArithmetic, FnVal, StackPopInfo
 };
 
 pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
@@ -96,7 +96,9 @@ pub enum StackPopCleanup {
     /// Jump to the next block in the caller, or cause UB if None (that's a function
     /// that may never return). Also store layout of return place so
     /// we can validate it at that layout.
-    Goto(Option<mir::BasicBlock>),
+    /// `ret` stores the block we jump to on a normal return, while `unwind`
+    /// stores the block used for cleanup during unwinding.
+    Goto { ret: Option<mir::BasicBlock>, unwind: Option<mir::BasicBlock> },
     /// Just do nothing: Used by Main and for the box_alloc hook in miri.
     /// `cleanup` says whether locals are deallocated. Static computation
     /// wants them leaked to intern what they need (and just throw away
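
To make the shape of the new struct-variant concrete, here is a minimal standalone sketch (simplified stand-in types, not the rustc definitions) of how pop logic can select between the two targets:

```rust
// Minimal sketch: `BB` stands in for mir::BasicBlock; only the shape of
// the new `Goto` variant matters here, not the rest of rustc.
#[derive(Debug, Clone, Copy, PartialEq)]
struct BB(u32);

#[derive(Debug, Clone, Copy)]
enum StackPopCleanup {
    /// `ret` is the normal-return target; `unwind` is the cleanup target.
    Goto { ret: Option<BB>, unwind: Option<BB> },
    /// No jump at all; `cleanup` says whether locals are deallocated.
    None { cleanup: bool },
}

/// Pick the jump target for a frame, depending on whether we are unwinding.
fn jump_target(c: StackPopCleanup, unwinding: bool) -> Option<BB> {
    match c {
        StackPopCleanup::Goto { ret, unwind } => if unwinding { unwind } else { ret },
        StackPopCleanup::None { .. } => None,
    }
}

fn main() {
    let c = StackPopCleanup::Goto { ret: Some(BB(1)), unwind: Some(BB(7)) };
    assert_eq!(jump_target(c, false), Some(BB(1))); // normal return
    assert_eq!(jump_target(c, true), Some(BB(7)));  // unwinding: cleanup block
    assert_eq!(jump_target(StackPopCleanup::None { cleanup: true }, false), None);
}
```
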
@@ -547,56 +549,142 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
     }
 
-    pub(super) fn pop_stack_frame(&mut self) -> InterpResult<'tcx> {
-        info!("LEAVING({}) {}", self.cur_frame(), self.frame().instance);
+    pub(super) fn pop_stack_frame_internal(
+        &mut self,
+        unwinding: bool
+    ) -> InterpResult<'tcx, (StackPopCleanup, StackPopInfo)> {
+        info!("LEAVING({}) {} (unwinding = {})",
+            self.cur_frame(), self.frame().instance, unwinding);
+
         ::log_settings::settings().indentation -= 1;
         let frame = self.stack.pop().expect(
             "tried to pop a stack frame, but there were none",
         );
-        M::stack_pop(self, frame.extra)?;
+        let stack_pop_info = M::stack_pop(self, frame.extra)?;
+
         // Abort early if we do not want to clean up: We also avoid validation in that case,
         // because this is CTFE and the final value will be thoroughly validated anyway.
         match frame.return_to_block {
-            StackPopCleanup::Goto(_) => {},
-            StackPopCleanup::None { cleanup } => {
+            StackPopCleanup::Goto { .. } => {},
+            StackPopCleanup::None { cleanup, .. } => {
+                assert!(!unwinding, "Encountered StackPopCleanup::None while unwinding");
+
                 if !cleanup {
                     assert!(self.stack.is_empty(), "only the topmost frame should ever be leaked");
                     // Leak the locals, skip validation.
-                    return Ok(());
+                    return Ok((frame.return_to_block, stack_pop_info));
                 }
             }
         }
         // Deallocate all locals that are backed by an allocation.
         for local in frame.locals {
             self.deallocate_local(local.value)?;
         }
-        // Validate the return value. Do this after deallocating so that we catch dangling
-        // references.
-        if let Some(return_place) = frame.return_place {
-            if M::enforce_validity(self) {
-                // Data got changed, better make sure it matches the type!
-                // It is still possible that the return place held invalid data while
-                // the function is running, but that's okay because nobody could have
-                // accessed that same data from the "outside" to observe any broken
-                // invariant -- that is, unless a function somehow has a ptr to
-                // its return place... but the way MIR is currently generated, the
-                // return place is always a local and then this cannot happen.
-                self.validate_operand(
-                    self.place_to_op(return_place)?,
-                    vec![],
-                    None,
-                )?;
+
+        // If we're popping frames due to unwinding, and we didn't just exit
+        // unwinding, we skip a bunch of validation and cleanup logic (including
+        // jumping to the regular return block specified in the StackPopCleanup).
+        let cur_unwinding = unwinding && stack_pop_info != StackPopInfo::StopUnwinding;
+
+        info!("StackPopCleanup: {:?} StackPopInfo: {:?} cur_unwinding = {:?}",
+            frame.return_to_block, stack_pop_info, cur_unwinding);
+
+        // When we're popping a stack frame for unwinding purposes,
+        // we don't care at all about returning-related stuff (i.e. return_place
+        // and return_to_block), because we're not performing a return from this frame.
+        if !cur_unwinding {
+            // Validate the return value. Do this after deallocating so that we catch dangling
+            // references.
+            if let Some(return_place) = frame.return_place {
+                if M::enforce_validity(self) {
+                    // Data got changed, better make sure it matches the type!
+                    // It is still possible that the return place held invalid data while
+                    // the function is running, but that's okay because nobody could have
+                    // accessed that same data from the "outside" to observe any broken
+                    // invariant -- that is, unless a function somehow has a ptr to
+                    // its return place... but the way MIR is currently generated, the
+                    // return place is always a local and then this cannot happen.
+                    self.validate_operand(
+                        self.place_to_op(return_place)?,
+                        vec![],
+                        None,
+                    )?;
+                }
+            } else {
+                // Uh, that shouldn't happen... the function did not intend to return
+                throw_ub!(Unreachable);
+            }
+
+            // Jump to new block -- *after* validation so that the spans make more sense.
+            match frame.return_to_block {
+                StackPopCleanup::Goto { ret, .. } => {
+                    self.goto_block(ret)?;
+                }
+                StackPopCleanup::None { .. } => {}
             }
-        } else {
-            // Uh, that shouldn't happen... the function did not intend to return
-            throw_ub!(Unreachable)
         }
-        // Jump to new block -- *after* validation so that the spans make more sense.
-        match frame.return_to_block {
-            StackPopCleanup::Goto(block) => {
-                self.goto_block(block)?;
+
+        Ok((frame.return_to_block, stack_pop_info))
+    }
+
+    pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
+        let (mut cleanup, mut stack_pop_info) = self.pop_stack_frame_internal(unwinding)?;
+
+        // There are two cases where we want to unwind the stack:
+        // * The caller explicitly told us (i.e. we hit a Resume terminator)
+        // * The machine indicated that we've just started unwinding (i.e.
+        //   a panic has just occurred)
+        if unwinding || stack_pop_info == StackPopInfo::StartUnwinding {
+            trace!("unwinding: starting stack unwind...");
+            // Overwrite our current stack_pop_info, so that the check
+            // below doesn't fail.
+            stack_pop_info = StackPopInfo::Normal;
+            // There are three possible ways that we can exit the loop:
+            // 1) We find an unwind block - we jump to it to allow cleanup
+            //    to occur for that frame
+            // 2) pop_stack_frame_internal reports that we're no longer unwinding
+            //    - this means that the panic has been caught, and that execution
+            //    should continue as normal
+            // 3) We pop all of our frames off the stack - this should never happen.
+            while !self.stack.is_empty() {
+                match stack_pop_info {
+                    // We tried to start unwinding while we were already
+                    // unwinding. Note that this **is not** the same thing
+                    // as a double panic, which will be intercepted by
+                    // libcore/libstd before we actually attempt to unwind.
+                    StackPopInfo::StartUnwinding => {
+                        throw_ub_format!("Attempted to start unwinding while already unwinding!");
+                    },
+                    StackPopInfo::StopUnwinding => {
+                        trace!("unwinding: no longer unwinding!");
+                        break;
+                    }
+                    StackPopInfo::Normal => {}
+                }
+
+                match cleanup {
+                    StackPopCleanup::Goto { unwind, .. } if unwind.is_some() => {
+                        info!("unwind: found cleanup block {:?}", unwind);
+                        self.goto_block(unwind)?;
+                        break;
+                    },
+                    _ => {}
+                }
+
+                info!("unwinding: popping frame!");
+                let res = self.pop_stack_frame_internal(true)?;
+                cleanup = res.0;
+                stack_pop_info = res.1;
+            }
+            if self.stack.is_empty() {
+                // We should never get here:
+                // The 'start_fn' lang item should always install a panic handler
+                throw_ub!(Unreachable);
             }
-            StackPopCleanup::None { .. } => {}
         }
 
         if self.stack.len() > 0 {
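
The unwinding loop in `pop_stack_frame` has three exits, as its comments note. Below is a hedged, self-contained model of that state machine using simplified stand-in types; in the real interpreter the loop calls back into `pop_stack_frame_internal` and the frame owning the cleanup block stays live, so this only models which exit is taken:

```rust
// Simplified model of the three-way exit from the unwind loop; these
// types are stand-ins, not the real rustc/miri definitions.
#[derive(Debug, Clone, Copy, PartialEq)]
enum StackPopInfo { Normal, StartUnwinding, StopUnwinding }

#[derive(Debug, Clone, Copy)]
enum Cleanup {
    Goto { unwind: Option<u32> }, // u32 stands in for a basic-block id
    None,
}

/// Walk down a stack of (cleanup, pop-info) pairs, mimicking repeated
/// calls to pop_stack_frame_internal(true). Returns:
/// - Ok(Some(block)): exit 1, jump to a frame's cleanup block
/// - Ok(None):        exit 2, the panic was caught, resume normally
/// - Err(..):         exit 3 (or nested unwinding), undefined behavior
fn unwind_stack(mut frames: Vec<(Cleanup, StackPopInfo)>) -> Result<Option<u32>, String> {
    while let Some((cleanup, info)) = frames.pop() {
        match info {
            StackPopInfo::StartUnwinding =>
                return Err("started unwinding while already unwinding".into()),
            StackPopInfo::StopUnwinding => return Ok(None), // panic caught
            StackPopInfo::Normal => {}
        }
        if let Cleanup::Goto { unwind: Some(block) } = cleanup {
            return Ok(Some(block)); // jump to the frame's cleanup block
        }
    }
    Err("unwound past the bottom of the stack".into())
}

fn main() {
    let stack = vec![
        (Cleanup::None, StackPopInfo::Normal),                     // bottom frame
        (Cleanup::Goto { unwind: Some(9) }, StackPopInfo::Normal), // has cleanup
        (Cleanup::Goto { unwind: None }, StackPopInfo::Normal),    // popped first
    ];
    assert_eq!(unwind_stack(stack), Ok(Some(9)));

    // Nested unwinding is rejected, mirroring the throw_ub_format! above.
    let nested = vec![(Cleanup::None, StackPopInfo::StartUnwinding)];
    assert!(unwind_stack(nested).is_err());
}
```
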
@@ -760,4 +848,25 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         trace!("generate stacktrace: {:#?}, {:?}", frames, explicit_span);
         frames
     }
+
+    /// Resolve the function at the specified slot in the provided
+    /// vtable. An index of '0' corresponds to the first method
+    /// declared in the trait of the provided vtable.
+    pub fn get_vtable_slot(
+        &self,
+        vtable: Scalar<M::PointerTag>,
+        idx: usize
+    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
+        let ptr_size = self.pointer_size();
+        // Skip over the 'drop_ptr', 'size', and 'align' fields.
+        let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
+        let vtable_slot = self.memory.check_ptr_access(
+            vtable_slot,
+            ptr_size,
+            self.tcx.data_layout.pointer_align.abi,
+        )?.expect("cannot be a ZST");
+        let fn_ptr = self.memory.get(vtable_slot.alloc_id)?
+            .read_ptr_sized(self, vtable_slot)?.not_undef()?;
+        Ok(self.memory.get_fn(fn_ptr)?)
+    }
 }
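
The slot arithmetic in `get_vtable_slot` relies on the vtable layout rustc uses here: a drop-glue pointer, then size and align, then the trait's methods in declaration order. A tiny sketch of just the offset computation (plain integers, no interpreter types):

```rust
/// Byte offset of method `idx` in a vtable laid out as
/// [drop_in_place, size, align, method_0, method_1, ...].
/// `ptr_size` is the target's pointer width in bytes.
fn vtable_slot_offset(ptr_size: u64, idx: u64) -> u64 {
    // Skip the three header words, then index into the method list.
    ptr_size * (idx + 3)
}

fn main() {
    // On a 64-bit target the first method lives at byte offset 24,
    // right past the 3 * 8 = 24 bytes of header.
    assert_eq!(vtable_slot_offset(8, 0), 24);
    assert_eq!(vtable_slot_offset(8, 2), 40);
}
```
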