@@ -485,8 +485,6 @@ impl<'tcx> Inliner<'tcx> {
         callee_attrs: &CodegenFnAttrs,
         cross_crate_inlinable: bool,
     ) -> Result<(), &'static str> {
-        let tcx = self.tcx;
-
         let mut threshold = if cross_crate_inlinable {
             self.tcx.sess.opts.unstable_opts.inline_mir_hint_threshold.unwrap_or(100)
         } else {
@@ -501,49 +499,25 @@ impl<'tcx> Inliner<'tcx> {
         }
         debug!("    final inline threshold = {}", threshold);
 
+        if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set {
+            for blk in callee_body.basic_blocks.iter() {
+                if let TerminatorKind::InlineAsm { .. } = blk.terminator().kind {
+                    // During the attribute checking stage we allow a callee with no
+                    // instruction_set assigned to count as compatible with a function that does
+                    // assign one. However, during this stage we require an exact match when any
+                    // inline-asm is detected. LLVM will still possibly do an inline later on
+                    // if the no-attribute function ends up with the same instruction set anyway.
+                    return Err("Cannot move inline-asm across instruction sets");
+                }
+            }
+        }
+
         // FIXME: Give a bonus to functions with only a single caller
 
         let mut checker =
             CostChecker::new(self.tcx, self.param_env, Some(callsite.callee), callee_body);
 
-        // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
-        let mut work_list = vec![START_BLOCK];
-        let mut visited = BitSet::new_empty(callee_body.basic_blocks.len());
-        while let Some(bb) = work_list.pop() {
-            if !visited.insert(bb.index()) {
-                continue;
-            }
-
-            let blk = &callee_body.basic_blocks[bb];
-            checker.visit_basic_block_data(bb, blk);
-
-            let term = blk.terminator();
-            if let TerminatorKind::Drop { ref place, target, unwind, replace: _ } = term.kind {
-                work_list.push(target);
-
-                // If the place doesn't actually need dropping, treat it like a regular goto.
-                let ty = callsite.callee.instantiate_mir(
-                    self.tcx,
-                    ty::EarlyBinder::bind(&place.ty(callee_body, tcx).ty),
-                );
-                if ty.needs_drop(tcx, self.param_env)
-                    && let UnwindAction::Cleanup(unwind) = unwind
-                {
-                    work_list.push(unwind);
-                }
-            } else if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set
-                && matches!(term.kind, TerminatorKind::InlineAsm { .. })
-            {
-                // During the attribute checking stage we allow a callee with no
-                // instruction_set assigned to count as compatible with a function that does
-                // assign one. However, during this stage we require an exact match when any
-                // inline-asm is detected. LLVM will still possibly do an inline later on
-                // if the no-attribute function ends up with the same instruction set anyway.
-                return Err("Cannot move inline-asm across instruction sets");
-            } else {
-                work_list.extend(term.successors())
-            }
-        }
+        checker.visit_body(callee_body);
 
         // N.B. We still apply our cost threshold to #[inline(always)] functions.
         // That attribute is often applied to very large functions that exceed LLVM's (very
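
For readers skimming the hunk: the removed block was a hand-rolled worklist walk that only counted blocks reachable from the entry block (treating non-dropping Drop terminators like gotos), while the replacement lets the CostChecker visit the entire body via visit_body, and the instruction-set/inline-asm check is hoisted out in front. The standalone sketch below illustrates that structural difference only; Block, cost_reachable, cost_whole_body, and cost_of are hypothetical stand-ins, not rustc internals, and it assumes the practical effect of visiting unreachable blocks is merely a somewhat larger cost estimate.

use std::collections::HashSet;

/// Hypothetical stand-in for a MIR basic block: just its successor indices.
struct Block {
    successors: Vec<usize>,
}

/// Old-style pass: worklist + visited set, so only blocks reachable from the
/// entry block contribute to the cost (mirrors the removed manual traversal).
fn cost_reachable(blocks: &[Block], cost_of: impl Fn(usize) -> usize) -> usize {
    let mut work_list = vec![0]; // entry block, analogous to START_BLOCK
    let mut visited = HashSet::new();
    let mut cost = 0;
    while let Some(bb) = work_list.pop() {
        if !visited.insert(bb) {
            continue;
        }
        cost += cost_of(bb);
        work_list.extend(blocks[bb].successors.iter().copied());
    }
    cost
}

/// New-style pass: visit every block unconditionally, as a whole-body visit does.
fn cost_whole_body(blocks: &[Block], cost_of: impl Fn(usize) -> usize) -> usize {
    (0..blocks.len()).map(cost_of).sum()
}

fn main() {
    // bb0 -> bb1; bb2 is unreachable from the entry block.
    let blocks = vec![
        Block { successors: vec![1] },
        Block { successors: vec![] },
        Block { successors: vec![] },
    ];
    let unit_cost = |_bb: usize| 1;
    assert_eq!(cost_reachable(&blocks, unit_cost), 2);
    assert_eq!(cost_whole_body(&blocks, unit_cost), 3);
    println!("the two approaches differ only in how dead blocks are counted");
}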