@@ -893,37 +893,40 @@ void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 	cpuc->lbr_stack.hw_idx = tos;
 }
 
+static DEFINE_STATIC_KEY_FALSE(x86_lbr_mispred);
+static DEFINE_STATIC_KEY_FALSE(x86_lbr_cycles);
+static DEFINE_STATIC_KEY_FALSE(x86_lbr_type);
+
 static __always_inline int get_lbr_br_type(u64 info)
 {
-	if (!static_cpu_has(X86_FEATURE_ARCH_LBR) || !x86_pmu.lbr_br_type)
-		return 0;
+	int type = 0;
 
-	return (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
+	if (static_branch_likely(&x86_lbr_type))
+		type = (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
+
+	return type;
 }
 
 static __always_inline bool get_lbr_mispred(u64 info)
 {
-	if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
-		return 0;
+	bool mispred = 0;
 
-	return !!(info & LBR_INFO_MISPRED);
-}
-
-static __always_inline bool get_lbr_predicted(u64 info)
-{
-	if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
-		return 0;
+	if (static_branch_likely(&x86_lbr_mispred))
+		mispred = !!(info & LBR_INFO_MISPRED);
 
-	return !(info & LBR_INFO_MISPRED);
+	return mispred;
 }
 
 static __always_inline u16 get_lbr_cycles(u64 info)
 {
+	u16 cycles = info & LBR_INFO_CYCLES;
+
 	if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
-	    !(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID))
-		return 0;
+	    (!static_branch_likely(&x86_lbr_cycles) ||
+	     !(info & LBR_INFO_CYC_CNT_VALID)))
+		cycles = 0;
 
-	return info & LBR_INFO_CYCLES;
+	return cycles;
 }
 
 static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
@@ -951,7 +954,7 @@ static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
 		e->from		= from;
 		e->to		= to;
 		e->mispred	= get_lbr_mispred(info);
-		e->predicted	= get_lbr_predicted(info);
+		e->predicted	= !e->mispred;
 		e->in_tx	= !!(info & LBR_INFO_IN_TX);
 		e->abort	= !!(info & LBR_INFO_ABORT);
 		e->cycles	= get_lbr_cycles(info);
@@ -1718,6 +1721,14 @@ void intel_pmu_lbr_init(void)
 		x86_pmu.lbr_to_cycles = 1;
 		break;
 	}
+
+	if (x86_pmu.lbr_has_info) {
+		/*
+		 * Only used in combination with baseline pebs.
+		 */
+		static_branch_enable(&x86_lbr_mispred);
+		static_branch_enable(&x86_lbr_cycles);
+	}
 }
 
 /*
@@ -1779,6 +1790,12 @@ void __init intel_pmu_arch_lbr_init(void)
 	x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
 	x86_pmu.lbr_nr = lbr_nr;
 
+	if (x86_pmu.lbr_mispred)
+		static_branch_enable(&x86_lbr_mispred);
+	if (x86_pmu.lbr_timed_lbr)
+		static_branch_enable(&x86_lbr_cycles);
+	if (x86_pmu.lbr_br_type)
+		static_branch_enable(&x86_lbr_type);
 
 	arch_lbr_xsave = is_arch_lbr_xsave_available();
 	if (arch_lbr_xsave) {
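
For reference, the change above uses the kernel's static-key API from <linux/jump_label.h>: a key declared with DEFINE_STATIC_KEY_FALSE() compiles the guarded path down to a patched jump/nop instead of a load-and-test of x86_pmu fields on every LBR record, and static_branch_enable() flips the branch site once at init when the hardware capability is known. Below is a minimal, self-contained module sketch of that pattern; demo_feature, demo_decode() and the init/exit hooks are hypothetical names for illustration and are not part of the commit.

/*
 * Minimal sketch (not from the patch) of the static-key pattern used above:
 * the key defaults to "off", and static_branch_enable() patches the branch
 * site at init time, so the hot path takes a direct jump instead of testing
 * a flag on every call.
 */
#include <linux/bits.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/module.h>

static DEFINE_STATIC_KEY_FALSE(demo_feature);	/* hypothetical key */

static bool demo_decode(u64 info)
{
	/* Patched to a nop (key off) or jump (key on), not a load+test. */
	if (static_branch_likely(&demo_feature))
		return !!(info & BIT_ULL(63));

	return false;
}

static int __init demo_init(void)
{
	bool hw_has_feature = true;	/* stand-in for a CPUID/x86_pmu check */

	if (hw_has_feature)
		static_branch_enable(&demo_feature);

	pr_info("demo_decode(BIT_ULL(63)) = %d\n", demo_decode(BIT_ULL(63)));
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");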