@@ -941,6 +941,8 @@ static void perf_ibs_get_mem_lock(union ibs_op_data3 *op_data3,
941
941
data_src -> mem_lock = PERF_MEM_LOCK_LOCKED ;
942
942
}
943
943
944
/*
 * Map an IBS MSR number to its index in the ibs_data->regs[] snapshot.
 * Be careful: works only because the fetch/op MSRs are contiguous,
 * starting at MSR_AMD64_IBSFETCHCTL / MSR_AMD64_IBSOPCTL respectively.
 * Arguments and expansions are fully parenthesized so expressions such
 * as ibs_fetch_msr_idx(base + off) expand correctly.
 */
#define ibs_fetch_msr_idx(msr)	((msr) - MSR_AMD64_IBSFETCHCTL)
#define ibs_op_msr_idx(msr)	((msr) - MSR_AMD64_IBSOPCTL)
static void perf_ibs_get_data_src (struct perf_ibs_data * ibs_data ,
@@ -1036,6 +1038,67 @@ static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs, u64 sample_type,
1036
1038
return 1 ;
1037
1039
}
1038
1040
1041
+ static bool perf_ibs_is_kernel_data_addr (struct perf_event * event ,
1042
+ struct perf_ibs_data * ibs_data )
1043
+ {
1044
+ u64 sample_type_mask = PERF_SAMPLE_ADDR | PERF_SAMPLE_RAW ;
1045
+ union ibs_op_data3 op_data3 ;
1046
+ u64 dc_lin_addr ;
1047
+
1048
+ op_data3 .val = ibs_data -> regs [ibs_op_msr_idx (MSR_AMD64_IBSOPDATA3 )];
1049
+ dc_lin_addr = ibs_data -> regs [ibs_op_msr_idx (MSR_AMD64_IBSDCLINAD )];
1050
+
1051
+ return unlikely ((event -> attr .sample_type & sample_type_mask ) &&
1052
+ op_data3 .dc_lin_addr_valid && kernel_ip (dc_lin_addr ));
1053
+ }
1054
+
1055
+ static bool perf_ibs_is_kernel_br_target (struct perf_event * event ,
1056
+ struct perf_ibs_data * ibs_data ,
1057
+ int br_target_idx )
1058
+ {
1059
+ union ibs_op_data op_data ;
1060
+ u64 br_target ;
1061
+
1062
+ op_data .val = ibs_data -> regs [ibs_op_msr_idx (MSR_AMD64_IBSOPDATA )];
1063
+ br_target = ibs_data -> regs [br_target_idx ];
1064
+
1065
+ return unlikely ((event -> attr .sample_type & PERF_SAMPLE_RAW ) &&
1066
+ op_data .op_brn_ret && kernel_ip (br_target ));
1067
+ }
1068
+
1069
+ static bool perf_ibs_swfilt_discard (struct perf_ibs * perf_ibs , struct perf_event * event ,
1070
+ struct pt_regs * regs , struct perf_ibs_data * ibs_data ,
1071
+ int br_target_idx )
1072
+ {
1073
+ if (perf_exclude_event (event , regs ))
1074
+ return true;
1075
+
1076
+ if (perf_ibs != & perf_ibs_op || !event -> attr .exclude_kernel )
1077
+ return false;
1078
+
1079
+ if (perf_ibs_is_kernel_data_addr (event , ibs_data ))
1080
+ return true;
1081
+
1082
+ if (br_target_idx != -1 &&
1083
+ perf_ibs_is_kernel_br_target (event , ibs_data , br_target_idx ))
1084
+ return true;
1085
+
1086
+ return false;
1087
+ }
1088
+
1089
+ static void perf_ibs_phyaddr_clear (struct perf_ibs * perf_ibs ,
1090
+ struct perf_ibs_data * ibs_data )
1091
+ {
1092
+ if (perf_ibs == & perf_ibs_op ) {
1093
+ ibs_data -> regs [ibs_op_msr_idx (MSR_AMD64_IBSOPDATA3 )] &= ~(1ULL << 18 );
1094
+ ibs_data -> regs [ibs_op_msr_idx (MSR_AMD64_IBSDCPHYSAD )] = 0 ;
1095
+ return ;
1096
+ }
1097
+
1098
+ ibs_data -> regs [ibs_fetch_msr_idx (MSR_AMD64_IBSFETCHCTL )] &= ~(1ULL << 52 );
1099
+ ibs_data -> regs [ibs_fetch_msr_idx (MSR_AMD64_IBSFETCHPHYSAD )] = 0 ;
1100
+ }
1101
+
1039
1102
static int perf_ibs_handle_irq (struct perf_ibs * perf_ibs , struct pt_regs * iregs )
1040
1103
{
1041
1104
struct cpu_perf_ibs * pcpu = this_cpu_ptr (perf_ibs -> pcpu );
@@ -1048,6 +1111,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
1048
1111
int offset , size , check_rip , offset_max , throttle = 0 ;
1049
1112
unsigned int msr ;
1050
1113
u64 * buf , * config , period , new_config = 0 ;
1114
+ int br_target_idx = -1 ;
1051
1115
1052
1116
if (!test_bit (IBS_STARTED , pcpu -> state )) {
1053
1117
fail :
@@ -1102,6 +1166,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
1102
1166
if (perf_ibs == & perf_ibs_op ) {
1103
1167
if (ibs_caps & IBS_CAPS_BRNTRGT ) {
1104
1168
rdmsrl (MSR_AMD64_IBSBRTARGET , * buf ++ );
1169
+ br_target_idx = size ;
1105
1170
size ++ ;
1106
1171
}
1107
1172
if (ibs_caps & IBS_CAPS_OPDATA4 ) {
@@ -1128,16 +1193,20 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
1128
1193
regs .flags |= PERF_EFLAGS_EXACT ;
1129
1194
}
1130
1195
1131
- if (perf_ibs == & perf_ibs_op )
1132
- perf_ibs_parse_ld_st_data (event -> attr .sample_type , & ibs_data , & data );
1133
-
1134
1196
if ((event -> attr .config2 & IBS_SW_FILTER_MASK ) &&
1135
- (perf_exclude_event (event , & regs ) ||
1136
- ((data .sample_flags & PERF_SAMPLE_ADDR ) &&
1137
- event -> attr .exclude_kernel && kernel_ip (data .addr )))) {
1197
+ perf_ibs_swfilt_discard (perf_ibs , event , & regs , & ibs_data , br_target_idx )) {
1138
1198
throttle = perf_event_account_interrupt (event );
1139
1199
goto out ;
1140
1200
}
1201
+ /*
1202
+ * Prevent leaking physical addresses to unprivileged users. Skip
1203
+ * PERF_SAMPLE_PHYS_ADDR check since generic code prevents it for
1204
+ * unprivileged users.
1205
+ */
1206
+ if ((event -> attr .sample_type & PERF_SAMPLE_RAW ) &&
1207
+ perf_allow_kernel (& event -> attr )) {
1208
+ perf_ibs_phyaddr_clear (perf_ibs , & ibs_data );
1209
+ }
1141
1210
1142
1211
if (event -> attr .sample_type & PERF_SAMPLE_RAW ) {
1143
1212
raw = (struct perf_raw_record ){
@@ -1149,6 +1218,9 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
1149
1218
perf_sample_save_raw_data (& data , event , & raw );
1150
1219
}
1151
1220
1221
+ if (perf_ibs == & perf_ibs_op )
1222
+ perf_ibs_parse_ld_st_data (event -> attr .sample_type , & ibs_data , & data );
1223
+
1152
1224
/*
1153
1225
* rip recorded by IbsOpRip will not be consistent with rsp and rbp
1154
1226
* recorded as part of interrupt regs. Thus we need to use rip from
0 commit comments