// File descriptors for kcov coverage collection, allocated downward from
// kOutPipeFd so they stay clear of the remapped stdin/stdout/err fds.
const int kCoverFd = kOutPipeFd - kMaxThreads; // one kcov fd per executor thread
const int kExtraCoverFd = kCoverFd - 1; // extra (background/remote) coverage fd
const int kMaxArgs = 9; // max number of syscall arguments
// Size (in entries) of the kcov coverage buffer mapped per thread.
// NOTE(review): this patch reduces it from 512<<10 to 8<<10 — presumably
// because the new unique-PC kcov mode (see is_uniq_mode) emits far fewer
// entries than raw PC tracing; confirm the kernel side guarantees this bound.
const int kCoverSize = 8 << 10;
const int kFailStatus = 67; // exit status signalling a hard failure to the parent

// Two approaches of dealing with kcov memory.
static int running; // number of currently executing threads
static uint32 completed; // number of completed calls
static bool is_kernel_64_bit; // target kernel bitness, selects cover_data_t width
// When true, signal is taken from the kernel-deduplicated "unique edge" kcov
// buffer (cover_t::data_edge) instead of being computed in userspace from the
// raw PC trace. NOTE(review): hard-enabled here; no runtime flag toggles it
// in this diff — confirm that is intended rather than a leftover default.
static bool is_uniq_mode = true;
static bool use_cover_edges; // hash adjacent PCs into edge signal (non-uniq mode only)

static uint8* input_data;
@@ -347,9 +348,12 @@ struct call_t {
347
348
struct cover_t {
348
349
int fd;
349
350
uint32 size;
351
+ uint32 size_edge;
350
352
uint32 mmap_alloc_size;
351
353
char * data;
352
354
char * data_end;
355
+ char * data_edge;
356
+ char * data_edge_end;
353
357
// Currently collecting comparisons.
354
358
bool collect_comps;
355
359
// Note: On everything but darwin the first value in data is the count of
@@ -367,6 +371,8 @@ struct cover_t {
367
371
intptr_t pc_offset;
368
372
// The coverage buffer has overflowed and we have truncated coverage.
369
373
bool overflow;
374
+ // kcov mode.
375
+ unsigned int mode;
370
376
};
371
377
372
378
struct thread_t {
// Serializes the feedback signal for one executed call into the flatbuffer.
// index: call index (used for cross-call dedup in non-uniq mode);
// cov: per-thread coverage buffers; all: if true, bypass the max_signal filter.
// Returns the offset of the finished uint64 vector in fbb.
template <typename cover_data_t>
uint32 write_signal(flatbuffers::FlatBufferBuilder& fbb, int index, cover_t* cov, bool all)
{
	// Write out feedback signals.
	fbb.StartVector(0, sizeof(uint64));
	if (is_uniq_mode) {
		// Unique-edge mode: the kernel already provides deduplicated edge
		// values in the data_edge buffer, so unlike the branch below there
		// is no userspace hashing, coverage_filter() or dedup() pass here.
		uint32 nsig = 0;
		cover_data_t* cover_data = (cover_data_t*)(cov->data_edge + cov->data_offset);
		if ((char*)(cover_data + cov->size_edge) > cov->data_edge_end)
			failmsg("too much cover", "cov=%u", cov->size_edge);
		for (uint32 i = 0; i < cov->size_edge; i++) {
			// NOTE(review): sig is cover_data_t-wide here (32-bit on
			// 32-bit kernels) before widening to uint64 on push —
			// mirrors the pc computation below; confirm intentional.
			cover_data_t sig = cover_data[i] + cov->pc_offset;
			if (!all && max_signal && max_signal->Contains(sig))
				continue;
			fbb.PushElement(uint64(sig));
			nsig++;
		}
		return fbb.EndVector(nsig);
	} else {
		// It is code edges computed as xor of two subsequent basic block PCs.
		cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
		if ((char*)(cover_data + cov->size) > cov->data_end)
			failmsg("too much cover", "cov=%u", cov->size);
		uint32 nsig = 0;
		cover_data_t prev_pc = 0;
		// Start true so the very first PC is never dropped by the
		// both-filtered-out rule below.
		bool prev_filter = true;
		for (uint32 i = 0; i < cov->size; i++) {
			cover_data_t pc = cover_data[i] + cov->pc_offset;
			uint64 sig = pc;
			if (use_cover_edges) {
				// Only hash the lower 12 bits so the hash is independent of any module offsets.
				const uint64 mask = (1 << 12) - 1;
				sig ^= hash(prev_pc & mask) & mask;
			}
			bool filter = coverage_filter(pc);
			// Ignore the edge only if both current and previous PCs are filtered out
			// to capture all incoming and outcoming edges into the interesting code.
			bool ignore = !filter && !prev_filter;
			prev_pc = pc;
			prev_filter = filter;
			if (ignore || dedup(index, sig))
				continue;
			if (!all && max_signal && max_signal->Contains(sig))
				continue;
			fbb.PushElement(uint64(sig));
			nsig++;
		}
		return fbb.EndVector(nsig);
	}
}
1191
1212
1192
1213
template <typename cover_data_t >
@@ -1519,8 +1540,11 @@ void execute_call(thread_t* th)
1519
1540
th->id , current_time_ms () - start_time_ms, call->name , (uint64)th->res );
1520
1541
if (th->res == (intptr_t )-1 )
1521
1542
debug (" errno=%d" , th->reserrno );
1522
- if (flag_coverage)
1543
+ if (flag_coverage) {
1523
1544
debug (" cover=%u" , th->cov .size );
1545
+ if (is_uniq_mode)
1546
+ debug (" edge=%u" , th->cov .size_edge );
1547
+ }
1524
1548
if (th->call_props .fail_nth > 0 )
1525
1549
debug (" fault=%d" , th->fault_injected );
1526
1550
if (th->call_props .rerun > 0 )
0 commit comments