
Commit 570d72b

all: support linux KCOV_TRACE_UNIQ_[PC|EDGE|CMP] modes
KCOV_TRACE_UNIQ_PC and KCOV_TRACE_UNIQ_EDGE are enabled together and replace the previous KCOV_TRACE_PC mode. KCOV_TRACE_UNIQ_CMP replaces the KCOV_TRACE_CMP mode. With the UNIQ modes, a smaller cover buffer is required. Uniq mode for remote coverage is not supported yet.
1 parent 7315a7c commit 570d72b
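
For orientation, below is a minimal userspace sketch of the flow this commit implements: try to enable the new unique-coverage modes and fall back to the classic KCOV_TRACE_PC if the kernel rejects them, mirroring the new cover_enable() logic. The ioctl definitions and the /sys/kernel/debug/kcov path are the standard kcov interface; the KCOV_TRACE_UNIQ_* values and the placement of the unique-edge buffer at a file offset equal to the main buffer size are taken from this commit's diff. Everything else (names, sizes, the single traced syscall) is purely illustrative and is not the executor code.

// Hedged, stand-alone sketch: probe the UNIQ kcov modes and fall back to the
// classic KCOV_TRACE_PC when the kernel rejects them. Assumes a 64-bit kernel
// and that the unique-edge buffer is mapped at a file offset equal to the main
// buffer size, as cover_mmap() does in the diff below.
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
#define KCOV_ENABLE _IO('c', 100)
#define KCOV_DISABLE _IO('c', 101)

static const unsigned long KCOV_TRACE_PC = 0;
static const unsigned long KCOV_TRACE_UNIQ_PC = 2; // values from sys.txt.const below
static const unsigned long KCOV_TRACE_UNIQ_EDGE = 4;

int main(void)
{
	const unsigned long kCoverSize = 8 << 10; // entries; matches the new, smaller kCoverSize
	const size_t kBufBytes = kCoverSize * sizeof(uint64_t);

	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	if (fd == -1 || ioctl(fd, KCOV_INIT_TRACE, kCoverSize))
		return 1;
	// Main buffer (unique PCs, or the classic trace) at file offset 0.
	uint64_t* pcs = (uint64_t*)mmap(NULL, kBufBytes, PROT_READ | PROT_WRITE,
					MAP_SHARED, fd, 0);
	if (pcs == MAP_FAILED)
		return 1;
	// Unique-edge buffer at file offset kBufBytes (assumption mirroring the diff);
	// this mmap fails on kernels without the UNIQ patches.
	uint64_t* edges = (uint64_t*)mmap(NULL, kBufBytes, PROT_READ | PROT_WRITE,
					  MAP_SHARED, fd, (off_t)kBufBytes);
	int uniq = 1;
	if (edges == MAP_FAILED ||
	    ioctl(fd, KCOV_ENABLE, KCOV_TRACE_UNIQ_PC | KCOV_TRACE_UNIQ_EDGE)) {
		uniq = 0; // older kernel: classic mode, only the main buffer is used
		if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
			return 1;
	}
	// Word 0 of each buffer holds the number of collected entries.
	__atomic_store_n(&pcs[0], (uint64_t)0, __ATOMIC_RELAXED);
	if (uniq)
		__atomic_store_n(&edges[0], (uint64_t)0, __ATOMIC_RELAXED);
	(void)read(-1, NULL, 0); // any traced syscall
	printf("pcs=%llu edges=%llu\n",
	       (unsigned long long)__atomic_load_n(&pcs[0], __ATOMIC_RELAXED),
	       uniq ? (unsigned long long)__atomic_load_n(&edges[0], __ATOMIC_RELAXED) : 0ULL);
	ioctl(fd, KCOV_DISABLE, 0);
	return 0;
}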

5 files changed: +91, -36 lines

executor/common_linux.h

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 #include <unistd.h>
 
 #if SYZ_EXECUTOR
-const int kExtraCoverSize = 1024 << 10;
+const int kExtraCoverSize = 8 << 10;
 struct cover_t;
 static void cover_reset(cover_t* cov);
 #endif

executor/executor.cc

Lines changed: 53 additions & 29 deletions
@@ -72,7 +72,7 @@ const int kOutPipeFd = kMaxFd - 2; // remapped from stdout
 const int kCoverFd = kOutPipeFd - kMaxThreads;
 const int kExtraCoverFd = kCoverFd - 1;
 const int kMaxArgs = 9;
-const int kCoverSize = 512 << 10;
+const int kCoverSize = 8 << 10;
 const int kFailStatus = 67;
 
 // Two approaches of dealing with kcov memory.
@@ -324,6 +324,7 @@ const uint64 no_copyout = -1;
 static int running;
 static uint32 completed;
 static bool is_kernel_64_bit;
+static bool is_uniq_mode = true;
 static bool use_cover_edges;
 
 static uint8* input_data;
@@ -347,9 +348,12 @@ struct call_t {
 struct cover_t {
 	int fd;
 	uint32 size;
+	uint32 size_edge;
 	uint32 mmap_alloc_size;
 	char* data;
 	char* data_end;
+	char* data_edge;
+	char* data_edge_end;
 	// Currently collecting comparisons.
 	bool collect_comps;
 	// Note: On everything but darwin the first value in data is the count of
@@ -367,6 +371,8 @@ struct cover_t {
 	intptr_t pc_offset;
 	// The coverage buffer has overflowed and we have truncated coverage.
 	bool overflow;
+	// kcov mode.
+	unsigned int mode;
 };
 
 struct thread_t {
@@ -1157,36 +1163,51 @@ template <typename cover_data_t>
 uint32 write_signal(flatbuffers::FlatBufferBuilder& fbb, int index, cover_t* cov, bool all)
 {
 	// Write out feedback signals.
-	// Currently it is code edges computed as xor of two subsequent basic block PCs.
 	fbb.StartVector(0, sizeof(uint64));
-	cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
-	if ((char*)(cover_data + cov->size) > cov->data_end)
-		failmsg("too much cover", "cov=%u", cov->size);
-	uint32 nsig = 0;
-	cover_data_t prev_pc = 0;
-	bool prev_filter = true;
-	for (uint32 i = 0; i < cov->size; i++) {
-		cover_data_t pc = cover_data[i] + cov->pc_offset;
-		uint64 sig = pc;
-		if (use_cover_edges) {
-			// Only hash the lower 12 bits so the hash is independent of any module offsets.
-			const uint64 mask = (1 << 12) - 1;
-			sig ^= hash(prev_pc & mask) & mask;
+	if (is_uniq_mode) {
+		uint32 nsig = 0;
+		cover_data_t* cover_data = (cover_data_t*)(cov->data_edge + cov->data_offset);
+		if ((char*)(cover_data + cov->size_edge) > cov->data_edge_end)
+			failmsg("too much cover", "cov=%u", cov->size_edge);
+		for (uint32 i = 0; i < cov->size_edge; i++) {
+			cover_data_t sig = cover_data[i] + cov->pc_offset;
+			if (!all && max_signal && max_signal->Contains(sig))
+				continue;
+			fbb.PushElement(uint64(sig));
+			nsig++;
 		}
-		bool filter = coverage_filter(pc);
-		// Ignore the edge only if both current and previous PCs are filtered out
-		// to capture all incoming and outcoming edges into the interesting code.
-		bool ignore = !filter && !prev_filter;
-		prev_pc = pc;
-		prev_filter = filter;
-		if (ignore || dedup(index, sig))
-			continue;
-		if (!all && max_signal && max_signal->Contains(sig))
-			continue;
-		fbb.PushElement(uint64(sig));
-		nsig++;
+		return fbb.EndVector(nsig);
+	} else {
+		// It is code edges computed as xor of two subsequent basic block PCs.
+		cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
+		if ((char*)(cover_data + cov->size) > cov->data_end)
+			failmsg("too much cover", "cov=%u", cov->size);
+		uint32 nsig = 0;
+		cover_data_t prev_pc = 0;
+		bool prev_filter = true;
+		for (uint32 i = 0; i < cov->size; i++) {
+			cover_data_t pc = cover_data[i] + cov->pc_offset;
+			uint64 sig = pc;
+			if (use_cover_edges) {
+				// Only hash the lower 12 bits so the hash is independent of any module offsets.
+				const uint64 mask = (1 << 12) - 1;
+				sig ^= hash(prev_pc & mask) & mask;
+			}
+			bool filter = coverage_filter(pc);
+			// Ignore the edge only if both current and previous PCs are filtered out
+			// to capture all incoming and outcoming edges into the interesting code.
+			bool ignore = !filter && !prev_filter;
+			prev_pc = pc;
+			prev_filter = filter;
+			if (ignore || dedup(index, sig))
+				continue;
+			if (!all && max_signal && max_signal->Contains(sig))
+				continue;
+			fbb.PushElement(uint64(sig));
+			nsig++;
+		}
+		return fbb.EndVector(nsig);
 	}
-	return fbb.EndVector(nsig);
 }
 
 template <typename cover_data_t>
@@ -1519,8 +1540,11 @@ void execute_call(thread_t* th)
 	      th->id, current_time_ms() - start_time_ms, call->name, (uint64)th->res);
 	if (th->res == (intptr_t)-1)
 		debug(" errno=%d", th->reserrno);
-	if (flag_coverage)
+	if (flag_coverage) {
 		debug(" cover=%u", th->cov.size);
+		if (is_uniq_mode)
+			debug(" edge=%u", th->cov.size_edge);
+	}
 	if (th->call_props.fail_nth > 0)
 		debug(" fault=%d", th->fault_injected);
 	if (th->call_props.rerun > 0)
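
The substance of the write_signal() change above: in the legacy path each feedback signal is the current PC xor-ed with a hash of the previous PC's low 12 bits (so edge signals stay stable across module relocations), then coverage-filtered and deduplicated; in uniq mode the kernel already emits deduplicated edge values, so the executor only rebases them by pc_offset and applies the max_signal filter. A toy illustration of the legacy mixing, with a stand-in mixer in place of the executor's own hash():

#include <stdint.h>

// Stand-in mixer; the executor uses its own hash(), any 64-bit mixing
// function illustrates the idea.
static uint64_t toy_hash(uint64_t x)
{
	x ^= x >> 33;
	x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33;
	return x;
}

// Legacy edge signal: current PC xor a 12-bit hash of the previous PC's low
// 12 bits, so the value does not depend on where a module was loaded.
static uint64_t legacy_edge_signal(uint64_t pc, uint64_t prev_pc)
{
	const uint64_t mask = (1 << 12) - 1;
	return pc ^ (toy_hash(prev_pc & mask) & mask);
}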

executor/executor_linux.h

Lines changed: 33 additions & 5 deletions
@@ -17,10 +17,13 @@ static bool pkeys_enabled;
 // very large buffer b/c there are usually multiple procs, and each of them consumes
 // significant amount of memory. In snapshot mode we have only one proc, so we can have
 // larger coverage buffer.
-const int kSnapshotCoverSize = 1024 << 10;
+const int kSnapshotCoverSize = 8 << 10;
 
 const unsigned long KCOV_TRACE_PC = 0;
 const unsigned long KCOV_TRACE_CMP = 1;
+const unsigned long KCOV_TRACE_UNIQ_PC = 2;
+const unsigned long KCOV_TRACE_UNIQ_EDGE = 4;
+const unsigned long KCOV_TRACE_UNIQ_CMP = 8;
 
 template <int N>
 struct kcov_remote_arg {
@@ -149,21 +152,41 @@ static void cover_mmap(cover_t* cov)
 	cov->data_end = cov->data + cov->mmap_alloc_size;
 	cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
 	cov->pc_offset = 0;
+
+	if (is_uniq_mode) {
+		// Now map edge cover.
+		unsigned int off = cov->mmap_alloc_size;
+		mapped = (char*)mmap(NULL, cov->mmap_alloc_size + 2 * SYZ_PAGE_SIZE,
+				     PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, off);
+		if (mapped == MAP_FAILED)
+			exitf("failed to preallocate kcov buffer");
+		cov->data_edge = (char*)mmap(mapped + SYZ_PAGE_SIZE, cov->mmap_alloc_size,
+					     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, cov->fd, off);
+		if (cov->data_edge == MAP_FAILED)
+			exitf("cover mmap failed");
+		if (pkeys_enabled && pkey_mprotect(cov->data_edge, cov->mmap_alloc_size, PROT_READ | PROT_WRITE, RESERVED_PKEY))
+			exitf("failed to pkey_mprotect kcov buffer");
+		cov->data_edge_end = cov->data_edge + cov->mmap_alloc_size;
+	}
 }
 
 static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 {
-	unsigned int kcov_mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC;
+	cov->mode = collect_comps ? KCOV_TRACE_UNIQ_CMP : KCOV_TRACE_UNIQ_PC | KCOV_TRACE_UNIQ_EDGE;
 	// The KCOV_ENABLE call should be fatal,
 	// but in practice ioctl fails with assorted errors (9, 14, 25),
 	// so we use exitf.
 	if (!extra) {
-		if (ioctl(cov->fd, KCOV_ENABLE, kcov_mode))
-			exitf("cover enable write trace failed, mode=%d", kcov_mode);
+		if (ioctl(cov->fd, KCOV_ENABLE, cov->mode)) {
+			is_uniq_mode = false;
+			cov->mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC;
+			if (ioctl(cov->fd, KCOV_ENABLE, cov->mode))
+				exitf("cover enable write trace failed, mode=%d", cov->mode);
+		}
 		return;
 	}
 	kcov_remote_arg<1> arg = {
-	    .trace_mode = kcov_mode,
+	    .trace_mode = collect_comps ? KCOV_TRACE_CMP : KCOV_TRACE_PC,
 	    // Coverage buffer size of background threads.
 	    .area_size = kExtraCoverSize,
 	    .num_handles = 1,
@@ -186,6 +209,7 @@ static void cover_reset(cover_t* cov)
 	}
 	cover_unprotect(cov);
 	*(uint64*)cov->data = 0;
+	*(uint64*)cov->data_edge = 0;
 	cover_protect(cov);
 	cov->overflow = false;
 }
@@ -195,6 +219,10 @@ static void cover_collect_impl(cover_t* cov)
 {
 	cov->size = *(cover_data_t*)cov->data;
 	cov->overflow = (cov->data + (cov->size + 2) * sizeof(cover_data_t)) > cov->data_end;
+	if (cov->mode & KCOV_TRACE_UNIQ_EDGE) {
+		cov->size_edge = *(cover_data_t*)cov->data_edge;
+		cov->overflow |= (cov->data_edge + (cov->size_edge + 2) * sizeof(cover_data_t)) > cov->data_edge_end;
+	}
 }
 
 static void cover_collect(cover_t* cov)
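
One design note on the new cover_mmap() branch above: it reserves the edge region plus one page on each side as PROT_NONE and then overlays the real kcov mapping with MAP_FIXED, so stray accesses just past the edge buffer fault instead of silently corrupting a neighboring mapping (apparently mirroring how the main buffer is mapped, given the reuse of `mapped` in the surrounding code). A stand-alone sketch of that layout, with a placeholder fd, offset, and page size rather than the executor's values:

#include <stddef.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

// Map `size` bytes of `fd` at `file_off`, surrounded by PROT_NONE guard pages.
static void* map_with_guards(int fd, size_t size, off_t file_off)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	// Reserve the window plus one inaccessible page on each side.
	char* outer = (char*)mmap(NULL, size + 2 * page, PROT_NONE,
				  MAP_PRIVATE | MAP_ANON, -1, 0);
	if (outer == MAP_FAILED)
		return NULL;
	// Overlay the usable part one page in; MAP_FIXED replaces only the middle
	// of the reservation, leaving the guard pages PROT_NONE.
	void* inner = mmap(outer + page, size, PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_FIXED, fd, file_off);
	return inner == MAP_FAILED ? NULL : inner;
}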

sys/linux/sys.txt

Lines changed: 1 addition & 1 deletion
@@ -1400,7 +1400,7 @@ _ = ADJ_OFFSET, ADJ_FREQUENCY, ADJ_MAXERROR, ADJ_ESTERROR, ADJ_STATUS, ADJ_TIMEC
 _ = SMB_PATH_MAX, XT_CGROUP_PATH_MAX, XENSTORE_REL_PATH_MAX
 
 # misc
-_ = KCOV_INIT_TRACE, KCOV_ENABLE, KCOV_DISABLE, KCOV_TRACE_PC, KCOV_TRACE_CMP, PTRACE_TRACEME, SYSLOG_ACTION_CONSOLE_ON, SYSLOG_ACTION_CONSOLE_OFF, SYSLOG_ACTION_CONSOLE_LEVEL, SYSLOG_ACTION_CLEAR, __NR_mmap2
+_ = KCOV_INIT_TRACE, KCOV_ENABLE, KCOV_DISABLE, KCOV_TRACE_PC, KCOV_TRACE_CMP, KCOV_TRACE_UNIQ_PC, KCOV_TRACE_UNIQ_EDGE, KCOV_TRACE_UNIQ_CMP, PTRACE_TRACEME, SYSLOG_ACTION_CONSOLE_ON, SYSLOG_ACTION_CONSOLE_OFF, SYSLOG_ACTION_CONSOLE_LEVEL, SYSLOG_ACTION_CLEAR, __NR_mmap2
 
 # Hardcode KCOV_REMOTE_ENABLE value for amd64 until new kcov patches reach mainline.
 define KCOV_REMOTE_ENABLE 1075340134

sys/linux/sys.txt.const

Lines changed: 3 additions & 0 deletions
@@ -216,6 +216,9 @@ KCOV_INIT_TRACE = 2148033281, 386:arm:2147771137, mips64le:ppc64le:1074291457
 KCOV_REMOTE_ENABLE = 1075340134, mips64le:ppc64le:2149081958
 KCOV_TRACE_CMP = 1
 KCOV_TRACE_PC = 0
+KCOV_TRACE_UNIQ_PC = 2
+KCOV_TRACE_UNIQ_EDGE = 4
+KCOV_TRACE_UNIQ_CMP = 8
 KEXEC_ARCH_386 = 196608
 KEXEC_ARCH_ARM = 2621440
 KEXEC_ARCH_DEFAULT = 0
