Commit 25a61fe

executor: refactor struct cover_t
Instead of having a single `void *data` pointer that is used both to allocate the memory and to access the trace, split it into:

- `void *alloc` of size `mmap_alloc_size`, used by mmap/munmap to manage the memory for kcov (may contain other data apart from the trace);
- `char *trace` of size `trace_size`, used to access the collected trace.

Future patches will introduce other collection modes that may relocate the trace, so `alloc` and `trace` won't be interchangeable anymore.

Also rename `data_offset` to `trace_skip`. From now on, `XXX_offset` is the offset of `XXX` from the beginning of `alloc`, and `trace_skip` is the number of bytes to skip from the beginning of the trace to get to the actual PCs.

I tried my best not to break Darwin and BSD, but I didn't test them.
1 parent 0808a66 commit 25a61fe
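
To visualize the change, here is a condensed before/after of the affected fields (a simplified sketch derived from this diff; the real struct in executor/executor.cc carries more members):

#include <cstdint>

// Before: one pointer did double duty for allocation and trace access.
struct cover_before {
	char* data; // mmap'ed kcov buffer, also read to extract PCs
	char* data_end;
	intptr_t data_offset; // bytes to skip before the first PC
};

// After: allocation management and trace access are decoupled.
struct cover_after {
	void* alloc; // mmap_alloc_size bytes managed by mmap/munmap
	char* trace; // where kcov writes PCs; today trace aliases alloc
	uint32_t trace_size;
	char* trace_end;
	intptr_t trace_offset; // offset of trace within alloc (OS-specific code only)
	intptr_t trace_skip; // bytes to skip from trace to the first PC
};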

6 files changed: 86 additions, 65 deletions

executor/executor.cc

Lines changed: 23 additions & 16 deletions

@@ -348,18 +348,25 @@ struct cover_t {
 	int fd;
 	uint32 size;
 	uint32 mmap_alloc_size;
-	char* data;
-	char* data_end;
+	// Memory buffer shared with kcov.
+	void* alloc;
+	// Buffer used by kcov for collecting PCs.
+	char* trace;
+	uint32 trace_size;
+	char* trace_end;
+	// Offset of `trace` from the beginning of `alloc`. Should only be accessed
+	// by OS-specific code.
+	intptr_t trace_offset;
 	// Currently collecting comparisons.
 	bool collect_comps;
-	// Note: On everything but darwin the first value in data is the count of
-	// recorded PCs, followed by the PCs. We therefore set data_offset to the
+	// Note: On everything but darwin the first value in trace is the count of
+	// recorded PCs, followed by the PCs. We therefore set trace_skip to the
 	// size of one PC.
-	// On darwin data points to an instance of the ksancov_trace struct. Here we
-	// set data_offset to the offset between data and the structs 'pcs' member,
+	// On darwin trace points to an instance of the ksancov_trace struct. Here we
+	// set trace_skip to the offset between trace and the structs 'pcs' member,
 	// which contains the PCs.
-	intptr_t data_offset;
-	// Note: On everything but darwin this is 0, as the PCs contained in data
+	intptr_t trace_skip;
+	// Note: On everything but darwin this is 0, as the PCs contained in trace
 	// are already correct. XNUs KSANCOV API, however, chose to always squeeze
 	// PCs into 32 bit. To make the recorded PC fit, KSANCOV substracts a fixed
 	// offset (VM_MIN_KERNEL_ADDRESS for AMD64) and then truncates the result to
@@ -1170,8 +1177,8 @@ uint32 write_signal(flatbuffers::FlatBufferBuilder& fbb, int index, cover_t* cov
 	// Write out feedback signals.
 	// Currently it is code edges computed as xor of two subsequent basic block PCs.
 	fbb.StartVector(0, sizeof(uint64));
-	cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
-	if ((char*)(cover_data + cov->size) > cov->data_end)
+	cover_data_t* cover_data = (cover_data_t*)(cov->trace + cov->trace_skip);
+	if ((char*)(cover_data + cov->size) > cov->trace_end)
 		failmsg("too much cover", "cov=%u", cov->size);
 	uint32 nsig = 0;
 	cover_data_t prev_pc = 0;
@@ -1204,7 +1211,7 @@ template <typename cover_data_t>
 uint32 write_cover(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
 {
 	uint32 cover_size = cov->size;
-	cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
+	cover_data_t* cover_data = (cover_data_t*)(cov->trace + cov->trace_skip);
 	if (flag_dedup_cover) {
 		cover_data_t* end = cover_data + cover_size;
 		std::sort(cover_data, end);
@@ -1220,11 +1227,11 @@ uint32 write_cover(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
 uint32 write_comparisons(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
 {
 	// Collect only the comparisons
-	uint64 ncomps = *(uint64_t*)cov->data;
-	kcov_comparison_t* cov_start = (kcov_comparison_t*)(cov->data + sizeof(uint64));
-	if ((char*)(cov_start + ncomps) > cov->data_end)
+	uint64 ncomps = *(uint64_t*)cov->trace;
+	kcov_comparison_t* cov_start = (kcov_comparison_t*)(cov->trace + sizeof(uint64));
+	if ((char*)(cov_start + ncomps) > cov->trace_end)
 		failmsg("too many comparisons", "ncomps=%llu", ncomps);
-	cov->overflow = ((char*)(cov_start + ncomps + 1) > cov->data_end);
+	cov->overflow = ((char*)(cov_start + ncomps + 1) > cov->trace_end);
 	rpc::ComparisonRaw* start = (rpc::ComparisonRaw*)cov_start;
 	rpc::ComparisonRaw* end = start;
 	// We will convert kcov_comparison_t to ComparisonRaw inplace.
@@ -1456,7 +1463,7 @@ void thread_create(thread_t* th, int id, bool need_coverage)
 
 void thread_mmap_cover(thread_t* th)
 {
-	if (th->cov.data != NULL)
+	if (th->cov.alloc != NULL)
 		return;
 	cover_mmap(&th->cov);
 	cover_protect(&th->cov);
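
All of the write_signal/write_cover/write_comparisons changes above follow one access pattern: start at cov->trace, skip trace_skip bytes, then read cov->size entries while bounds-checking against cov->trace_end. A standalone illustration of that pattern for a 64-bit kernel (a hedged sketch, not the executor's code; dump_trace is a name invented here):

#include <cstdint>
#include <cstdio>

// Non-darwin trace layout: one 64-bit count word, then the PCs, so
// trace_skip equals the size of a single entry.
void dump_trace(const char* trace, const char* trace_end, intptr_t trace_skip)
{
	uint64_t count = *(const uint64_t*)trace;
	const uint64_t* pcs = (const uint64_t*)(trace + trace_skip);
	if ((const char*)(pcs + count) > trace_end)
		return; // overflow; the executor calls failmsg() here instead
	for (uint64_t i = 0; i < count; i++)
		printf("pc[%llu] = 0x%llx\n", (unsigned long long)i,
		       (unsigned long long)pcs[i]);
}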

executor/executor_bsd.h

Lines changed: 13 additions & 10 deletions
@@ -106,15 +106,18 @@ static void cover_open(cover_t* cov, bool extra)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
 		fail("cover_mmap invoked on an already mmapped cover_t object");
 	void* mmap_ptr = mmap(NULL, cov->mmap_alloc_size, PROT_READ | PROT_WRITE,
 			      MAP_SHARED, cov->fd, 0);
 	if (mmap_ptr == MAP_FAILED)
 		fail("cover mmap failed");
-	cov->data = (char*)mmap_ptr;
-	cov->data_end = cov->data + cov->mmap_alloc_size;
-	cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+	cov->alloc = mmap_ptr;
+	cov->trace = (char*)cov->alloc;
+	cov->trace_size = cov->mmap_alloc_size;
+	cov->trace_end = cov->trace + cov->trace_size;
+	cov->trace_offset = 0;
+	cov->trace_skip = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
 	cov->pc_offset = 0;
 }
 
@@ -124,7 +127,7 @@ static void cover_protect(cover_t* cov)
 	size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
 	long page_size = sysconf(_SC_PAGESIZE);
 	if (page_size > 0)
-		mprotect(cov->data + page_size, mmap_alloc_size - page_size,
+		mprotect(cov->alloc + page_size, mmap_alloc_size - page_size,
 			 PROT_READ);
 #elif GOOS_openbsd
 	int mib[2], page_size;
@@ -133,18 +136,18 @@ static void cover_protect(cover_t* cov)
 	mib[1] = HW_PAGESIZE;
 	size_t len = sizeof(page_size);
 	if (sysctl(mib, ARRAY_SIZE(mib), &page_size, &len, NULL, 0) != -1)
-		mprotect(cov->data + page_size, mmap_alloc_size - page_size, PROT_READ);
+		mprotect(cov->alloc + page_size, mmap_alloc_size - page_size, PROT_READ);
 #endif
 }
 
 static void cover_unprotect(cover_t* cov)
 {
 #if GOOS_freebsd
 	size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
-	mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
+	mprotect(cov->alloc, mmap_alloc_size, PROT_READ | PROT_WRITE);
 #elif GOOS_openbsd
 	size_t mmap_alloc_size = kCoverSize * sizeof(uintptr_t);
-	mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
+	mprotect(cov->alloc, mmap_alloc_size, PROT_READ | PROT_WRITE);
 #endif
 }
 
@@ -171,12 +174,12 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 
 static void cover_reset(cover_t* cov)
 {
-	*(uint64*)cov->data = 0;
+	*(uint64*)cov->trace = 0;
 }
 
 static void cover_collect(cover_t* cov)
 {
-	cov->size = *(uint64*)cov->data;
+	cov->size = *(uint64*)cov->trace;
 }
 
 #if GOOS_netbsd
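
Note how cover_protect/cover_unprotect above now take cov->alloc rather than cov->trace: protection applies to the whole allocation, leaving only the first page (which holds the count word) writable between runs, presumably to catch stray user-space writes into the buffer. A minimal standalone sketch of that pattern, assuming a POSIX system (helper names are mine):

#include <sys/mman.h>
#include <unistd.h>

// Make everything past the first page of the allocation read-only.
static void protect_tail(void* alloc, size_t alloc_size)
{
	long page_size = sysconf(_SC_PAGESIZE);
	if (page_size > 0)
		mprotect((char*)alloc + page_size, alloc_size - page_size, PROT_READ);
}

// Restore write access to the whole allocation before resetting the trace.
static void unprotect_all(void* alloc, size_t alloc_size)
{
	mprotect(alloc, alloc_size, PROT_READ | PROT_WRITE);
}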

executor/executor_darwin.h

Lines changed: 10 additions & 7 deletions
@@ -73,7 +73,7 @@ static void cover_open(cover_t* cov, bool extra)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
 		fail("cover_mmap invoked on an already mmapped cover_t object");
 	uintptr_t mmap_ptr = 0;
 	if (ksancov_map(cov->fd, &mmap_ptr, &cov->mmap_alloc_size))
@@ -84,8 +84,10 @@ static void cover_mmap(cover_t* cov)
 	if (cov->mmap_alloc_size > kCoverSize)
 		fail("mmap allocation size larger than anticipated");
 
-	cov->data = (char*)mmap_ptr;
-	cov->data_end = cov->data + cov->mmap_alloc_size;
+	cov->alloc = mmap_ptr;
+	cov->trace_size = cov->mmap_alloc_size;
+	cov->trace = (char*)cov->alloc;
+	cov->trace_end = cov->trace + cov->trace_size;
 }
 
 static void cover_protect(cover_t* cov)
@@ -110,14 +112,15 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 
 static void cover_reset(cover_t* cov)
 {
-	ksancov_reset((struct ksancov_header*)cov->data);
-	ksancov_start((struct ksancov_header*)cov->data);
+	ksancov_reset((struct ksancov_header*)cov->trace);
+	ksancov_start((struct ksancov_header*)cov->trace);
 }
 
 static void cover_collect(cover_t* cov)
 {
-	struct ksancov_trace* trace = (struct ksancov_trace*)cov->data;
+	struct ksancov_trace* trace = (struct ksancov_trace*)cov->trace;
 	cov->size = ksancov_trace_head(trace);
-	cov->data_offset = ((int64_t) & (trace->pcs)) - ((int64_t)(cov->data));
+	cov->trace_offset = 0;
+	cov->trace_skip = ((int64_t) & (trace->pcs)) - ((int64_t)(cov->trace));
 	cov->pc_offset = trace->offset;
 }
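
Unlike the other OSes, darwin's trace begins with a ksancov_trace header, so trace_skip must be computed from the struct layout at runtime rather than set to a fixed entry size. The pointer arithmetic in cover_collect above is effectively an offsetof; a sketch of the same computation (the field list below is a placeholder, not XNU's actual ksancov_trace definition):

#include <cstddef>
#include <cstdint>

// Placeholder stand-in for XNU's ksancov_trace; only `pcs` matters here.
struct ksancov_trace_sketch {
	uint64_t header_words[2]; // hypothetical header fields (head, offset, ...)
	uint32_t pcs[1]; // recorded PCs, truncated to 32 bits by KSANCOV
};

// Equivalent of: cov->trace_skip =
//     ((int64_t)&(trace->pcs)) - ((int64_t)(cov->trace));
static const size_t trace_skip_sketch = offsetof(ksancov_trace_sketch, pcs);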

executor/executor_linux.h

Lines changed: 18 additions & 13 deletions
@@ -130,7 +130,7 @@ static void cover_unprotect(cover_t* cov)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
 		fail("cover_mmap invoked on an already mmapped cover_t object");
 	if (cov->mmap_alloc_size == 0)
 		fail("cover_t structure is corrupted");
@@ -140,14 +140,18 @@ static void cover_mmap(cover_t* cov)
 	if (mapped == MAP_FAILED)
 		exitf("failed to preallocate kcov buffer");
 	// Now map the kcov buffer to the file, overwriting the existing mapping above.
-	cov->data = (char*)mmap(mapped + SYZ_PAGE_SIZE, cov->mmap_alloc_size,
-				PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, cov->fd, 0);
-	if (cov->data == MAP_FAILED)
+	cov->alloc = (char*)mmap(mapped + SYZ_PAGE_SIZE, cov->mmap_alloc_size,
+				 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, cov->fd, 0);
+	if (cov->alloc == MAP_FAILED)
 		exitf("cover mmap failed");
-	if (pkeys_enabled && pkey_mprotect(cov->data, cov->mmap_alloc_size, PROT_READ | PROT_WRITE, RESERVED_PKEY))
+	if (pkeys_enabled && pkey_mprotect(cov->alloc, cov->mmap_alloc_size, PROT_READ | PROT_WRITE, RESERVED_PKEY))
 		exitf("failed to pkey_mprotect kcov buffer");
-	cov->data_end = cov->data + cov->mmap_alloc_size;
-	cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+	cov->trace_offset = 0;
+	cov->trace = (char*)cov->alloc;
+	// TODO(glider): trace size may be different from mmap_alloc_size in the future.
+	cov->trace_size = cov->mmap_alloc_size;
+	cov->trace_end = cov->trace + cov->mmap_alloc_size;
+	cov->trace_skip = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
 	cov->pc_offset = 0;
 }
 
@@ -185,16 +189,16 @@ static void cover_reset(cover_t* cov)
 		cov = &current_thread->cov;
 	}
 	cover_unprotect(cov);
-	*(uint64*)cov->data = 0;
+	*(uint64*)cov->trace = 0;
 	cover_protect(cov);
 	cov->overflow = false;
 }
 
 template <typename cover_data_t>
 static void cover_collect_impl(cover_t* cov)
 {
-	cov->size = *(cover_data_t*)cov->data;
-	cov->overflow = (cov->data + (cov->size + 2) * sizeof(cover_data_t)) > cov->data_end;
+	cov->size = *(cover_data_t*)cov->trace;
+	cov->overflow = (cov->trace + (cov->size + 2) * sizeof(cover_data_t)) > cov->trace_end;
 }
 
 static void cover_collect(cover_t* cov)
@@ -285,8 +289,8 @@ static const char* setup_delay_kcov()
 	cov.fd = kCoverFd;
 	cover_open(&cov, false);
 	cover_mmap(&cov);
-	char* first = cov.data;
-	cov.data = nullptr;
+	char* first = (char*)cov.alloc;
+	cov.alloc = nullptr;
 	cover_mmap(&cov);
 	// If delayed kcov mmap is not supported by the kernel,
 	// accesses to the second mapping will crash.
@@ -298,7 +302,8 @@ static const char* setup_delay_kcov()
 		fail("clock_gettime failed");
 		error = "kernel commit b3d7fe86fbd0 is not present";
 	} else {
-		munmap(cov.data - SYZ_PAGE_SIZE, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
+		void* region = (void*)((char*)cov.alloc - SYZ_PAGE_SIZE);
+		munmap(region, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
 	}
 	munmap(first - SYZ_PAGE_SIZE, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
 	close(cov.fd);
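
The Linux cover_mmap above relies on a two-step mapping: first reserve an anonymous region with one spare page on either side, then map the kcov fd MAP_FIXED into the middle. That layout is why both munmap calls in setup_delay_kcov subtract SYZ_PAGE_SIZE from the start and add two pages to the length. A condensed sketch, with PAGE standing in for SYZ_PAGE_SIZE and the helper name invented here:

#include <sys/mman.h>
#include <cstddef>

// Reserve [page][kcov buffer][page], then overlay the kcov fd in the middle.
// Returns what would become cov->alloc, or nullptr on failure.
static void* map_kcov_buffer(int kcov_fd, size_t alloc_size, size_t PAGE)
{
	char* region = (char*)mmap(nullptr, alloc_size + 2 * PAGE,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (region == MAP_FAILED)
		return nullptr;
	void* buf = mmap(region + PAGE, alloc_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_FIXED, kcov_fd, 0);
	return buf == MAP_FAILED ? nullptr : buf;
}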

executor/executor_test.h

Lines changed: 21 additions & 18 deletions
@@ -36,7 +36,7 @@ static void os_init(int argc, char** argv, void* data, size_t data_size)
 
 extern "C" notrace void __sanitizer_cov_trace_pc(void)
 {
-	if (current_thread == nullptr || current_thread->cov.data == nullptr || current_thread->cov.collect_comps)
+	if (current_thread == nullptr || current_thread->cov.trace == nullptr || current_thread->cov.collect_comps)
 		return;
 	uint64 pc = (uint64)__builtin_return_address(0);
 	// Convert to what is_kernel_pc will accept as valid coverage;
@@ -45,16 +45,16 @@ extern "C" notrace void __sanitizer_cov_trace_pc(void)
 	// because it must not be instrumented which is hard to achieve for all compiler
 	// if the code is in a separate function.
 	if (is_kernel_64_bit) {
-		uint64* start = (uint64*)current_thread->cov.data;
-		uint64* end = (uint64*)current_thread->cov.data_end;
+		uint64* start = (uint64*)current_thread->cov.trace;
+		uint64* end = (uint64*)current_thread->cov.trace_end;
 		uint64 pos = start[0];
 		if (start + pos + 1 < end) {
 			start[0] = pos + 1;
 			start[pos + 1] = pc;
 		}
 	} else {
-		uint32* start = (uint32*)current_thread->cov.data;
-		uint32* end = (uint32*)current_thread->cov.data_end;
+		uint32* start = (uint32*)current_thread->cov.trace;
+		uint32* end = (uint32*)current_thread->cov.trace_end;
 		uint32 pos = start[0];
 		if (start + pos + 1 < end) {
 			start[0] = pos + 1;
@@ -85,15 +85,15 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 
 static void cover_reset(cover_t* cov)
 {
-	*(uint64*)(cov->data) = 0;
+	*(uint64*)(cov->trace) = 0;
 }
 
 static void cover_collect(cover_t* cov)
 {
 	if (is_kernel_64_bit)
-		cov->size = *(uint64*)cov->data;
+		cov->size = *(uint64*)cov->trace;
 	else
-		cov->size = *(uint32*)cov->data;
+		cov->size = *(uint32*)cov->trace;
 }
 
 static void cover_protect(cover_t* cov)
@@ -102,16 +102,19 @@ static void cover_protect(cover_t* cov)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
 		fail("cover_mmap invoked on an already mmapped cover_t object");
 	if (cov->mmap_alloc_size == 0)
 		fail("cover_t structure is corrupted");
-	cov->data = (char*)mmap(NULL, cov->mmap_alloc_size,
-				PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
-	if (cov->data == MAP_FAILED)
+	cov->alloc = (char*)mmap(NULL, cov->mmap_alloc_size,
+				 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+	if (cov->alloc == MAP_FAILED)
 		exitf("cover mmap failed");
-	cov->data_end = cov->data + cov->mmap_alloc_size;
-	cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+	cov->trace = (char*)cov->alloc;
+	cov->trace_size = cov->mmap_alloc_size;
+	cov->trace_end = cov->trace + cov->trace_size;
+	cov->trace_offset = 0;
+	cov->trace_skip = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
 	// We don't care about the specific PC values for now.
 	// Once we do, we might want to consider ASLR here.
 	cov->pc_offset = 0;
@@ -123,11 +126,11 @@ static void cover_unprotect(cover_t* cov)
 
 static long inject_cover(cover_t* cov, long a, long b)
 {
-	if (cov->data == nullptr)
+	if (cov->trace == nullptr)
 		return ENOENT;
-	uint32 size = std::min((uint32)b, cov->mmap_alloc_size);
-	memcpy(cov->data, (void*)a, size);
-	memset(cov->data + size, 0xcd, std::min<uint64>(100, cov->mmap_alloc_size - size));
+	uint32 size = std::min((uint32)b, cov->trace_size);
+	memcpy(cov->trace, (void*)a, size);
+	memset(cov->trace + size, 0xcd, std::min<uint64>(100, cov->trace_size - size));
 	return 0;
 }
 
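
The test tracer above stands in for kcov in unit tests, and the contract it maintains is the same one cover_collect reads back: slot 0 of the trace holds the running PC count, slots 1..count hold the PCs. A self-contained model of that contract (64-bit case only):

#include <cassert>
#include <cstdint>

// Model of the trace contract: start[0] is the count, start[1..] the PCs.
static void trace_pc(uint64_t* start, uint64_t* end, uint64_t pc)
{
	uint64_t pos = start[0];
	if (start + pos + 1 < end) { // drop the PC if the buffer is full
		start[0] = pos + 1;
		start[pos + 1] = pc;
	}
}

int main()
{
	uint64_t buf[8] = {};
	trace_pc(buf, buf + 8, 0x1234);
	trace_pc(buf, buf + 8, 0x5678);
	// cover_collect would read buf[0] as cov->size.
	assert(buf[0] == 2 && buf[1] == 0x1234 && buf[2] == 0x5678);
	return 0;
}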

executor/snapshot.h

Lines changed: 1 addition & 1 deletion
@@ -213,7 +213,7 @@ static void SnapshotStart()
 		thread_t* th = &threads[i];
 		thread_create(th, i, flag_coverage);
 		if (flag_coverage)
-			PopulateMemory(th->cov.data, kCoveragePopulate);
+			PopulateMemory(th->cov.alloc, kCoveragePopulate);
 	}
 	TouchMemory((char*)output_data + output_size - kOutputPopulate, kOutputPopulate);
 	TouchMemory(ivs.input, kInputPopulate);
