From 25a61fe60fa2868cc94367fc0e48aa1b1dec26bd Mon Sep 17 00:00:00 2001
From: Alexander Potapenko
Date: Tue, 18 Feb 2025 17:01:48 +0100
Subject: [PATCH] executor: refactor struct cover_t

Instead of having a single `void *data` pointer that is used to both
allocate the memory and access the trace, split it into:
 - `void *alloc` of size `mmap_alloc_size`, used by mmap/munmap to
   manage the memory for kcov (may contain other data apart from the
   trace)
 - `char *trace` of size `trace_size`, used to access the collected
   trace

Future patches will introduce other collection modes that may relocate
the trace, so `alloc` and `trace` won't be interchangeable anymore.

Also rename `data_offset` to `trace_skip`. From now on, `XXX_offset` is
the offset of `XXX` from the beginning of `alloc`, and `trace_skip` is
the number of bytes to skip from the beginning of the trace to get to
the actual PCs.

I tried my best not to break Darwin and BSD, but I didn't test them.
---
 executor/executor.cc       | 39 +++++++++++++++++++++++----------------
 executor/executor_bsd.h    | 23 +++++++++++++----------
 executor/executor_darwin.h | 17 ++++++++++-------
 executor/executor_linux.h  | 31 ++++++++++++++++++-------------
 executor/executor_test.h   | 39 +++++++++++++++++++------------------
 executor/snapshot.h        |  2 +-
 6 files changed, 86 insertions(+), 65 deletions(-)

diff --git a/executor/executor.cc b/executor/executor.cc
index 7a0c115b8cbe..6da1f495db87 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -348,18 +348,25 @@ struct cover_t {
 	int fd;
 	uint32 size;
 	uint32 mmap_alloc_size;
-	char* data;
-	char* data_end;
+	// Memory buffer shared with kcov.
+	void* alloc;
+	// Buffer used by kcov for collecting PCs.
+	char* trace;
+	uint32 trace_size;
+	char* trace_end;
+	// Offset of `trace` from the beginning of `alloc`. Should only be accessed
+	// by OS-specific code.
+	intptr_t trace_offset;
 	// Currently collecting comparisons.
 	bool collect_comps;
-	// Note: On everything but darwin the first value in data is the count of
-	// recorded PCs, followed by the PCs. We therefore set data_offset to the
+	// Note: On everything but darwin the first value in trace is the count of
+	// recorded PCs, followed by the PCs. We therefore set trace_skip to the
 	// size of one PC.
-	// On darwin data points to an instance of the ksancov_trace struct. Here we
-	// set data_offset to the offset between data and the structs 'pcs' member,
+	// On darwin trace points to an instance of the ksancov_trace struct. Here we
+	// set trace_skip to the offset between trace and the struct's 'pcs' member,
 	// which contains the PCs.
-	intptr_t data_offset;
-	// Note: On everything but darwin this is 0, as the PCs contained in data
+	intptr_t trace_skip;
+	// Note: On everything but darwin this is 0, as the PCs contained in trace
 	// are already correct. XNUs KSANCOV API, however, chose to always squeeze
 	// PCs into 32 bit. To make the recorded PC fit, KSANCOV substracts a fixed
 	// offset (VM_MIN_KERNEL_ADDRESS for AMD64) and then truncates the result to
@@ -1170,8 +1177,8 @@ uint32 write_signal(flatbuffers::FlatBufferBuilder& fbb, int index, cover_t* cov
 	// Write out feedback signals.
 	// Currently it is code edges computed as xor of two subsequent basic block PCs.
 	fbb.StartVector(0, sizeof(uint64));
-	cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
-	if ((char*)(cover_data + cov->size) > cov->data_end)
+	cover_data_t* cover_data = (cover_data_t*)(cov->trace + cov->trace_skip);
+	if ((char*)(cover_data + cov->size) > cov->trace_end)
 		failmsg("too much cover", "cov=%u", cov->size);
 	uint32 nsig = 0;
 	cover_data_t prev_pc = 0;
@@ -1204,7 +1211,7 @@ template <typename cover_data_t>
 uint32 write_cover(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
 {
 	uint32 cover_size = cov->size;
-	cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
+	cover_data_t* cover_data = (cover_data_t*)(cov->trace + cov->trace_skip);
 	if (flag_dedup_cover) {
 		cover_data_t* end = cover_data + cover_size;
 		std::sort(cover_data, end);
@@ -1220,11 +1227,11 @@ uint32 write_cover(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
 uint32 write_comparisons(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
 {
 	// Collect only the comparisons
-	uint64 ncomps = *(uint64_t*)cov->data;
-	kcov_comparison_t* cov_start = (kcov_comparison_t*)(cov->data + sizeof(uint64));
-	if ((char*)(cov_start + ncomps) > cov->data_end)
+	uint64 ncomps = *(uint64_t*)cov->trace;
+	kcov_comparison_t* cov_start = (kcov_comparison_t*)(cov->trace + sizeof(uint64));
+	if ((char*)(cov_start + ncomps) > cov->trace_end)
 		failmsg("too many comparisons", "ncomps=%llu", ncomps);
-	cov->overflow = ((char*)(cov_start + ncomps + 1) > cov->data_end);
+	cov->overflow = ((char*)(cov_start + ncomps + 1) > cov->trace_end);
 	rpc::ComparisonRaw* start = (rpc::ComparisonRaw*)cov_start;
 	rpc::ComparisonRaw* end = start;
 	// We will convert kcov_comparison_t to ComparisonRaw inplace.
@@ -1456,7 +1463,7 @@ void thread_create(thread_t* th, int id, bool need_coverage)
 
 void thread_mmap_cover(thread_t* th)
 {
-	if (th->cov.data != NULL)
+	if (th->cov.alloc != NULL)
 		return;
 	cover_mmap(&th->cov);
 	cover_protect(&th->cov);
diff --git a/executor/executor_bsd.h b/executor/executor_bsd.h
index 0eb76c58819d..22c446313290 100644
--- a/executor/executor_bsd.h
+++ b/executor/executor_bsd.h
@@ -106,15 +106,18 @@ static void cover_open(cover_t* cov, bool extra)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
 		fail("cover_mmap invoked on an already mmapped cover_t object");
 	void* mmap_ptr = mmap(NULL, cov->mmap_alloc_size, PROT_READ | PROT_WRITE,
 			      MAP_SHARED, cov->fd, 0);
 	if (mmap_ptr == MAP_FAILED)
 		fail("cover mmap failed");
-	cov->data = (char*)mmap_ptr;
-	cov->data_end = cov->data + cov->mmap_alloc_size;
-	cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+	cov->alloc = mmap_ptr;
+	cov->trace = (char*)cov->alloc;
+	cov->trace_size = cov->mmap_alloc_size;
+	cov->trace_end = cov->trace + cov->trace_size;
+	cov->trace_offset = 0;
+	cov->trace_skip = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
 	cov->pc_offset = 0;
 }
 
@@ -124,7 +127,7 @@ static void cover_protect(cover_t* cov)
 	size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
 	long page_size = sysconf(_SC_PAGESIZE);
 	if (page_size > 0)
-		mprotect(cov->data + page_size, mmap_alloc_size - page_size,
+		mprotect(cov->alloc + page_size, mmap_alloc_size - page_size,
 			 PROT_READ);
 #elif GOOS_openbsd
 	int mib[2], page_size;
@@ -133,7 +136,7 @@ static void cover_protect(cover_t* cov)
 	mib[1] = HW_PAGESIZE;
 	size_t len = sizeof(page_size);
 	if (sysctl(mib, ARRAY_SIZE(mib), &page_size, &len, NULL, 0) != -1)
-		mprotect(cov->data + page_size, mmap_alloc_size - page_size, PROT_READ);
+		mprotect(cov->alloc + page_size, mmap_alloc_size - page_size, PROT_READ);
 #endif
 }
 
@@ -141,10 +144,10 @@ static void cover_unprotect(cover_t* cov)
 {
#if GOOS_freebsd
 	size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
-	mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
+	mprotect(cov->alloc, mmap_alloc_size, PROT_READ | PROT_WRITE);
 #elif GOOS_openbsd
 	size_t mmap_alloc_size = kCoverSize * sizeof(uintptr_t);
-	mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
+	mprotect(cov->alloc, mmap_alloc_size, PROT_READ | PROT_WRITE);
 #endif
 }
 
@@ -171,12 +174,12 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 
 static void cover_reset(cover_t* cov)
 {
-	*(uint64*)cov->data = 0;
+	*(uint64*)cov->trace = 0;
 }
 
 static void cover_collect(cover_t* cov)
 {
-	cov->size = *(uint64*)cov->data;
+	cov->size = *(uint64*)cov->trace;
 }
 
 #if GOOS_netbsd
diff --git a/executor/executor_darwin.h b/executor/executor_darwin.h
index 76b939fcfa68..93175ae2198d 100644
--- a/executor/executor_darwin.h
+++ b/executor/executor_darwin.h
@@ -73,7 +73,7 @@ static void cover_open(cover_t* cov, bool extra)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
 		fail("cover_mmap invoked on an already mmapped cover_t object");
 	uintptr_t mmap_ptr = 0;
 	if (ksancov_map(cov->fd, &mmap_ptr, &cov->mmap_alloc_size))
@@ -84,8 +84,10 @@ static void cover_mmap(cover_t* cov)
 
 	if (cov->mmap_alloc_size > kCoverSize)
 		fail("mmap allocation size larger than anticipated");
-	cov->data = (char*)mmap_ptr;
-	cov->data_end = cov->data + cov->mmap_alloc_size;
+	cov->alloc = (void*)mmap_ptr;
+	cov->trace_size = cov->mmap_alloc_size;
+	cov->trace = (char*)cov->alloc;
+	cov->trace_end = cov->trace + cov->trace_size;
 }
 
 static void cover_protect(cover_t* cov)
@@ -110,14 +112,15 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 
 static void cover_reset(cover_t* cov)
 {
-	ksancov_reset((struct ksancov_header*)cov->data);
-	ksancov_start((struct ksancov_header*)cov->data);
+	ksancov_reset((struct ksancov_header*)cov->trace);
+	ksancov_start((struct ksancov_header*)cov->trace);
 }
 
 static void cover_collect(cover_t* cov)
 {
-	struct ksancov_trace* trace = (struct ksancov_trace*)cov->data;
+	struct ksancov_trace* trace = (struct ksancov_trace*)cov->trace;
 	cov->size = ksancov_trace_head(trace);
-	cov->data_offset = ((int64_t) & (trace->pcs)) - ((int64_t)(cov->data));
+	cov->trace_offset = 0;
+	cov->trace_skip = ((int64_t) & (trace->pcs)) - ((int64_t)(cov->trace));
 	cov->pc_offset = trace->offset;
 }
diff --git a/executor/executor_linux.h b/executor/executor_linux.h
index 9784700ba732..ddaacc6aaaac 100644
--- a/executor/executor_linux.h
+++ b/executor/executor_linux.h
@@ -130,7 +130,7 @@ static void cover_unprotect(cover_t* cov)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
fail("cover_mmap invoked on an already mmapped cover_t object"); if (cov->mmap_alloc_size == 0) fail("cover_t structure is corrupted"); @@ -140,14 +140,18 @@ static void cover_mmap(cover_t* cov) if (mapped == MAP_FAILED) exitf("failed to preallocate kcov buffer"); // Now map the kcov buffer to the file, overwriting the existing mapping above. - cov->data = (char*)mmap(mapped + SYZ_PAGE_SIZE, cov->mmap_alloc_size, - PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, cov->fd, 0); - if (cov->data == MAP_FAILED) + cov->alloc = (char*)mmap(mapped + SYZ_PAGE_SIZE, cov->mmap_alloc_size, + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, cov->fd, 0); + if (cov->alloc == MAP_FAILED) exitf("cover mmap failed"); - if (pkeys_enabled && pkey_mprotect(cov->data, cov->mmap_alloc_size, PROT_READ | PROT_WRITE, RESERVED_PKEY)) + if (pkeys_enabled && pkey_mprotect(cov->alloc, cov->mmap_alloc_size, PROT_READ | PROT_WRITE, RESERVED_PKEY)) exitf("failed to pkey_mprotect kcov buffer"); - cov->data_end = cov->data + cov->mmap_alloc_size; - cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t); + cov->trace_offset = 0; + cov->trace = (char*)cov->alloc; + // TODO(glider): trace size and may be different from mmap_alloc_size in the future. + cov->trace_size = cov->mmap_alloc_size; + cov->trace_end = cov->trace + cov->mmap_alloc_size; + cov->trace_skip = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t); cov->pc_offset = 0; } @@ -185,7 +189,7 @@ static void cover_reset(cover_t* cov) cov = ¤t_thread->cov; } cover_unprotect(cov); - *(uint64*)cov->data = 0; + *(uint64*)cov->trace = 0; cover_protect(cov); cov->overflow = false; } @@ -193,8 +197,8 @@ static void cover_reset(cover_t* cov) template static void cover_collect_impl(cover_t* cov) { - cov->size = *(cover_data_t*)cov->data; - cov->overflow = (cov->data + (cov->size + 2) * sizeof(cover_data_t)) > cov->data_end; + cov->size = *(cover_data_t*)cov->trace; + cov->overflow = (cov->trace + (cov->size + 2) * sizeof(cover_data_t)) > cov->trace_end; } static void cover_collect(cover_t* cov) @@ -285,8 +289,8 @@ static const char* setup_delay_kcov() cov.fd = kCoverFd; cover_open(&cov, false); cover_mmap(&cov); - char* first = cov.data; - cov.data = nullptr; + char* first = (char*)cov.alloc; + cov.alloc = nullptr; cover_mmap(&cov); // If delayed kcov mmap is not supported by the kernel, // accesses to the second mapping will crash. 
@@ -298,7 +302,8 @@ static const char* setup_delay_kcov()
 			fail("clock_gettime failed");
 		error = "kernel commit b3d7fe86fbd0 is not present";
 	} else {
-		munmap(cov.data - SYZ_PAGE_SIZE, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
+		void* region = (void*)((char*)cov.alloc - SYZ_PAGE_SIZE);
+		munmap(region, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
 	}
 	munmap(first - SYZ_PAGE_SIZE, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
 	close(cov.fd);
diff --git a/executor/executor_test.h b/executor/executor_test.h
index e2a2002bb2df..76da75daace6 100644
--- a/executor/executor_test.h
+++ b/executor/executor_test.h
@@ -36,7 +36,7 @@ static void os_init(int argc, char** argv, void* data, size_t data_size)
 
 extern "C" notrace void __sanitizer_cov_trace_pc(void)
 {
-	if (current_thread == nullptr || current_thread->cov.data == nullptr || current_thread->cov.collect_comps)
+	if (current_thread == nullptr || current_thread->cov.trace == nullptr || current_thread->cov.collect_comps)
 		return;
 	uint64 pc = (uint64)__builtin_return_address(0);
 	// Convert to what is_kernel_pc will accept as valid coverage;
@@ -45,16 +45,16 @@ extern "C" notrace void __sanitizer_cov_trace_pc(void)
 	// because it must not be instrumented which is hard to achieve for all compiler
 	// if the code is in a separate function.
 	if (is_kernel_64_bit) {
-		uint64* start = (uint64*)current_thread->cov.data;
-		uint64* end = (uint64*)current_thread->cov.data_end;
+		uint64* start = (uint64*)current_thread->cov.trace;
+		uint64* end = (uint64*)current_thread->cov.trace_end;
 		uint64 pos = start[0];
 		if (start + pos + 1 < end) {
 			start[0] = pos + 1;
 			start[pos + 1] = pc;
 		}
 	} else {
-		uint32* start = (uint32*)current_thread->cov.data;
-		uint32* end = (uint32*)current_thread->cov.data_end;
+		uint32* start = (uint32*)current_thread->cov.trace;
+		uint32* end = (uint32*)current_thread->cov.trace_end;
 		uint32 pos = start[0];
 		if (start + pos + 1 < end) {
 			start[0] = pos + 1;
@@ -85,15 +85,15 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)
 
 static void cover_reset(cover_t* cov)
 {
-	*(uint64*)(cov->data) = 0;
+	*(uint64*)(cov->trace) = 0;
 }
 
 static void cover_collect(cover_t* cov)
 {
 	if (is_kernel_64_bit)
-		cov->size = *(uint64*)cov->data;
+		cov->size = *(uint64*)cov->trace;
 	else
-		cov->size = *(uint32*)cov->data;
+		cov->size = *(uint32*)cov->trace;
 }
 
 static void cover_protect(cover_t* cov)
@@ -102,16 +102,19 @@ static void cover_protect(cover_t* cov)
 
 static void cover_mmap(cover_t* cov)
 {
-	if (cov->data != NULL)
+	if (cov->alloc != NULL)
 		fail("cover_mmap invoked on an already mmapped cover_t object");
 	if (cov->mmap_alloc_size == 0)
 		fail("cover_t structure is corrupted");
-	cov->data = (char*)mmap(NULL, cov->mmap_alloc_size,
-				PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
-	if (cov->data == MAP_FAILED)
+	cov->alloc = (char*)mmap(NULL, cov->mmap_alloc_size,
+				 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+	if (cov->alloc == MAP_FAILED)
 		exitf("cover mmap failed");
-	cov->data_end = cov->data + cov->mmap_alloc_size;
-	cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+	cov->trace = (char*)cov->alloc;
+	cov->trace_size = cov->mmap_alloc_size;
+	cov->trace_end = cov->trace + cov->trace_size;
+	cov->trace_offset = 0;
+	cov->trace_skip = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
 	// We don't care about the specific PC values for now.
 	// Once we do, we might want to consider ASLR here.
 	cov->pc_offset = 0;
@@ -123,11 +126,11 @@ static void cover_unprotect(cover_t* cov)
 
 static long inject_cover(cover_t* cov, long a, long b)
 {
-	if (cov->data == nullptr)
+	if (cov->trace == nullptr)
 		return ENOENT;
-	uint32 size = std::min((uint32)b, cov->mmap_alloc_size);
-	memcpy(cov->data, (void*)a, size);
-	memset(cov->data + size, 0xcd, std::min<uint64>(100, cov->mmap_alloc_size - size));
+	uint32 size = std::min((uint32)b, cov->trace_size);
+	memcpy(cov->trace, (void*)a, size);
+	memset(cov->trace + size, 0xcd, std::min<uint64>(100, cov->trace_size - size));
 	return 0;
 }
 
diff --git a/executor/snapshot.h b/executor/snapshot.h
index 71c0b3940fa4..76d652cd24be 100644
--- a/executor/snapshot.h
+++ b/executor/snapshot.h
@@ -213,7 +213,7 @@ static void SnapshotStart()
 		thread_t* th = &threads[i];
 		thread_create(th, i, flag_coverage);
 		if (flag_coverage)
-			PopulateMemory(th->cov.data, kCoveragePopulate);
+			PopulateMemory(th->cov.alloc, kCoveragePopulate);
 	}
 	TouchMemory((char*)output_data + output_size - kOutputPopulate, kOutputPopulate);
 	TouchMemory(ivs.input, kInputPopulate);
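
Note: below is a minimal, illustrative sketch of the invariants this
refactoring is meant to establish between the new fields. It is standalone
C++ and not part of the patch; `cover_sketch`, `invariants_hold` and
`first_pc` are hypothetical names that do not exist in the tree.

#include <stdint.h>

struct cover_sketch {
	void* alloc; // mmap'ed kcov region of mmap_alloc_size bytes
	uint32_t mmap_alloc_size;
	char* trace; // where kcov writes the collected PCs
	uint32_t trace_size;
	char* trace_end; // trace + trace_size
	intptr_t trace_offset; // trace - alloc; OS-specific code only
	intptr_t trace_skip; // bytes to skip in trace before the PCs
};

// After cover_mmap(), every backend is expected to leave the struct in
// a state where these relations hold. trace_offset is 0 everywhere
// today, but future collection modes may relocate trace inside alloc.
static bool invariants_hold(const cover_sketch& c)
{
	return c.trace == (char*)c.alloc + c.trace_offset &&
	       c.trace_end == c.trace + c.trace_size &&
	       c.trace + c.trace_size <= (char*)c.alloc + c.mmap_alloc_size;
}

// OS-independent consumers (write_signal/write_cover) locate the first
// PC the same way: skip the header at the start of the trace.
static char* first_pc(const cover_sketch& c)
{
	return c.trace + c.trace_skip;
}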