
executor: refactor struct cover_t #5809


Open · wants to merge 1 commit into master
39 changes: 23 additions & 16 deletions executor/executor.cc
@@ -348,18 +348,25 @@ struct cover_t {
int fd;
uint32 size;
uint32 mmap_alloc_size;
- char* data;
- char* data_end;
+ // Memory buffer shared with kcov.
+ void* alloc;
+ // Buffer used by kcov for collecting PCs.
+ char* trace;
+ uint32 trace_size;
[Review comment · Collaborator] Let's keep only trace_end.

+ char* trace_end;
+ // Offset of `trace` from the beginning of `alloc`. Should only be accessed
+ // by OS-specific code.
+ intptr_t trace_offset;
// Currently collecting comparisons.
bool collect_comps;
- // Note: On everything but darwin the first value in data is the count of
- // recorded PCs, followed by the PCs. We therefore set data_offset to the
+ // Note: On everything but darwin the first value in trace is the count of
+ // recorded PCs, followed by the PCs. We therefore set trace_skip to the
// size of one PC.
- // On darwin data points to an instance of the ksancov_trace struct. Here we
- // set data_offset to the offset between data and the structs 'pcs' member,
+ // On darwin trace points to an instance of the ksancov_trace struct. Here we
+ // set trace_skip to the offset between trace and the struct's 'pcs' member,
// which contains the PCs.
- intptr_t data_offset;
- // Note: On everything but darwin this is 0, as the PCs contained in data
+ intptr_t trace_skip;
+ // Note: On everything but darwin this is 0, as the PCs contained in trace
// are already correct. XNU's KSANCOV API, however, chose to always squeeze
// PCs into 32 bits. To make the recorded PC fit, KSANCOV subtracts a fixed
// offset (VM_MIN_KERNEL_ADDRESS for AMD64) and then truncates the result to
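
Taken together, the renamed fields describe a single layout: `alloc` is the raw mapping shared with kcov, `trace` is the region kcov writes PCs into (trace = alloc + trace_offset), and `trace_skip` is the per-OS header to step over before the PC array starts. A minimal sketch of how a consumer locates the PCs under the new names (the helper name `trace_pcs` is illustrative, not part of this PR):

    // Sketch only. Mirrors the (cover_data_t*)(cov->trace + cov->trace_skip)
    // expression used by write_signal/write_cover below.
    template <typename cover_data_t>
    static cover_data_t* trace_pcs(cover_t* cov)
    {
        return (cover_data_t*)(cov->trace + cov->trace_skip);
    }
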
@@ -1170,8 +1177,8 @@ uint32 write_signal(flatbuffers::FlatBufferBuilder& fbb, int index, cover_t* cov
// Write out feedback signals.
// Currently it is code edges computed as xor of two subsequent basic block PCs.
fbb.StartVector(0, sizeof(uint64));
- cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
- if ((char*)(cover_data + cov->size) > cov->data_end)
+ cover_data_t* cover_data = (cover_data_t*)(cov->trace + cov->trace_skip);
+ if ((char*)(cover_data + cov->size) > cov->trace_end)
failmsg("too much cover", "cov=%u", cov->size);
uint32 nsig = 0;
cover_data_t prev_pc = 0;
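
The edge signal mentioned in the comment above can be sketched as follows; this is a simplified view of the loop hidden by the fold (the real code also validates and filters PCs before pushing them):

    // Sketch: signal = xor of two subsequent basic block PCs.
    // Assumes cover_data points at cov->size PCs (trace + trace_skip).
    for (uint32 i = 0; i < cov->size; i++) {
        cover_data_t pc = cover_data[i];
        fbb.PushElement((uint64)(pc ^ prev_pc)); // one edge per adjacent PC pair
        prev_pc = pc;
        nsig++;
    }
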
@@ -1204,7 +1211,7 @@ template <typename cover_data_t>
uint32 write_cover(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
{
uint32 cover_size = cov->size;
- cover_data_t* cover_data = (cover_data_t*)(cov->data + cov->data_offset);
+ cover_data_t* cover_data = (cover_data_t*)(cov->trace + cov->trace_skip);
if (flag_dedup_cover) {
cover_data_t* end = cover_data + cover_size;
std::sort(cover_data, end);
@@ -1220,11 +1227,11 @@ uint32 write_cover(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
uint32 write_comparisons(flatbuffers::FlatBufferBuilder& fbb, cover_t* cov)
{
// Collect only the comparisons
- uint64 ncomps = *(uint64_t*)cov->data;
- kcov_comparison_t* cov_start = (kcov_comparison_t*)(cov->data + sizeof(uint64));
- if ((char*)(cov_start + ncomps) > cov->data_end)
+ uint64 ncomps = *(uint64_t*)cov->trace;
+ kcov_comparison_t* cov_start = (kcov_comparison_t*)(cov->trace + sizeof(uint64));
+ if ((char*)(cov_start + ncomps) > cov->trace_end)
failmsg("too many comparisons", "ncomps=%llu", ncomps);
- cov->overflow = ((char*)(cov_start + ncomps + 1) > cov->data_end);
+ cov->overflow = ((char*)(cov_start + ncomps + 1) > cov->trace_end);
rpc::ComparisonRaw* start = (rpc::ComparisonRaw*)cov_start;
rpc::ComparisonRaw* end = start;
// We will convert kcov_comparison_t to ComparisonRaw inplace.
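
For orientation, the records walked here follow the kcov comparison layout: a leading uint64 count (read into ncomps above), then one fixed-size record per comparison. Roughly, as the executor consumes it (shown for illustration; see the executor's own kcov_comparison_t definition for the authoritative version):

    struct kcov_comparison_t {
        uint64 type; // operand size and const-operand flags
        uint64 arg1; // first comparison operand
        uint64 arg2; // second comparison operand
        uint64 pc;   // PC of the comparison instruction
    };
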
@@ -1456,7 +1463,7 @@ void thread_create(thread_t* th, int id, bool need_coverage)

void thread_mmap_cover(thread_t* th)
{
- if (th->cov.data != NULL)
+ if (th->cov.alloc != NULL)
return;
cover_mmap(&th->cov);
cover_protect(&th->cov);
23 changes: 13 additions & 10 deletions executor/executor_bsd.h
@@ -106,15 +106,18 @@ static void cover_open(cover_t* cov, bool extra)

static void cover_mmap(cover_t* cov)
{
- if (cov->data != NULL)
+ if (cov->alloc != NULL)
fail("cover_mmap invoked on an already mmapped cover_t object");
void* mmap_ptr = mmap(NULL, cov->mmap_alloc_size, PROT_READ | PROT_WRITE,
MAP_SHARED, cov->fd, 0);
if (mmap_ptr == MAP_FAILED)
fail("cover mmap failed");
- cov->data = (char*)mmap_ptr;
- cov->data_end = cov->data + cov->mmap_alloc_size;
- cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+ cov->alloc = mmap_ptr;
+ cov->trace = (char*)cov->alloc;
+ cov->trace_size = cov->mmap_alloc_size;
+ cov->trace_end = cov->trace + cov->trace_size;
+ cov->trace_offset = 0;
+ cov->trace_skip = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
cov->pc_offset = 0;
}

@@ -124,7 +127,7 @@ static void cover_protect(cover_t* cov)
size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
long page_size = sysconf(_SC_PAGESIZE);
if (page_size > 0)
- mprotect(cov->data + page_size, mmap_alloc_size - page_size,
+ mprotect(cov->alloc + page_size, mmap_alloc_size - page_size,
PROT_READ);
#elif GOOS_openbsd
int mib[2], page_size;
Expand All @@ -133,18 +136,18 @@ static void cover_protect(cover_t* cov)
mib[1] = HW_PAGESIZE;
size_t len = sizeof(page_size);
if (sysctl(mib, ARRAY_SIZE(mib), &page_size, &len, NULL, 0) != -1)
- mprotect(cov->data + page_size, mmap_alloc_size - page_size, PROT_READ);
+ mprotect(cov->alloc + page_size, mmap_alloc_size - page_size, PROT_READ);
#endif
}

static void cover_unprotect(cover_t* cov)
{
#if GOOS_freebsd
size_t mmap_alloc_size = kCoverSize * KCOV_ENTRY_SIZE;
- mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
+ mprotect(cov->alloc, mmap_alloc_size, PROT_READ | PROT_WRITE);
#elif GOOS_openbsd
size_t mmap_alloc_size = kCoverSize * sizeof(uintptr_t);
- mprotect(cov->data, mmap_alloc_size, PROT_READ | PROT_WRITE);
+ mprotect(cov->alloc, mmap_alloc_size, PROT_READ | PROT_WRITE);
#endif
}

@@ -171,12 +174,12 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)

static void cover_reset(cover_t* cov)
{
- *(uint64*)cov->data = 0;
+ *(uint64*)cov->trace = 0;
}

static void cover_collect(cover_t* cov)
{
- cov->size = *(uint64*)cov->data;
+ cov->size = *(uint64*)cov->trace;
}

#if GOOS_netbsd
17 changes: 10 additions & 7 deletions executor/executor_darwin.h
@@ -73,7 +73,7 @@ static void cover_open(cover_t* cov, bool extra)

static void cover_mmap(cover_t* cov)
{
- if (cov->data != NULL)
+ if (cov->alloc != NULL)
fail("cover_mmap invoked on an already mmapped cover_t object");
uintptr_t mmap_ptr = 0;
if (ksancov_map(cov->fd, &mmap_ptr, &cov->mmap_alloc_size))
@@ -84,8 +84,10 @@ static void cover_mmap(cover_t* cov)
if (cov->mmap_alloc_size > kCoverSize)
fail("mmap allocation size larger than anticipated");

- cov->data = (char*)mmap_ptr;
- cov->data_end = cov->data + cov->mmap_alloc_size;
+ cov->alloc = (void*)mmap_ptr;
+ cov->trace_size = cov->mmap_alloc_size;
+ cov->trace = (char*)cov->alloc;
+ cov->trace_end = cov->trace + cov->trace_size;
}

static void cover_protect(cover_t* cov)
Expand All @@ -110,14 +112,15 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)

static void cover_reset(cover_t* cov)
{
- ksancov_reset((struct ksancov_header*)cov->data);
- ksancov_start((struct ksancov_header*)cov->data);
+ ksancov_reset((struct ksancov_header*)cov->trace);
+ ksancov_start((struct ksancov_header*)cov->trace);
}

static void cover_collect(cover_t* cov)
{
- struct ksancov_trace* trace = (struct ksancov_trace*)cov->data;
+ struct ksancov_trace* trace = (struct ksancov_trace*)cov->trace;
cov->size = ksancov_trace_head(trace);
- cov->data_offset = ((int64_t) & (trace->pcs)) - ((int64_t)(cov->data));
+ cov->trace_offset = 0;
+ cov->trace_skip = ((int64_t) & (trace->pcs)) - ((int64_t)(cov->trace));
cov->pc_offset = trace->offset;
}
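
Because ksancov records PCs truncated to 32 bits (see the cover_t comment in executor.cc), consumers add pc_offset back when reporting coverage. A sketch under that assumption (restore_pc is illustrative, not part of this PR):

    // Rebuild a full 64-bit PC from a truncated ksancov entry.
    // pc_offset comes from trace->offset above (e.g. VM_MIN_KERNEL_ADDRESS
    // on AMD64).
    static uint64 restore_pc(uint32 truncated_pc, uint64 pc_offset)
    {
        return pc_offset + truncated_pc;
    }
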
31 changes: 18 additions & 13 deletions executor/executor_linux.h
@@ -130,7 +130,7 @@ static void cover_unprotect(cover_t* cov)

static void cover_mmap(cover_t* cov)
{
- if (cov->data != NULL)
+ if (cov->alloc != NULL)
fail("cover_mmap invoked on an already mmapped cover_t object");
if (cov->mmap_alloc_size == 0)
fail("cover_t structure is corrupted");
@@ -140,14 +140,18 @@ static void cover_mmap(cover_t* cov)
if (mapped == MAP_FAILED)
exitf("failed to preallocate kcov buffer");
// Now map the kcov buffer to the file, overwriting the existing mapping above.
- cov->data = (char*)mmap(mapped + SYZ_PAGE_SIZE, cov->mmap_alloc_size,
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, cov->fd, 0);
- if (cov->data == MAP_FAILED)
+ cov->alloc = (char*)mmap(mapped + SYZ_PAGE_SIZE, cov->mmap_alloc_size,
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, cov->fd, 0);
+ if (cov->alloc == MAP_FAILED)
exitf("cover mmap failed");
- if (pkeys_enabled && pkey_mprotect(cov->data, cov->mmap_alloc_size, PROT_READ | PROT_WRITE, RESERVED_PKEY))
+ if (pkeys_enabled && pkey_mprotect(cov->alloc, cov->mmap_alloc_size, PROT_READ | PROT_WRITE, RESERVED_PKEY))
exitf("failed to pkey_mprotect kcov buffer");
- cov->data_end = cov->data + cov->mmap_alloc_size;
- cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+ cov->trace_offset = 0;
+ cov->trace = (char*)cov->alloc;
+ // TODO(glider): trace_size may differ from mmap_alloc_size in the future.
[Review comment · Collaborator] This does not look useful as TODO in the code.
+ cov->trace_size = cov->mmap_alloc_size;
+ cov->trace_end = cov->trace + cov->mmap_alloc_size;
+ cov->trace_skip = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
cov->pc_offset = 0;
}
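
The Linux path keeps a PROT_NONE guard page on each side of the kcov buffer; a layout sketch of what this cover_mmap produces (illustrative):

    // [ guard page ][ kcov buffer: mmap_alloc_size bytes ][ guard page ]
    //                ^ cov->alloc, MAP_FIXED'ed over cov->fd
    // trace currently aliases alloc (trace_offset == 0) and
    // trace_size == mmap_alloc_size; the guard pages catch runaway writes.
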

@@ -185,16 +189,16 @@ static void cover_reset(cover_t* cov)
cov = &current_thread->cov;
}
cover_unprotect(cov);
- *(uint64*)cov->data = 0;
+ *(uint64*)cov->trace = 0;
cover_protect(cov);
cov->overflow = false;
}

template <typename cover_data_t>
static void cover_collect_impl(cover_t* cov)
{
- cov->size = *(cover_data_t*)cov->data;
- cov->overflow = (cov->data + (cov->size + 2) * sizeof(cover_data_t)) > cov->data_end;
+ cov->size = *(cover_data_t*)cov->trace;
+ cov->overflow = (cov->trace + (cov->size + 2) * sizeof(cover_data_t)) > cov->trace_end;
}
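
For readers puzzling over the bound: (cov->size + 2) * sizeof(cover_data_t) spans the leading count word, the size recorded PCs, and apparently one extra entry of slack, so overflow flags a trace that ran up against the end of the buffer.
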

static void cover_collect(cover_t* cov)
@@ -285,8 +289,8 @@ static const char* setup_delay_kcov()
cov.fd = kCoverFd;
cover_open(&cov, false);
cover_mmap(&cov);
- char* first = cov.data;
- cov.data = nullptr;
+ char* first = (char*)cov.alloc;
+ cov.alloc = nullptr;
cover_mmap(&cov);
// If delayed kcov mmap is not supported by the kernel,
// accesses to the second mapping will crash.
Expand All @@ -298,7 +302,8 @@ static const char* setup_delay_kcov()
fail("clock_gettime failed");
error = "kernel commit b3d7fe86fbd0 is not present";
} else {
- munmap(cov.data - SYZ_PAGE_SIZE, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
+ void* region = (void*)((char*)cov.alloc - SYZ_PAGE_SIZE);
+ munmap(region, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
}
munmap(first - SYZ_PAGE_SIZE, cov.mmap_alloc_size + 2 * SYZ_PAGE_SIZE);
close(cov.fd);
39 changes: 21 additions & 18 deletions executor/executor_test.h
@@ -36,7 +36,7 @@ static void os_init(int argc, char** argv, void* data, size_t data_size)

extern "C" notrace void __sanitizer_cov_trace_pc(void)
{
- if (current_thread == nullptr || current_thread->cov.data == nullptr || current_thread->cov.collect_comps)
+ if (current_thread == nullptr || current_thread->cov.trace == nullptr || current_thread->cov.collect_comps)
return;
uint64 pc = (uint64)__builtin_return_address(0);
// Convert to what is_kernel_pc will accept as valid coverage;
@@ -45,16 +45,16 @@ extern "C" notrace void __sanitizer_cov_trace_pc(void)
// because it must not be instrumented which is hard to achieve for all compilers
// if the code is in a separate function.
if (is_kernel_64_bit) {
- uint64* start = (uint64*)current_thread->cov.data;
- uint64* end = (uint64*)current_thread->cov.data_end;
+ uint64* start = (uint64*)current_thread->cov.trace;
+ uint64* end = (uint64*)current_thread->cov.trace_end;
uint64 pos = start[0];
if (start + pos + 1 < end) {
start[0] = pos + 1;
start[pos + 1] = pc;
}
} else {
- uint32* start = (uint32*)current_thread->cov.data;
- uint32* end = (uint32*)current_thread->cov.data_end;
+ uint32* start = (uint32*)current_thread->cov.trace;
+ uint32* end = (uint32*)current_thread->cov.trace_end;
uint32 pos = start[0];
if (start + pos + 1 < end) {
start[0] = pos + 1;
@@ -85,15 +85,15 @@ static void cover_enable(cover_t* cov, bool collect_comps, bool extra)

static void cover_reset(cover_t* cov)
{
- *(uint64*)(cov->data) = 0;
+ *(uint64*)(cov->trace) = 0;
}

static void cover_collect(cover_t* cov)
{
if (is_kernel_64_bit)
- cov->size = *(uint64*)cov->data;
+ cov->size = *(uint64*)cov->trace;
else
- cov->size = *(uint32*)cov->data;
+ cov->size = *(uint32*)cov->trace;
}

static void cover_protect(cover_t* cov)
@@ -102,16 +102,19 @@ static void cover_protect(cover_t* cov)

static void cover_mmap(cover_t* cov)
{
- if (cov->data != NULL)
+ if (cov->alloc != NULL)
fail("cover_mmap invoked on an already mmapped cover_t object");
if (cov->mmap_alloc_size == 0)
fail("cover_t structure is corrupted");
- cov->data = (char*)mmap(NULL, cov->mmap_alloc_size,
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
- if (cov->data == MAP_FAILED)
+ cov->alloc = (char*)mmap(NULL, cov->mmap_alloc_size,
+ PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (cov->alloc == MAP_FAILED)
exitf("cover mmap failed");
- cov->data_end = cov->data + cov->mmap_alloc_size;
- cov->data_offset = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
+ cov->trace = (char*)cov->alloc;
+ cov->trace_size = cov->mmap_alloc_size;
+ cov->trace_end = cov->trace + cov->trace_size;
+ cov->trace_offset = 0;
+ cov->trace_skip = is_kernel_64_bit ? sizeof(uint64_t) : sizeof(uint32_t);
// We don't care about the specific PC values for now.
// Once we do, we might want to consider ASLR here.
cov->pc_offset = 0;
@@ -123,11 +126,11 @@ static void cover_unprotect(cover_t* cov)

static long inject_cover(cover_t* cov, long a, long b)
{
- if (cov->data == nullptr)
+ if (cov->trace == nullptr)
return ENOENT;
- uint32 size = std::min((uint32)b, cov->mmap_alloc_size);
- memcpy(cov->data, (void*)a, size);
- memset(cov->data + size, 0xcd, std::min<uint64>(100, cov->mmap_alloc_size - size));
+ uint32 size = std::min((uint32)b, cov->trace_size);
+ memcpy(cov->trace, (void*)a, size);
+ memset(cov->trace + size, 0xcd, std::min<uint64>(100, cov->trace_size - size));
return 0;
}

2 changes: 1 addition & 1 deletion executor/snapshot.h
@@ -213,7 +213,7 @@ static void SnapshotStart()
thread_t* th = &threads[i];
thread_create(th, i, flag_coverage);
if (flag_coverage)
- PopulateMemory(th->cov.data, kCoveragePopulate);
+ PopulateMemory(th->cov.alloc, kCoveragePopulate);
}
TouchMemory((char*)output_data + output_size - kOutputPopulate, kOutputPopulate);
TouchMemory(ivs.input, kInputPopulate);