Commit dd161f7
Merge tag 'trace-sorttable-v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing / sorttable updates from Steven Rostedt:

 - Implement arm64 build time sorting of the mcount location table

   When gcc is used to build arm64, the mcount_loc section is all zeros
   in the vmlinux elf file. The addresses are stored in the Elf_Rela
   location. To sort at build time, an array is allocated and the
   addresses are added to it from the content of the mcount_loc section
   as well as the Elf_Rela data. After sorting, the addresses are
   written back into the Elf_Rela entries, which leaves the section
   sorted.

 - Make sorting of the mcount location table for arm64 work with clang
   as well

   When clang is used, the mcount_loc section contains the addresses,
   unlike the gcc build. An array is still created and the sorting
   works for both methods.

 - Remove weak functions from the mcount_loc section

   Have the sorttable code take the function data produced by 'nm -S',
   which lists the functions as well as their sizes. Using this
   information the sorttable code can determine if a function in the
   mcount_loc section was weak and overridden. If the function is not
   found, its entry is set to zero. On boot, when the mcount_loc
   section is read and the ftrace table is created, any address in
   mcount_loc that is not in the kernel core text is dropped and not
   added to ftrace_filter_functions (the functions that ftrace
   callbacks can be attached to).

 - Update and fix the reporting of how much data is used for ftrace
   functions

   On boot, a report is printed of how many pages were used by the
   ftrace table and how they were grouped (the table holds a list of
   sections that are groups of pages that were able to be allocated).
   Removing the weak functions required this accounting to be updated.

* tag 'trace-sorttable-v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  scripts/sorttable: Allow matches to functions before function entry
  scripts/sorttable: Use normal sort if theres no relocs in the mcount section
  ftrace: Check against is_kernel_text() instead of kaslr_offset()
  ftrace: Test mcount_loc addr before calling ftrace_call_addr()
  ftrace: Have ftrace pages output reflect freed pages
  ftrace: Update the mcount_loc check of skipped entries
  scripts/sorttable: Zero out weak functions in mcount_loc table
  scripts/sorttable: Always use an array for the mcount_loc sorting
  scripts/sorttable: Have mcount rela sort use direct values
  arm64: scripts/sorttable: Implement sorting mcount_loc at boot for arm64
2 parents 5c2a430 + dc208c6 commit dd161f7
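
The build-time sort described above reduces to a small, self-contained idea: gather every mcount/fentry address into one flat array (whether it came from the mcount_loc section data, as with clang, or from the Elf_Rela entries, as with gcc on arm64), sort that array, and write the values back in order. Below is a minimal userspace sketch of that idea only; it is not the actual scripts/sorttable.c code, and the function names, the uint64_t width, and the two-array layout are assumptions made purely for illustration.

#include <stdint.h>
#include <stdlib.h>

static int cmp_addr(const void *a, const void *b)
{
	uint64_t x = *(const uint64_t *)a;
	uint64_t y = *(const uint64_t *)b;

	return x < y ? -1 : x > y ? 1 : 0;
}

/*
 * addrs[]: every mcount address gathered from the image (section data
 *          or Elf_Rela addends) -- hypothetical layout for this sketch.
 * dests[]: where each table slot lives in the mapped file, in original
 *          file order.
 *
 * After qsort(), the i-th smallest address is written into the i-th
 * slot, so the table ends up sorted regardless of where the values
 * were originally stored.
 */
static void sort_mcount_loc(uint64_t *addrs, uint64_t **dests, size_t count)
{
	qsort(addrs, count, sizeof(*addrs), cmp_addr);

	for (size_t i = 0; i < count; i++)
		*dests[i] = addrs[i];
}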

4 files changed: +457 -14 lines


arch/arm64/Kconfig

Lines changed: 1 addition & 0 deletions

@@ -218,6 +218,7 @@ config ARM64
 		if DYNAMIC_FTRACE_WITH_ARGS
 	select HAVE_SAMPLE_FTRACE_DIRECT
 	select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
+	select HAVE_BUILDTIME_MCOUNT_SORT
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_GUP_FAST
 	select HAVE_FTRACE_GRAPH_FUNC

kernel/trace/ftrace.c

Lines changed: 50 additions & 5 deletions

@@ -7016,13 +7016,16 @@ static int ftrace_process_locs(struct module *mod,
 	unsigned long *p;
 	unsigned long addr;
 	unsigned long flags = 0; /* Shut up gcc */
+	unsigned long pages;
 	int ret = -ENOMEM;
 
 	count = end - start;
 
 	if (!count)
 		return 0;
 
+	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
+
 	/*
 	 * Sorting mcount in vmlinux at build time depend on
 	 * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
@@ -7067,7 +7070,9 @@ static int ftrace_process_locs(struct module *mod,
 	pg = start_pg;
 	while (p < end) {
 		unsigned long end_offset;
-		addr = ftrace_call_adjust(*p++);
+
+		addr = *p++;
+
 		/*
 		 * Some architecture linkers will pad between
 		 * the different mcount_loc sections of different
@@ -7079,6 +7084,19 @@ static int ftrace_process_locs(struct module *mod,
 			continue;
 		}
 
+		/*
+		 * If this is core kernel, make sure the address is in core
+		 * or inittext, as weak functions get zeroed and KASLR can
+		 * move them to something other than zero. It just will not
+		 * move it to an area where kernel text is.
+		 */
+		if (!mod && !(is_kernel_text(addr) || is_kernel_inittext(addr))) {
+			skipped++;
+			continue;
+		}
+
+		addr = ftrace_call_adjust(addr);
+
 		end_offset = (pg->index+1) * sizeof(pg->records[0]);
 		if (end_offset > PAGE_SIZE << pg->order) {
 			/* We should have allocated enough */
@@ -7118,11 +7136,41 @@ static int ftrace_process_locs(struct module *mod,
 
 	/* We should have used all pages unless we skipped some */
 	if (pg_unuse) {
-		WARN_ON(!skipped);
+		unsigned long pg_remaining, remaining = 0;
+		unsigned long skip;
+
+		/* Count the number of entries unused and compare it to skipped. */
+		pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index;
+
+		if (!WARN(skipped < pg_remaining, "Extra allocated pages for ftrace")) {
+
+			skip = skipped - pg_remaining;
+
+			for (pg = pg_unuse; pg; pg = pg->next)
+				remaining += 1 << pg->order;
+
+			pages -= remaining;
+
+			skip = DIV_ROUND_UP(skip, ENTRIES_PER_PAGE);
+
+			/*
+			 * Check to see if the number of pages remaining would
+			 * just fit the number of entries skipped.
+			 */
+			WARN(skip != remaining, "Extra allocated pages for ftrace: %lu with %lu skipped",
+			     remaining, skipped);
+		}
 		/* Need to synchronize with ftrace_location_range() */
 		synchronize_rcu();
 		ftrace_free_pages(pg_unuse);
 	}
+
+	if (!mod) {
+		count -= skipped;
+		pr_info("ftrace: allocating %ld entries in %ld pages\n",
+			count, pages);
+	}
+
 	return ret;
 }
 
@@ -7768,9 +7816,6 @@ void __init ftrace_init(void)
 		goto failed;
 	}
 
-	pr_info("ftrace: allocating %ld entries in %ld pages\n",
-		count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
-
 	ret = ftrace_process_locs(NULL,
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
scripts/link-vmlinux.sh

Lines changed: 3 additions & 1 deletion

@@ -173,12 +173,14 @@ mksysmap()
 
 sorttable()
 {
-	${objtree}/scripts/sorttable ${1}
+	${NM} -S ${1} > .tmp_vmlinux.nm-sort
+	${objtree}/scripts/sorttable -s .tmp_vmlinux.nm-sort ${1}
 }
 
 cleanup()
 {
 	rm -f .btf.*
+	rm -f .tmp_vmlinux.nm-sort
 	rm -f System.map
 	rm -f vmlinux
 	rm -f vmlinux.map
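
For reference, the file passed to sorttable's new -s option above is plain `nm -S` output, where each line carries a symbol's address, size, symbol class, and name; the sizes are what allow the tool to tell whether an mcount_loc entry falls inside a function that survived linking or inside an overridden weak one. The lines below are purely illustrative -- the addresses, sizes and names are made up:

ffff800080a10000 0000000000000054 T example_strong_function
ffff800080a10060 0000000000000030 t example_local_helper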
