
Commit 7c1badb

Merge tag 'ftrace-v6.14-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull fgraph updates from Steven Rostedt:

 "Remove calltime and rettime from fgraph infrastructure

  The calltime and rettime were used by the function graph tracer to
  calculate the timings of functions where it traced their entry and
  exit. The calltime and rettime were stored in the generic structures
  that were used for the mechanisms to add an entry and exit callback.

  Now that function graph infrastructure is used by other subsystems
  than just the tracer, the calltime and rettime are not needed for
  them. Remove the calltime and rettime from the generic fgraph
  infrastructure and have the callers that require them handle them"

* tag 'ftrace-v6.14-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  fgraph: Remove calltime and rettime from generic operations
2 parents e8744fb + 66611c0 commit 7c1badb
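
In practice this moves timestamp handling into each fgraph user's return handler. Below is a minimal sketch of that caller-side pattern, modeled on the trace_irqsoff.c and trace_sched_wakeup.c hunks in this merge; the handler name is illustrative, and details such as finding the trace_array through gops->private or generating trace_ctx vary per tracer.

/* Sketch only: models the caller-side pattern used by the converted tracers below. */
static void example_graph_return(struct ftrace_graph_ret *trace,
				 struct fgraph_ops *gops,
				 struct ftrace_regs *fregs)
{
	struct trace_array *tr = gops->private;	/* illustrative: per-caller lookup */
	unsigned int trace_ctx = tracing_gen_ctx();
	u64 *calltime;
	u64 rettime;
	int size;

	/* The exit timestamp is now taken by the caller, not by the fgraph core. */
	rettime = trace_clock_local();

	/* The entry timestamp was stashed by the entry handler in fgraph shadow-stack data. */
	calltime = fgraph_retrieve_data(gops->idx, &size);
	if (!calltime)
		return;

	/* Both timestamps are passed explicitly to the tracing helper. */
	__trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
}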

7 files changed: +33 −26 lines changed

include/linux/ftrace.h

Lines changed: 0 additions & 2 deletions
@@ -1151,8 +1151,6 @@ struct ftrace_graph_ret {
 	int depth;
 	/* Number of functions that overran the depth limit for current task */
 	unsigned int overrun;
-	unsigned long long calltime;
-	unsigned long long rettime;
 } __packed;
 
 struct fgraph_ops;

kernel/trace/fgraph.c

Lines changed: 0 additions & 1 deletion
@@ -826,7 +826,6 @@ __ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointe
 		return (unsigned long)panic;
 	}
 
-	trace.rettime = trace_clock_local();
 	if (fregs)
 		ftrace_regs_set_instruction_pointer(fregs, ret);
 

kernel/trace/trace.h

Lines changed: 3 additions & 1 deletion
@@ -924,7 +924,9 @@ extern int __trace_graph_retaddr_entry(struct trace_array *tr,
 				unsigned long retaddr);
 extern void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
-				unsigned int trace_ctx);
+				unsigned int trace_ctx,
+				u64 calltime, u64 rettime);
+
 extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
 extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
 extern void free_fgraph_ops(struct trace_array *tr);

kernel/trace/trace_entries.h

Lines changed: 4 additions & 4 deletions
@@ -124,8 +124,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
 		__field_packed(	unsigned long,	ret,	retval	)
 		__field_packed(	int,		ret,	depth	)
 		__field_packed(	unsigned int,	ret,	overrun	)
-		__field_packed(	unsigned long long, ret,	calltime)
-		__field_packed(	unsigned long long, ret,	rettime	)
+		__field(unsigned long long, calltime	)
+		__field(unsigned long long, rettime	)
 	),
 
 	F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d retval: %lx",
@@ -146,8 +146,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
 		__field_packed(	unsigned long,	ret,	func	)
 		__field_packed(	int,		ret,	depth	)
 		__field_packed(	unsigned int,	ret,	overrun	)
-		__field_packed(	unsigned long long, ret,	calltime)
-		__field_packed(	unsigned long long, ret,	rettime	)
+		__field(unsigned long long, calltime	)
+		__field(unsigned long long, rettime	)
 	),
 
 	F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d",
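
Read together with the __trace_graph_return() changes below, the exit event record conceptually ends up laid out roughly as follows. This is a hedged sketch of what the FTRACE_ENTRY_PACKED() macro generates, not the literal expansion: the packed ftrace_graph_ret stays embedded in the entry, while the timestamps become plain fields of the entry that the caller fills in.

/* Rough conceptual layout; the real record is generated (packed) by FTRACE_ENTRY_PACKED(). */
struct ftrace_graph_ret_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ret	ret;		/* no longer carries calltime/rettime */
	unsigned long long	calltime;	/* set in __trace_graph_return() */
	unsigned long long	rettime;
};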

kernel/trace/trace_functions_graph.c

Lines changed: 19 additions & 14 deletions
@@ -266,12 +266,10 @@ __trace_graph_function(struct trace_array *tr,
 	struct ftrace_graph_ret ret = {
 		.func = ip,
 		.depth = 0,
-		.calltime = time,
-		.rettime = time,
 	};
 
 	__trace_graph_entry(tr, &ent, trace_ctx);
-	__trace_graph_return(tr, &ret, trace_ctx);
+	__trace_graph_return(tr, &ret, trace_ctx, time, time);
 }
 
 void
@@ -283,8 +281,9 @@ trace_graph_function(struct trace_array *tr,
 }
 
 void __trace_graph_return(struct trace_array *tr,
-			  struct ftrace_graph_ret *trace,
-			  unsigned int trace_ctx)
+			  struct ftrace_graph_ret *trace,
+			  unsigned int trace_ctx,
+			  u64 calltime, u64 rettime)
 {
 	struct ring_buffer_event *event;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -296,6 +295,8 @@ void __trace_graph_return(struct trace_array *tr,
 		return;
 	entry = ring_buffer_event_data(event);
 	entry->ret = *trace;
+	entry->calltime = calltime;
+	entry->rettime = rettime;
 	trace_buffer_unlock_commit_nostack(buffer, event);
 }
 
@@ -317,10 +318,13 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 	struct trace_array_cpu *data;
 	struct fgraph_times *ftimes;
 	unsigned int trace_ctx;
+	u64 calltime, rettime;
 	long disabled;
 	int size;
 	int cpu;
 
+	rettime = trace_clock_local();
+
 	ftrace_graph_addr_finish(gops, trace);
 
 	if (*task_var & TRACE_GRAPH_NOTRACE) {
@@ -334,15 +338,15 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 
 	handle_nosleeptime(trace, ftimes, size);
 
-	trace->calltime = ftimes->calltime;
+	calltime = ftimes->calltime;
 
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_read(&data->disabled);
 	if (likely(!disabled)) {
 		trace_ctx = tracing_gen_ctx();
-		__trace_graph_return(tr, trace, trace_ctx);
+		__trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
 	}
 	preempt_enable_notrace();
 }
@@ -367,10 +371,8 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
 
 	handle_nosleeptime(trace, ftimes, size);
 
-	trace->calltime = ftimes->calltime;
-
 	if (tracing_thresh &&
-	    (trace->rettime - ftimes->calltime < tracing_thresh))
+	    (trace_clock_local() - ftimes->calltime < tracing_thresh))
 		return;
 	else
 		trace_graph_return(trace, gops, fregs);
@@ -856,7 +858,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
 	graph_ret = &ret_entry->ret;
 	call = &entry->graph_ent;
-	duration = graph_ret->rettime - graph_ret->calltime;
+	duration = ret_entry->rettime - ret_entry->calltime;
 
 	func = call->func + iter->tr->text_delta;
 
@@ -1137,11 +1139,14 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 }
 
 static enum print_line_t
-print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
+print_graph_return(struct ftrace_graph_ret_entry *retentry, struct trace_seq *s,
 		   struct trace_entry *ent, struct trace_iterator *iter,
 		   u32 flags)
 {
-	unsigned long long duration = trace->rettime - trace->calltime;
+	struct ftrace_graph_ret *trace = &retentry->ret;
+	u64 calltime = retentry->calltime;
+	u64 rettime = retentry->rettime;
+	unsigned long long duration = rettime - calltime;
 	struct fgraph_data *data = iter->private;
 	struct trace_array *tr = iter->tr;
 	unsigned long func;
@@ -1342,7 +1347,7 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 	case TRACE_GRAPH_RET: {
 		struct ftrace_graph_ret_entry *field;
 		trace_assign_type(field, entry);
-		return print_graph_return(&field->ret, s, entry, iter, flags);
+		return print_graph_return(field, s, entry, iter, flags);
 	}
 	case TRACE_STACK:
 	case TRACE_FN:

kernel/trace/trace_irqsoff.c

Lines changed: 3 additions & 2 deletions
@@ -223,20 +223,21 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
 	unsigned long flags;
 	unsigned int trace_ctx;
 	u64 *calltime;
+	u64 rettime;
 	int size;
 
 	ftrace_graph_addr_finish(gops, trace);
 
 	if (!func_prolog_dec(tr, &data, &flags))
 		return;
 
+	rettime = trace_clock_local();
 	calltime = fgraph_retrieve_data(gops->idx, &size);
 	if (!calltime)
 		return;
-	trace->calltime = *calltime;
 
 	trace_ctx = tracing_gen_ctx_flags(flags);
-	__trace_graph_return(tr, trace, trace_ctx);
+	__trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
 	atomic_dec(&data->disabled);
 }
 

kernel/trace/trace_sched_wakeup.c

Lines changed: 4 additions & 2 deletions
@@ -158,19 +158,21 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
 	struct trace_array_cpu *data;
 	unsigned int trace_ctx;
 	u64 *calltime;
+	u64 rettime;
 	int size;
 
 	ftrace_graph_addr_finish(gops, trace);
 
 	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
 		return;
 
+	rettime = trace_clock_local();
+
 	calltime = fgraph_retrieve_data(gops->idx, &size);
 	if (!calltime)
 		return;
-	trace->calltime = *calltime;
 
-	__trace_graph_return(tr, trace, trace_ctx);
+	__trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
 	atomic_dec(&data->disabled);
 
 	preempt_enable_notrace();
