#ifndef __VMLINUX_H
#define __VMLINUX_H

#include <linux/bpf.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include <stdbool.h>

// non-UAPI kernel data structures, used in the .bpf.c BPF tool component.

// Just the fields used in these tools preserving the access index so that
// libbpf can fixup offsets with the ones used in the kernel when loading the
// BPF bytecode, if they differ from what is used here.

// Kernel-style shorthand aliases for the UAPI fixed-width types from
// <linux/types.h>, so the struct mirrors below read like the kernel source.
typedef __u8 u8;
typedef __u32 u32;
typedef __u64 u64;
typedef __s64 s64;

// Kernel-internal pid_t; matches the userspace definition (plain int).
typedef int pid_t;

// Subsystem index of the perf_event cgroup controller.
// NOTE(review): the real value is generated from the kernel config
// (cgroup_subsys.h) and may differ across kernels — the BPF side should
// resolve it via CO-RE enum relocation rather than trust this constant.
enum cgroup_subsys_id {
	perf_event_cgrp_id = 8,
};

// Softirq vector numbers, mirroring include/linux/interrupt.h; used to
// decode the `vec` field of softirq tracepoints.
enum {
	HI_SOFTIRQ = 0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */

	NR_SOFTIRQS
};

| 41 | +typedef struct { |
| 42 | + s64 counter; |
| 43 | +} __attribute__((preserve_access_index)) atomic64_t; |
| 44 | + |
| 45 | +typedef atomic64_t atomic_long_t; |
| 46 | + |
| 47 | +struct raw_spinlock { |
| 48 | + int rawlock; |
| 49 | +} __attribute__((preserve_access_index)); |
| 50 | + |
| 51 | +typedef struct raw_spinlock raw_spinlock_t; |
| 52 | + |
| 53 | +typedef struct { |
| 54 | + struct raw_spinlock rlock; |
| 55 | +} __attribute__((preserve_access_index)) spinlock_t; |
| 56 | + |
| 57 | +struct sighand_struct { |
| 58 | + spinlock_t siglock; |
| 59 | +} __attribute__((preserve_access_index)); |
| 60 | + |
| 61 | +struct rw_semaphore { |
| 62 | + atomic_long_t owner; |
| 63 | +} __attribute__((preserve_access_index)); |
| 64 | + |
| 65 | +struct mutex { |
| 66 | + atomic_long_t owner; |
| 67 | +} __attribute__((preserve_access_index)); |
| 68 | + |
| 69 | +struct kernfs_node { |
| 70 | + u64 id; |
| 71 | +} __attribute__((preserve_access_index)); |
| 72 | + |
| 73 | +struct cgroup { |
| 74 | + struct kernfs_node *kn; |
| 75 | + int level; |
| 76 | +} __attribute__((preserve_access_index)); |
| 77 | + |
| 78 | +struct cgroup_subsys_state { |
| 79 | + struct cgroup *cgroup; |
| 80 | +} __attribute__((preserve_access_index)); |
| 81 | + |
| 82 | +struct css_set { |
| 83 | + struct cgroup_subsys_state *subsys[13]; |
| 84 | + struct cgroup *dfl_cgrp; |
| 85 | +} __attribute__((preserve_access_index)); |
| 86 | + |
| 87 | +struct mm_struct { |
| 88 | + struct rw_semaphore mmap_lock; |
| 89 | +} __attribute__((preserve_access_index)); |
| 90 | + |
// Minimal task_struct mirror: only the fields the BPF tools read.
// Field order matches the declaration order used here, but actual kernel
// offsets are fixed up by libbpf via preserve_access_index.
struct task_struct {
	unsigned int flags;
	struct mm_struct *mm;
	pid_t pid;
	pid_t tgid;
	char comm[16];	/* TASK_COMM_LEN */
	struct sighand_struct *sighand;
	struct css_set *cgroups;
} __attribute__((preserve_access_index));

// Common header at the start of every raw tracepoint record
// (mirrors the kernel's struct trace_entry).
struct trace_entry {
	short unsigned int type;
	unsigned char flags;
	unsigned char preempt_count;
	int pid;
} __attribute__((preserve_access_index));

| 108 | +struct trace_event_raw_irq_handler_entry { |
| 109 | + struct trace_entry ent; |
| 110 | + int irq; |
| 111 | + u32 __data_loc_name; |
| 112 | + char __data[]; |
| 113 | +} __attribute__((preserve_access_index)); |
| 114 | + |
| 115 | +struct trace_event_raw_irq_handler_exit { |
| 116 | + struct trace_entry ent; |
| 117 | + int irq; |
| 118 | + int ret; |
| 119 | + char __data[]; |
| 120 | +} __attribute__((preserve_access_index)); |
| 121 | + |
| 122 | +struct trace_event_raw_softirq { |
| 123 | + struct trace_entry ent; |
| 124 | + unsigned int vec; |
| 125 | + char __data[]; |
| 126 | +} __attribute__((preserve_access_index)); |
| 127 | + |
| 128 | +struct trace_event_raw_workqueue_execute_start { |
| 129 | + struct trace_entry ent; |
| 130 | + void *work; |
| 131 | + void *function; |
| 132 | + char __data[]; |
| 133 | +} __attribute__((preserve_access_index)); |
| 134 | + |
| 135 | +struct trace_event_raw_workqueue_execute_end { |
| 136 | + struct trace_entry ent; |
| 137 | + void *work; |
| 138 | + void *function; |
| 139 | + char __data[]; |
| 140 | +} __attribute__((preserve_access_index)); |
| 141 | + |
| 142 | +struct trace_event_raw_workqueue_activate_work { |
| 143 | + struct trace_entry ent; |
| 144 | + void *work; |
| 145 | + char __data[]; |
| 146 | +} __attribute__((preserve_access_index)); |
| 147 | + |
| 148 | +struct perf_sample_data { |
| 149 | + u64 addr; |
| 150 | + u64 period; |
| 151 | + union perf_sample_weight weight; |
| 152 | + u64 txn; |
| 153 | + union perf_mem_data_src data_src; |
| 154 | + u64 ip; |
| 155 | + struct { |
| 156 | + u32 pid; |
| 157 | + u32 tid; |
| 158 | + } tid_entry; |
| 159 | + u64 time; |
| 160 | + u64 id; |
| 161 | + struct { |
| 162 | + u32 cpu; |
| 163 | + } cpu_entry; |
| 164 | + u64 phys_addr; |
| 165 | + u64 data_page_size; |
| 166 | + u64 code_page_size; |
| 167 | +} __attribute__((__aligned__(64))) __attribute__((preserve_access_index)); |
| 168 | + |
// Kernel-side context handed to perf_event BPF programs; gives access to
// the sample data and the originating perf_event.
struct bpf_perf_event_data_kern {
	struct perf_sample_data *data;
	struct perf_event *event;
} __attribute__((preserve_access_index));
#endif // __VMLINUX_H