8
8
9
9
#include <linux/bpf.h>
10
10
#include <linux/filter.h>
11
+ #include <linux/memory.h>
12
+ #include <asm/patch.h>
11
13
#include "bpf_jit.h"
12
14
13
15
/* Number of iterations to try until offsets converge. */
@@ -117,16 +119,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
117
119
sizeof (struct exception_table_entry );
118
120
prog_size = sizeof (* ctx -> insns ) * ctx -> ninsns ;
119
121
120
- jit_data -> header =
121
- bpf_jit_binary_alloc (prog_size + extable_size ,
122
- & jit_data -> image ,
123
- sizeof ( u32 ) ,
124
- bpf_fill_ill_insns );
125
- if (!jit_data -> header ) {
122
+ jit_data -> ro_header =
123
+ bpf_jit_binary_pack_alloc (prog_size + extable_size ,
124
+ & jit_data -> ro_image , sizeof ( u32 ) ,
125
+ & jit_data -> header , & jit_data -> image ,
126
+ bpf_fill_ill_insns );
127
+ if (!jit_data -> ro_header ) {
126
128
prog = orig_prog ;
127
129
goto out_offset ;
128
130
}
129
131
132
+ /*
133
+ * Use the image(RW) for writing the JITed instructions. But also save
134
+ * the ro_image(RX) for calculating the offsets in the image. The RW
135
+ * image will be later copied to the RX image from where the program
136
+ * will run. The bpf_jit_binary_pack_finalize() will do this copy in the
137
+ * final step.
138
+ */
139
+ ctx -> ro_insns = (u16 * )jit_data -> ro_image ;
130
140
ctx -> insns = (u16 * )jit_data -> image ;
131
141
/*
132
142
* Now, when the image is allocated, the image can
@@ -138,14 +148,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
138
148
139
149
if (i == NR_JIT_ITERATIONS ) {
140
150
pr_err ("bpf-jit: image did not converge in <%d passes!\n" , i );
141
- if (jit_data -> header )
142
- bpf_jit_binary_free (jit_data -> header );
143
151
prog = orig_prog ;
144
- goto out_offset ;
152
+ goto out_free_hdr ;
145
153
}
146
154
147
155
if (extable_size )
148
- prog -> aux -> extable = (void * )ctx -> insns + prog_size ;
156
+ prog -> aux -> extable = (void * )ctx -> ro_insns + prog_size ;
149
157
150
158
skip_init_ctx :
151
159
pass ++ ;
@@ -154,23 +162,33 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
154
162
155
163
bpf_jit_build_prologue (ctx );
156
164
if (build_body (ctx , extra_pass , NULL )) {
157
- bpf_jit_binary_free (jit_data -> header );
158
165
prog = orig_prog ;
159
- goto out_offset ;
166
+ goto out_free_hdr ;
160
167
}
161
168
bpf_jit_build_epilogue (ctx );
162
169
163
170
if (bpf_jit_enable > 1 )
164
171
bpf_jit_dump (prog -> len , prog_size , pass , ctx -> insns );
165
172
166
- prog -> bpf_func = (void * )ctx -> insns ;
173
+ prog -> bpf_func = (void * )ctx -> ro_insns ;
167
174
prog -> jited = 1 ;
168
175
prog -> jited_len = prog_size ;
169
176
170
- bpf_flush_icache (jit_data -> header , ctx -> insns + ctx -> ninsns );
171
-
172
177
if (!prog -> is_func || extra_pass ) {
173
- bpf_jit_binary_lock_ro (jit_data -> header );
178
+ if (WARN_ON (bpf_jit_binary_pack_finalize (prog , jit_data -> ro_header ,
179
+ jit_data -> header ))) {
180
+ /* ro_header has been freed */
181
+ jit_data -> ro_header = NULL ;
182
+ prog = orig_prog ;
183
+ goto out_offset ;
184
+ }
185
+ /*
186
+ * The instructions have now been copied to the ROX region from
187
+ * where they will execute.
188
+ * Write any modified data cache blocks out to memory and
189
+ * invalidate the corresponding blocks in the instruction cache.
190
+ */
191
+ bpf_flush_icache (jit_data -> ro_header , ctx -> ro_insns + ctx -> ninsns );
174
192
for (i = 0 ; i < prog -> len ; i ++ )
175
193
ctx -> offset [i ] = ninsns_rvoff (ctx -> offset [i ]);
176
194
bpf_prog_fill_jited_linfo (prog , ctx -> offset );
@@ -185,6 +203,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
185
203
bpf_jit_prog_release_other (prog , prog == orig_prog ?
186
204
tmp : orig_prog );
187
205
return prog ;
206
+
207
+ out_free_hdr :
208
+ if (jit_data -> header ) {
209
+ bpf_arch_text_copy (& jit_data -> ro_header -> size , & jit_data -> header -> size ,
210
+ sizeof (jit_data -> header -> size ));
211
+ bpf_jit_binary_pack_free (jit_data -> ro_header , jit_data -> header );
212
+ }
213
+ goto out_offset ;
188
214
}
189
215
190
216
u64 bpf_jit_alloc_exec_limit (void )
@@ -204,3 +230,51 @@ void bpf_jit_free_exec(void *addr)
204
230
{
205
231
return vfree (addr );
206
232
}
233
+
234
+ void * bpf_arch_text_copy (void * dst , void * src , size_t len )
235
+ {
236
+ int ret ;
237
+
238
+ mutex_lock (& text_mutex );
239
+ ret = patch_text_nosync (dst , src , len );
240
+ mutex_unlock (& text_mutex );
241
+
242
+ if (ret )
243
+ return ERR_PTR (- EINVAL );
244
+
245
+ return dst ;
246
+ }
247
+
248
+ int bpf_arch_text_invalidate (void * dst , size_t len )
249
+ {
250
+ int ret ;
251
+
252
+ mutex_lock (& text_mutex );
253
+ ret = patch_text_set_nosync (dst , 0 , len );
254
+ mutex_unlock (& text_mutex );
255
+
256
+ return ret ;
257
+ }
258
+
259
+ void bpf_jit_free (struct bpf_prog * prog )
260
+ {
261
+ if (prog -> jited ) {
262
+ struct rv_jit_data * jit_data = prog -> aux -> jit_data ;
263
+ struct bpf_binary_header * hdr ;
264
+
265
+ /*
266
+ * If we fail the final pass of JIT (from jit_subprogs),
267
+ * the program may not be finalized yet. Call finalize here
268
+ * before freeing it.
269
+ */
270
+ if (jit_data ) {
271
+ bpf_jit_binary_pack_finalize (prog , jit_data -> ro_header , jit_data -> header );
272
+ kfree (jit_data );
273
+ }
274
+ hdr = bpf_jit_binary_pack_hdr (prog );
275
+ bpf_jit_binary_pack_free (hdr , NULL );
276
+ WARN_ON_ONCE (!bpf_prog_kallsyms_verify_off (prog ));
277
+ }
278
+
279
+ bpf_prog_unlock_free (prog );
280
+ }