@@ -110,38 +110,19 @@ static struct linux_binfmt elf_format = {
110
110
111
111
/*
 * Check that an address produced while laying out the image is sane,
 * i.e. strictly below the user/kernel boundary. Also catches mmap/brk
 * error returns (negative values cast to unsigned long).
 */
#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
112
112
113
- static int set_brk (unsigned long start , unsigned long end , int prot )
114
- {
115
- start = ELF_PAGEALIGN (start );
116
- end = ELF_PAGEALIGN (end );
117
- if (end > start ) {
118
- /*
119
- * Map the last of the bss segment.
120
- * If the header is requesting these pages to be
121
- * executable, honour that (ppc32 needs this).
122
- */
123
- int error = vm_brk_flags (start , end - start ,
124
- prot & PROT_EXEC ? VM_EXEC : 0 );
125
- if (error )
126
- return error ;
127
- }
128
- current -> mm -> start_brk = current -> mm -> brk = end ;
129
- return 0 ;
130
- }
131
-
132
- /* We need to explicitly zero any fractional pages
133
- after the data section (i.e. bss). This would
134
- contain the junk from the file that should not
135
- be in memory
113
+ /*
114
+ * We need to explicitly zero any trailing portion of the page that follows
115
+ * p_filesz when it ends before the page ends (e.g. bss), otherwise this
116
+ * memory will contain the junk from the file that should not be present.
136
117
*/
137
- static int padzero (unsigned long elf_bss )
118
+ static int padzero (unsigned long address )
138
119
{
139
120
unsigned long nbyte ;
140
121
141
- nbyte = ELF_PAGEOFFSET (elf_bss );
122
+ nbyte = ELF_PAGEOFFSET (address );
142
123
if (nbyte ) {
143
124
nbyte = ELF_MIN_ALIGN - nbyte ;
144
- if (clear_user ((void __user * ) elf_bss , nbyte ))
125
+ if (clear_user ((void __user * )address , nbyte ))
145
126
return - EFAULT ;
146
127
}
147
128
return 0 ;
@@ -367,6 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
367
348
return 0 ;
368
349
}
369
350
351
+ /*
352
+ * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
353
+ * into memory at "addr". (Note that p_filesz is rounded up to the
354
+ * next page, so any extra bytes from the file must be wiped.)
355
+ */
370
356
static unsigned long elf_map (struct file * filep , unsigned long addr ,
371
357
const struct elf_phdr * eppnt , int prot , int type ,
372
358
unsigned long total_size )
@@ -406,6 +392,60 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
406
392
return (map_addr );
407
393
}
408
394
395
+ /*
396
+ * Map "eppnt->p_filesz" bytes from "filep" offset "eppnt->p_offset"
397
+ * into memory at "addr". Memory from "p_filesz" through "p_memsz"
398
+ * rounded up to the next page is zeroed.
399
+ */
400
+ static unsigned long elf_load (struct file * filep , unsigned long addr ,
401
+ const struct elf_phdr * eppnt , int prot , int type ,
402
+ unsigned long total_size )
403
+ {
404
+ unsigned long zero_start , zero_end ;
405
+ unsigned long map_addr ;
406
+
407
+ if (eppnt -> p_filesz ) {
408
+ map_addr = elf_map (filep , addr , eppnt , prot , type , total_size );
409
+ if (BAD_ADDR (map_addr ))
410
+ return map_addr ;
411
+ if (eppnt -> p_memsz > eppnt -> p_filesz ) {
412
+ zero_start = map_addr + ELF_PAGEOFFSET (eppnt -> p_vaddr ) +
413
+ eppnt -> p_filesz ;
414
+ zero_end = map_addr + ELF_PAGEOFFSET (eppnt -> p_vaddr ) +
415
+ eppnt -> p_memsz ;
416
+
417
+ /*
418
+ * Zero the end of the last mapped page but ignore
419
+ * any errors if the segment isn't writable.
420
+ */
421
+ if (padzero (zero_start ) && (prot & PROT_WRITE ))
422
+ return - EFAULT ;
423
+ }
424
+ } else {
425
+ map_addr = zero_start = ELF_PAGESTART (addr );
426
+ zero_end = zero_start + ELF_PAGEOFFSET (eppnt -> p_vaddr ) +
427
+ eppnt -> p_memsz ;
428
+ }
429
+ if (eppnt -> p_memsz > eppnt -> p_filesz ) {
430
+ /*
431
+ * Map the last of the segment.
432
+ * If the header is requesting these pages to be
433
+ * executable, honour that (ppc32 needs this).
434
+ */
435
+ int error ;
436
+
437
+ zero_start = ELF_PAGEALIGN (zero_start );
438
+ zero_end = ELF_PAGEALIGN (zero_end );
439
+
440
+ error = vm_brk_flags (zero_start , zero_end - zero_start ,
441
+ prot & PROT_EXEC ? VM_EXEC : 0 );
442
+ if (error )
443
+ map_addr = error ;
444
+ }
445
+ return map_addr ;
446
+ }
447
+
448
+
409
449
static unsigned long total_mapping_size (const struct elf_phdr * phdr , int nr )
410
450
{
411
451
elf_addr_t min_addr = -1 ;
@@ -596,8 +636,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
596
636
struct elf_phdr * eppnt ;
597
637
unsigned long load_addr = 0 ;
598
638
int load_addr_set = 0 ;
599
- unsigned long last_bss = 0 , elf_bss = 0 ;
600
- int bss_prot = 0 ;
601
639
unsigned long error = ~0UL ;
602
640
unsigned long total_size ;
603
641
int i ;
@@ -634,7 +672,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
634
672
else if (no_base && interp_elf_ex -> e_type == ET_DYN )
635
673
load_addr = - vaddr ;
636
674
637
- map_addr = elf_map (interpreter , load_addr + vaddr ,
675
+ map_addr = elf_load (interpreter , load_addr + vaddr ,
638
676
eppnt , elf_prot , elf_type , total_size );
639
677
total_size = 0 ;
640
678
error = map_addr ;
@@ -660,51 +698,9 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
660
698
error = - ENOMEM ;
661
699
goto out ;
662
700
}
663
-
664
- /*
665
- * Find the end of the file mapping for this phdr, and
666
- * keep track of the largest address we see for this.
667
- */
668
- k = load_addr + eppnt -> p_vaddr + eppnt -> p_filesz ;
669
- if (k > elf_bss )
670
- elf_bss = k ;
671
-
672
- /*
673
- * Do the same thing for the memory mapping - between
674
- * elf_bss and last_bss is the bss section.
675
- */
676
- k = load_addr + eppnt -> p_vaddr + eppnt -> p_memsz ;
677
- if (k > last_bss ) {
678
- last_bss = k ;
679
- bss_prot = elf_prot ;
680
- }
681
701
}
682
702
}
683
703
684
- /*
685
- * Now fill out the bss section: first pad the last page from
686
- * the file up to the page boundary, and zero it from elf_bss
687
- * up to the end of the page.
688
- */
689
- if (padzero (elf_bss )) {
690
- error = - EFAULT ;
691
- goto out ;
692
- }
693
- /*
694
- * Next, align both the file and mem bss up to the page size,
695
- * since this is where elf_bss was just zeroed up to, and where
696
- * last_bss will end after the vm_brk_flags() below.
697
- */
698
- elf_bss = ELF_PAGEALIGN (elf_bss );
699
- last_bss = ELF_PAGEALIGN (last_bss );
700
- /* Finally, if there is still more bss to allocate, do it. */
701
- if (last_bss > elf_bss ) {
702
- error = vm_brk_flags (elf_bss , last_bss - elf_bss ,
703
- bss_prot & PROT_EXEC ? VM_EXEC : 0 );
704
- if (error )
705
- goto out ;
706
- }
707
-
708
704
error = load_addr ;
709
705
out :
710
706
return error ;
@@ -828,8 +824,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
828
824
unsigned long error ;
829
825
struct elf_phdr * elf_ppnt , * elf_phdata , * interp_elf_phdata = NULL ;
830
826
struct elf_phdr * elf_property_phdata = NULL ;
831
- unsigned long elf_bss , elf_brk ;
832
- int bss_prot = 0 ;
827
+ unsigned long elf_brk ;
833
828
int retval , i ;
834
829
unsigned long elf_entry ;
835
830
unsigned long e_entry ;
@@ -1020,7 +1015,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
1020
1015
if (retval < 0 )
1021
1016
goto out_free_dentry ;
1022
1017
1023
- elf_bss = 0 ;
1024
1018
elf_brk = 0 ;
1025
1019
1026
1020
start_code = ~0UL ;
@@ -1040,33 +1034,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
1040
1034
if (elf_ppnt -> p_type != PT_LOAD )
1041
1035
continue ;
1042
1036
1043
- if (unlikely (elf_brk > elf_bss )) {
1044
- unsigned long nbyte ;
1045
-
1046
- /* There was a PT_LOAD segment with p_memsz > p_filesz
1047
- before this one. Map anonymous pages, if needed,
1048
- and clear the area. */
1049
- retval = set_brk (elf_bss + load_bias ,
1050
- elf_brk + load_bias ,
1051
- bss_prot );
1052
- if (retval )
1053
- goto out_free_dentry ;
1054
- nbyte = ELF_PAGEOFFSET (elf_bss );
1055
- if (nbyte ) {
1056
- nbyte = ELF_MIN_ALIGN - nbyte ;
1057
- if (nbyte > elf_brk - elf_bss )
1058
- nbyte = elf_brk - elf_bss ;
1059
- if (clear_user ((void __user * )elf_bss +
1060
- load_bias , nbyte )) {
1061
- /*
1062
- * This bss-zeroing can fail if the ELF
1063
- * file specifies odd protections. So
1064
- * we don't check the return value
1065
- */
1066
- }
1067
- }
1068
- }
1069
-
1070
1037
elf_prot = make_prot (elf_ppnt -> p_flags , & arch_state ,
1071
1038
!!interpreter , false);
1072
1039
@@ -1162,7 +1129,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
1162
1129
}
1163
1130
}
1164
1131
1165
- error = elf_map (bprm -> file , load_bias + vaddr , elf_ppnt ,
1132
+ error = elf_load (bprm -> file , load_bias + vaddr , elf_ppnt ,
1166
1133
elf_prot , elf_flags , total_size );
1167
1134
if (BAD_ADDR (error )) {
1168
1135
retval = IS_ERR_VALUE (error ) ?
@@ -1210,40 +1177,24 @@ static int load_elf_binary(struct linux_binprm *bprm)
1210
1177
1211
1178
k = elf_ppnt -> p_vaddr + elf_ppnt -> p_filesz ;
1212
1179
1213
- if (k > elf_bss )
1214
- elf_bss = k ;
1215
1180
if ((elf_ppnt -> p_flags & PF_X ) && end_code < k )
1216
1181
end_code = k ;
1217
1182
if (end_data < k )
1218
1183
end_data = k ;
1219
1184
k = elf_ppnt -> p_vaddr + elf_ppnt -> p_memsz ;
1220
- if (k > elf_brk ) {
1221
- bss_prot = elf_prot ;
1185
+ if (k > elf_brk )
1222
1186
elf_brk = k ;
1223
- }
1224
1187
}
1225
1188
1226
1189
e_entry = elf_ex -> e_entry + load_bias ;
1227
1190
phdr_addr += load_bias ;
1228
- elf_bss += load_bias ;
1229
1191
elf_brk += load_bias ;
1230
1192
start_code += load_bias ;
1231
1193
end_code += load_bias ;
1232
1194
start_data += load_bias ;
1233
1195
end_data += load_bias ;
1234
1196
1235
- /* Calling set_brk effectively mmaps the pages that we need
1236
- * for the bss and break sections. We must do this before
1237
- * mapping in the interpreter, to make sure it doesn't wind
1238
- * up getting placed where the bss needs to go.
1239
- */
1240
- retval = set_brk (elf_bss , elf_brk , bss_prot );
1241
- if (retval )
1242
- goto out_free_dentry ;
1243
- if (likely (elf_bss != elf_brk ) && unlikely (padzero (elf_bss ))) {
1244
- retval = - EFAULT ; /* Nobody gets to see this, but.. */
1245
- goto out_free_dentry ;
1246
- }
1197
+ current -> mm -> start_brk = current -> mm -> brk = ELF_PAGEALIGN (elf_brk );
1247
1198
1248
1199
if (interpreter ) {
1249
1200
elf_entry = load_elf_interp (interp_elf_ex ,
@@ -1369,7 +1320,6 @@ static int load_elf_library(struct file *file)
1369
1320
{
1370
1321
struct elf_phdr * elf_phdata ;
1371
1322
struct elf_phdr * eppnt ;
1372
- unsigned long elf_bss , bss , len ;
1373
1323
int retval , error , i , j ;
1374
1324
struct elfhdr elf_ex ;
1375
1325
@@ -1414,30 +1364,15 @@ static int load_elf_library(struct file *file)
1414
1364
eppnt ++ ;
1415
1365
1416
1366
/* Now use mmap to map the library into memory. */
1417
- error = vm_mmap (file ,
1418
- ELF_PAGESTART (eppnt -> p_vaddr ),
1419
- (eppnt -> p_filesz +
1420
- ELF_PAGEOFFSET (eppnt -> p_vaddr )),
1367
+ error = elf_load (file , ELF_PAGESTART (eppnt -> p_vaddr ),
1368
+ eppnt ,
1421
1369
PROT_READ | PROT_WRITE | PROT_EXEC ,
1422
1370
MAP_FIXED_NOREPLACE | MAP_PRIVATE ,
1423
- ( eppnt -> p_offset -
1424
- ELF_PAGEOFFSET ( eppnt -> p_vaddr )));
1371
+ 0 );
1372
+
1425
1373
if (error != ELF_PAGESTART (eppnt -> p_vaddr ))
1426
1374
goto out_free_ph ;
1427
1375
1428
- elf_bss = eppnt -> p_vaddr + eppnt -> p_filesz ;
1429
- if (padzero (elf_bss )) {
1430
- error = - EFAULT ;
1431
- goto out_free_ph ;
1432
- }
1433
-
1434
- len = ELF_PAGEALIGN (eppnt -> p_filesz + eppnt -> p_vaddr );
1435
- bss = ELF_PAGEALIGN (eppnt -> p_memsz + eppnt -> p_vaddr );
1436
- if (bss > len ) {
1437
- error = vm_brk (len , bss - len );
1438
- if (error )
1439
- goto out_free_ph ;
1440
- }
1441
1376
error = 0 ;
1442
1377
1443
1378
out_free_ph :
0 commit comments