import_sig = re.compile(r'(var|const) ([_\w$]+ *=[^;]+);')
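# --- Illustration (not part of the diff): what import_sig matches ---------
# A minimal sketch: import_sig captures a single `var`/`const` declaration
# terminated by a semicolon, with the keyword in group 1 and the
# name-plus-initializer in group 2. The sample input below is made up.
import re

import_sig = re.compile(r'(var|const) ([_\w$]+ *=[^;]+);')
m = import_sig.search('var Module = moduleArg;')
assert m.group(1) == 'var'
assert m.group(2) == 'Module = moduleArg'
# ---------------------------------------------------------------------------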


-def split_funcs(js, just_split=False):
-  if just_split:
-    return [('(json)', line) for line in js.splitlines()]
+def split_funcs(js):
  # split properly even if there are no newlines,
  # which is important for deterministic builds (as which functions
  # are in each chunk may differ, so we need to split them up and combine
@@ -141,7 +139,7 @@ def chunkify(funcs, chunk_size):
  return [''.join(func[1] for func in chunk) for chunk in chunks]  # remove function names


-def run_on_js(filename, passes, extra_info=None, just_split=False, just_concat=False):
+def run_on_js(filename, passes, extra_info=None):
  with ToolchainProfiler.profile_block('js_optimizer.split_markers'):
    if not isinstance(passes, list):
      passes = [passes]
@@ -230,20 +228,16 @@ def check_symbol_mapping(p):

  with ToolchainProfiler.profile_block('js_optimizer.split'):
    total_size = len(js)
-    funcs = split_funcs(js, just_split)
+    funcs = split_funcs(js)
    js = None

  with ToolchainProfiler.profile_block('js_optimizer.split_to_chunks'):
    # if we are making source maps, we want our debug numbering to start from the
    # top of the file, so avoid breaking the JS into chunks

-    if just_split:
-      # keep same chunks as before
-      chunks = [f[1] for f in funcs]
-    else:
-      intended_num_chunks = round(shared.get_num_cores() * NUM_CHUNKS_PER_CORE)
-      chunk_size = min(MAX_CHUNK_SIZE, max(MIN_CHUNK_SIZE, total_size / intended_num_chunks))
-      chunks = chunkify(funcs, chunk_size)
+    intended_num_chunks = round(shared.get_num_cores() * NUM_CHUNKS_PER_CORE)
+    chunk_size = min(MAX_CHUNK_SIZE, max(MIN_CHUNK_SIZE, total_size / intended_num_chunks))
+    chunks = chunkify(funcs, chunk_size)

    chunks = [chunk for chunk in chunks if chunk]
    if DEBUG:
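# --- Illustration (not part of the diff): how chunk_size is chosen --------
# A small worked example of the clamp used above. The constant values here
# are placeholders for illustration only; the real MIN_CHUNK_SIZE,
# MAX_CHUNK_SIZE and NUM_CHUNKS_PER_CORE are defined elsewhere in this file.
MIN_CHUNK_SIZE = 512 * 1024        # placeholder
MAX_CHUNK_SIZE = 4 * 1024 * 1024   # placeholder
NUM_CHUNKS_PER_CORE = 3            # placeholder


def pick_chunk_size(total_size, num_cores):
  # Aim for NUM_CHUNKS_PER_CORE chunks per core, then clamp so chunks are
  # neither tiny (process overhead dominates) nor huge (poor load balancing).
  intended_num_chunks = round(num_cores * NUM_CHUNKS_PER_CORE)
  return min(MAX_CHUNK_SIZE, max(MIN_CHUNK_SIZE, total_size / intended_num_chunks))


# e.g. 48 MB of JS on 8 cores -> 48 MB / 24 = 2 MB per chunk, which lies
# between the two clamps and is used as-is.
assert pick_chunk_size(48 * 1024 * 1024, 8) == 2 * 1024 * 1024
# ---------------------------------------------------------------------------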
@@ -326,28 +320,23 @@ def write_chunk(chunk, i):
    pre = None

  with ToolchainProfiler.profile_block('sort_or_concat'):
-    if not just_concat:
-      # sort functions by size, to make diffing easier and to improve aot times
-      funcses = []
-      for out_file in filenames:
-        funcses.append(split_funcs(utils.read_file(out_file), False))
-      funcs = [item for sublist in funcses for item in sublist]
-      funcses = None
-      if not os.environ.get('EMCC_NO_OPT_SORT'):
-        funcs.sort(key=lambda x: (len(x[1]), x[0]), reverse=True)
-
-      if 'last' in passes and len(funcs):
-        count = funcs[0][1].count('\n')
-        if count > 3000:
-          print('warning: Output contains some very large functions (%s lines in %s), consider building source files with -Os or -Oz)' % (count, funcs[0][0]), file=sys.stderr)
-
-      for func in funcs:
-        f.write(func[1])
-      funcs = None
-    else:
-      # just concat the outputs
-      for out_file in filenames:
-        f.write(utils.read_file(out_file))
+    # sort functions by size, to make diffing easier and to improve aot times
+    funcses = []
+    for out_file in filenames:
+      funcses.append(split_funcs(utils.read_file(out_file)))
+    funcs = [item for sublist in funcses for item in sublist]
+    funcses = None
+    if not os.environ.get('EMCC_NO_OPT_SORT'):
+      funcs.sort(key=lambda x: (len(x[1]), x[0]), reverse=True)
+
+    if 'last' in passes and len(funcs):
+      count = funcs[0][1].count('\n')
+      if count > 3000:
+        print('warning: Output contains some very large functions (%s lines in %s), consider building source files with -Os or -Oz)' % (count, funcs[0][0]), file=sys.stderr)
+
+    for func in funcs:
+      f.write(func[1])
+    funcs = None

  with ToolchainProfiler.profile_block('write_post'):
    f.write('\n')
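# --- Illustration (not part of the diff): the function sort above ---------
# A standalone sketch of the sort key: funcs is a list of (name, source)
# pairs, ordered biggest-first by source length, with the name as a
# deterministic tie-breaker. The function bodies below are made up.
funcs = [
  ('_b', 'function _b(){return 1}\n'),
  ('_a', 'function _a(){var x=0;x+=1;return x}\n'),
  ('_c', 'function _c(){return 1}\n'),
]
funcs.sort(key=lambda x: (len(x[1]), x[0]), reverse=True)
assert [name for name, _ in funcs] == ['_a', '_c', '_b']
# ---------------------------------------------------------------------------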
@@ -359,9 +348,7 @@ def write_chunk(chunk, i):

@ToolchainProfiler.profile_block('js_optimizer.run_on_js')
def run(filename, passes, extra_info=None):
-  just_split = 'receiveJSON' in passes
-  just_concat = 'emitJSON' in passes
-  return run_on_js(filename, passes, extra_info=extra_info, just_split=just_split, just_concat=just_concat)
+  return run_on_js(filename, passes, extra_info=extra_info)


def main():