@@ -225,25 +225,24 @@ def check_expected(actual, expected):
 
         # check binary format. here we can verify execution of the final
         # result, no need for an output verification
-        split_num = 0
         actual = ''
-        with open('spec.wast', 'w') as transformed_spec_file:
-            for module, asserts in support.split_wast(wast):
+        with open(base, 'w') as transformed_spec_file:
+            for i, (module, asserts) in enumerate(support.split_wast(wast)):
                 if not module:
                     # Skip any initial assertions that don't have a module
                     continue
-                print('    testing split module', split_num)
-                split_num += 1
-                support.write_wast('split.wast', module)
-                run_opt_test('split.wast')    # also that our optimizer doesn't break on it
-                result_wast_file = shared.binary_format_check('split.wast', verify_final_result=False)
+                print(f'    testing split module {i}')
+                split_name = os.path.splitext(base)[0] + f'_split{i}.wast'
+                support.write_wast(split_name, module)
+                run_opt_test(split_name)    # also that our optimizer doesn't break on it
+                result_wast_file = shared.binary_format_check(split_name, verify_final_result=False)
                 with open(result_wast_file) as f:
                     result_wast = f.read()
                 # add the asserts, and verify that the test still passes
                 transformed_spec_file.write(result_wast + '\n' + '\n'.join(asserts))
 
         # compare all the outputs to the expected output
-        actual = run_spec_test('spec.wast')
+        actual = run_spec_test(base)
         check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', base + '.log'))
 
 
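For reference, a minimal sketch of the filename derivation introduced above: each split module is now written to a file named after the spec test's own basename instead of a shared 'split.wast'. The 'address.wast' value here is only a hypothetical example input, not taken from the diff.

    import os

    # Illustration only: derive per-split filenames the same way the diff does.
    base = 'address.wast'  # hypothetical spec test basename
    for i in range(3):
        split_name = os.path.splitext(base)[0] + f'_split{i}.wast'
        print(split_name)
    # Prints:
    #   address_split0.wast
    #   address_split1.wast
    #   address_split2.wast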