 import argparse
 from glob import glob
 
+run_tests_module = __import__("run-tests")
+
 sys.path.append("../tools")
 import pyboard
 
-prepare_script_for_target = __import__("run-tests").prepare_script_for_target
+prepare_script_for_target = run_tests_module.prepare_script_for_target
 
 # Paths for host executables
 if os.name == "nt":
@@ -90,9 +92,9 @@ def run_benchmark_on_target(target, script):
 
 
 def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
+    test_results = []
     skip_complex = run_feature_test(target, "complex") != "complex"
     skip_native = run_feature_test(target, "native_check") != "native"
-    target_had_error = False
 
     for test_file in sorted(test_list):
         print(test_file + ": ", end="")
@@ -105,6 +107,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
             and test_file.find("viper_") != -1
         )
         if skip:
+            test_results.append((test_file, "skip", ""))
             print("SKIP")
             continue
 
@@ -125,6 +128,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
         if isinstance(target, pyboard.Pyboard) or args.via_mpy:
             crash, test_script_target = prepare_script_for_target(args, script_text=test_script)
             if crash:
+                test_results.append((test_file, "fail", "preparation"))
                 print("CRASH:", test_script_target)
                 continue
         else:
@@ -162,10 +166,13 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
                 error = "FAIL truth"
 
         if error is not None:
-            if not error.startswith("SKIP"):
-                target_had_error = True
+            if error.startswith("SKIP"):
+                test_results.append((test_file, "skip", error))
+            else:
+                test_results.append((test_file, "fail", error))
             print(error)
         else:
+            test_results.append((test_file, "pass", ""))
             t_avg, t_sd = compute_stats(times)
             s_avg, s_sd = compute_stats(scores)
             print(
@@ -179,7 +186,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
 
     sys.stdout.flush()
 
-    return target_had_error
+    return test_results
 
 
 def parse_output(filename):
@@ -264,6 +271,12 @@ def main():
     cmd_parser.add_argument("--heapsize", help="heapsize to use (use default if not specified)")
     cmd_parser.add_argument("--via-mpy", action="store_true", help="compile code to .mpy first")
     cmd_parser.add_argument("--mpy-cross-flags", default="", help="flags to pass to mpy-cross")
+    cmd_parser.add_argument(
+        "-r",
+        "--result-dir",
+        default=run_tests_module.base_path("results"),
+        help="directory for test results",
+    )
     cmd_parser.add_argument(
         "N", nargs=1, help="N parameter (approximate target CPU frequency in MHz)"
     )
@@ -307,13 +320,15 @@ def main():
 
     print("N={} M={} n_average={}".format(N, M, n_average))
 
-    target_had_error = run_benchmarks(args, target, N, M, n_average, tests)
+    os.makedirs(args.result_dir, exist_ok=True)
+    test_results = run_benchmarks(args, target, N, M, n_average, tests)
+    res = run_tests_module.create_test_report(args, test_results)
 
     if isinstance(target, pyboard.Pyboard):
         target.exit_raw_repl()
         target.close()
 
-    if target_had_error:
+    if not res:
         sys.exit(1)
 
 
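For context, run_benchmarks() now returns a list of (test_file, outcome, reason) tuples, with outcome one of "pass", "skip", or "fail", and main() passes that list to create_test_report() from run-tests.py, exiting non-zero when the report comes back falsy. The snippet below is not the real create_test_report() (which also presumably writes its output under --result-dir); it is a minimal, hypothetical stand-in showing how such a result list can be reduced to the pass/fail boolean used above.

# Hypothetical stand-in for run-tests.py's create_test_report(); the real helper
# takes (args, test_results) and writes its report under args.result_dir.
def summarize_results(test_results):
    # Each record is (test_file, outcome, reason), as built by run_benchmarks() above.
    counts = {"pass": 0, "skip": 0, "fail": 0}
    for name, outcome, reason in test_results:
        counts[outcome] += 1
        if outcome == "fail":
            print("FAILED: {} ({})".format(name, reason))
    print("{} passed, {} skipped, {} failed".format(counts["pass"], counts["skip"], counts["fail"]))
    # A falsy return means "had failures", matching the `if not res: sys.exit(1)` check in main().
    return counts["fail"] == 0


# Example with one passing, one skipped, and one failing benchmark (paths are illustrative).
ok = summarize_results(
    [
        ("perf_bench/bm_fft.py", "pass", ""),
        ("perf_bench/viper_call.py", "skip", "SKIP native"),
        ("perf_bench/bm_chaos.py", "fail", "FAIL truth"),
    ]
)
print("exit code:", 0 if ok else 1)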