
Commit 63701c2

tests/run-perfbench.py: Create a _result.json at end of run.
Reuse the `create_test_report()` function from `run-tests.py` to generate a `_result.json` file summarising the test run.

Signed-off-by: Damien George <[email protected]>
1 parent 09b0585 commit 63701c2
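
The report is written by calling `create_test_report()` from `run-tests.py` on the collected benchmark results. A minimal sketch (not part of this commit) of inspecting the generated file afterwards; it assumes the default `results` directory of the new `-r`/`--result-dir` option and the `_result.json` name from the commit message, and prints only the top-level keys because the exact schema is whatever `create_test_report()` emits:

import json
import os

result_dir = "results"  # default of the new -r/--result-dir option
with open(os.path.join(result_dir, "_result.json")) as f:
    report = json.load(f)

# The schema is defined by run-tests.py's create_test_report(), so only
# the top-level keys are shown here.
print(sorted(report))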

1 file changed: +22 -7 lines

tests/run-perfbench.py

Lines changed: 22 additions & 7 deletions
@@ -10,10 +10,12 @@
 import argparse
 from glob import glob
 
+run_tests_module = __import__("run-tests")
+
 sys.path.append("../tools")
 import pyboard
 
-prepare_script_for_target = __import__("run-tests").prepare_script_for_target
+prepare_script_for_target = run_tests_module.prepare_script_for_target
 
 # Paths for host executables
 if os.name == "nt":
@@ -90,9 +92,9 @@ def run_benchmark_on_target(target, script):
 
 
 def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
+    test_results = []
     skip_complex = run_feature_test(target, "complex") != "complex"
     skip_native = run_feature_test(target, "native_check") != "native"
-    target_had_error = False
 
     for test_file in sorted(test_list):
         print(test_file + ": ", end="")
@@ -105,6 +107,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
             and test_file.find("viper_") != -1
         )
         if skip:
+            test_results.append((test_file, "skip", ""))
             print("SKIP")
             continue
 
@@ -125,6 +128,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
         if isinstance(target, pyboard.Pyboard) or args.via_mpy:
             crash, test_script_target = prepare_script_for_target(args, script_text=test_script)
             if crash:
+                test_results.append((test_file, "fail", "preparation"))
                 print("CRASH:", test_script_target)
                 continue
         else:
@@ -162,10 +166,13 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
                 error = "FAIL truth"
 
         if error is not None:
-            if not error.startswith("SKIP"):
-                target_had_error = True
+            if error.startswith("SKIP"):
+                test_results.append((test_file, "skip", error))
+            else:
+                test_results.append((test_file, "fail", error))
             print(error)
         else:
+            test_results.append((test_file, "pass", ""))
             t_avg, t_sd = compute_stats(times)
             s_avg, s_sd = compute_stats(scores)
             print(
@@ -179,7 +186,7 @@ def run_benchmarks(args, target, param_n, param_m, n_average, test_list):
 
         sys.stdout.flush()
 
-    return target_had_error
+    return test_results
 
 
 def parse_output(filename):
@@ -264,6 +271,12 @@ def main():
     cmd_parser.add_argument("--heapsize", help="heapsize to use (use default if not specified)")
     cmd_parser.add_argument("--via-mpy", action="store_true", help="compile code to .mpy first")
     cmd_parser.add_argument("--mpy-cross-flags", default="", help="flags to pass to mpy-cross")
+    cmd_parser.add_argument(
+        "-r",
+        "--result-dir",
+        default=run_tests_module.base_path("results"),
+        help="directory for test results",
+    )
     cmd_parser.add_argument(
         "N", nargs=1, help="N parameter (approximate target CPU frequency in MHz)"
     )
@@ -307,13 +320,15 @@ def main():
 
     print("N={} M={} n_average={}".format(N, M, n_average))
 
-    target_had_error = run_benchmarks(args, target, N, M, n_average, tests)
+    os.makedirs(args.result_dir, exist_ok=True)
+    test_results = run_benchmarks(args, target, N, M, n_average, tests)
+    res = run_tests_module.create_test_report(args, test_results)
 
     if isinstance(target, pyboard.Pyboard):
         target.exit_raw_repl()
         target.close()
 
-    if target_had_error:
+    if not res:
         sys.exit(1)
 
 
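As the diff shows, `run_benchmarks()` now returns a list of `(test_file, outcome, reason)` tuples, where `outcome` is one of "pass", "skip" or "fail"; `main()` passes that list to `create_test_report()` and exits non-zero if the report signals failure. A minimal sketch with hypothetical entries (file names and counts are illustrative only) of how such a list can be tallied:

from collections import Counter

# Hypothetical result list in the shape run_benchmarks() now returns.
test_results = [
    ("perf_bench/bm_chaos.py", "pass", ""),
    ("perf_bench/viper_call0.py", "skip", ""),
    ("perf_bench/bm_fft.py", "fail", "FAIL truth"),
]

outcomes = Counter(outcome for _, outcome, _ in test_results)
print(outcomes)  # e.g. Counter({'pass': 1, 'skip': 1, 'fail': 1})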