
Commit 09b0585

tests/run-natmodtests.py: Create a _result.json at end of run.

Reuse the `create_test_report()` function from `run-tests.py` to generate a
`_result.json` file summarising the test run.

Signed-off-by: Damien George <[email protected]>

1 parent 6db9c80


tests/run-natmodtests.py

Lines changed: 19 additions & 15 deletions
@@ -9,6 +9,8 @@
 import sys
 import argparse
 
+run_tests_module = __import__("run-tests")
+
 sys.path.append("../tools")
 import pyboard
 
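The new import at the top of the file is worth a note: `run-tests.py` has a hyphen in its filename, so it is not a valid identifier and a plain `import run-tests` is a SyntaxError. The built-in `__import__()` takes the module name as a string, which sidesteps that. A minimal sketch of the pattern, run from the tests/ directory where run-tests.py lives:

# `import run-tests` cannot be written, but __import__() accepts any
# module name as a string, so the sibling run-tests.py loads fine:
run_tests_module = __import__("run-tests")

# Its functions are then reachable as ordinary attributes:
create_test_report = run_tests_module.create_test_report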

@@ -133,14 +135,15 @@ def detect_architecture(target):
     return platform, arch, None
 
 
-def run_tests(target_truth, target, args, stats, resolved_arch):
+def run_tests(target_truth, target, args, resolved_arch):
     global injected_import_hook_code
 
     prelude = ""
     if args.begin:
         prelude = args.begin.read()
     injected_import_hook_code = injected_import_hook_code.replace("{import_prelude}", prelude)
 
+    test_results = []
     for test_file in args.files:
         # Find supported test
         test_file_basename = os.path.basename(test_file)
@@ -195,17 +198,18 @@ def run_tests(target_truth, target, args, stats, resolved_arch):
                 result = "pass"
 
         # Accumulate statistics
-        stats["total"] += 1
         if result == "pass":
-            stats["pass"] += 1
+            test_results.append((test_file, "pass", ""))
         elif result == "SKIP":
-            stats["skip"] += 1
+            test_results.append((test_file, "skip", ""))
         else:
-            stats["fail"] += 1
+            test_results.append((test_file, "fail", ""))
 
         # Print result
         print("{:4} {}{}".format(result, test_file, extra))
 
+    return test_results
+
 
 def main():
     cmd_parser = argparse.ArgumentParser(
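
Each entry appended above is a `(test_file, outcome, reason)` tuple, the shape that `create_test_report()` in run-tests.py consumes; this script never records a failure reason, so the third element is always the empty string. For illustration only (these file names are hypothetical):

# Hypothetical contents of test_results after a run:
test_results = [
    ("float0.py", "pass", ""),
    ("framebuf1.py", "skip", ""),  # e.g. feature not supported on this arch
    ("heapsort.py", "fail", ""),   # the reason field stays empty in this script
]
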
@@ -227,6 +231,12 @@ def main():
         default=None,
         help="prologue python file to execute before module import",
     )
+    cmd_parser.add_argument(
+        "-r",
+        "--result-dir",
+        default=run_tests_module.base_path("results"),
+        help="directory for test results",
+    )
     cmd_parser.add_argument("files", nargs="*", help="input test files")
     args = cmd_parser.parse_args()
 
@@ -251,20 +261,14 @@ def main():
     print("platform={} ".format(target_platform), end="")
     print("arch={}".format(target_arch))
 
-    stats = {"total": 0, "pass": 0, "fail": 0, "skip": 0}
-    run_tests(target_truth, target, args, stats, target_arch)
+    os.makedirs(args.result_dir, exist_ok=True)
+    test_results = run_tests(target_truth, target, args, target_arch)
+    res = run_tests_module.create_test_report(args, test_results)
 
     target.close()
     target_truth.close()
 
-    print("{} tests performed".format(stats["total"]))
-    print("{} tests passed".format(stats["pass"]))
-    if stats["fail"]:
-        print("{} tests failed".format(stats["fail"]))
-    if stats["skip"]:
-        print("{} tests skipped".format(stats["skip"]))
-
-    if stats["fail"]:
+    if not res:
         sys.exit(1)
 
 
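After a run, the summary is written to `_result.json` in the chosen result directory (`results/` by default, resolved via run-tests.py's `base_path()`). A minimal sketch for inspecting it from the tests/ directory, assuming the default location; the exact JSON schema is whatever `create_test_report()` writes:

# Load the generated report (path assumes the default --result-dir):
import json

with open("results/_result.json") as f:
    report = json.load(f)
print(report)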
