From 0e3a6a7190a28da4ea064261c7780cd62628a9da Mon Sep 17 00:00:00 2001
From: Trung Nguyen
Date: Tue, 8 Oct 2024 10:51:12 -0500
Subject: [PATCH] turned on verbose to check tolerances and also print that
 info out to the run.log file

---
 .github/workflows/kokkos-regression.yaml |  9 +++------
 tools/regression-tests/run_tests.py      | 20 ++++++++++++++------
 2 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/.github/workflows/kokkos-regression.yaml b/.github/workflows/kokkos-regression.yaml
index 99297594932..94e8c60c5d8 100644
--- a/.github/workflows/kokkos-regression.yaml
+++ b/.github/workflows/kokkos-regression.yaml
@@ -75,15 +75,12 @@ jobs:
           --lmp-bin=build/lmp \
           --config-file=tools/regression-tests/config_kokkos_openmp.yaml \
           --example-folders="examples/colloid;examples/melt;examples/micelle;examples/threebody" \
-          --output-file=output.xml \
-          --progress-file=progress.yaml \
-          --log-file=run.log
-
-        tar -cvf kokkos-regression-test.tar run.log progress.yaml output.xml
+          --output-file=output_kokkos.xml --progress-file=progress_kokkos.yaml --log-file=run_kokkos.log \
+          --verbose
 
     - name: Upload artifacts
       uses: actions/upload-artifact@v4
       with:
         name: kokkos-regression-test-artifact
-        path: kokkos-regression-test.tar
+        path: "*_kokkos.*"
 
diff --git a/tools/regression-tests/run_tests.py b/tools/regression-tests/run_tests.py
index afb12b28457..15fe0c01ca9 100755
--- a/tools/regression-tests/run_tests.py
+++ b/tools/regression-tests/run_tests.py
@@ -659,6 +659,9 @@ def iterate(lmp_binary, input_folder, input_list, config, results, progress_file
                 msg = f"  {num_abs_failed} abs diff checks failed."
                 print(msg)
                 logger.info(msg)
+                for out in failed_abs_output:
+                    logger.info(f"  - {out}")
+
                 if verbose == True:
                     for out in failed_abs_output:
                         print(f"  - {out}")
@@ -667,6 +670,9 @@ def iterate(lmp_binary, input_folder, input_list, config, results, progress_file
                 msg = f"  {num_rel_failed} rel diff checks failed."
                 print(msg)
                 logger.info(msg)
+                for out in failed_rel_output:
+                    logger.info(f"  - {out}")
+
                 if verbose == True:
                     for out in failed_rel_output:
                         print(f"  - {out}")
@@ -675,11 +681,15 @@ def iterate(lmp_binary, input_folder, input_list, config, results, progress_file
                 msg = f"  all {num_checks} checks passed."
                 print(msg)
                 logger.info(msg)
+
+                result.status = f" 'status': 'passed', 'abs_diff_failed': '{num_abs_failed}', 'rel_diff_failed': '{num_rel_failed}' "
+
                 num_passed = num_passed + 1
             else:
+                result.status = f" 'status': 'failed', 'abs_diff_failed': '{num_abs_failed}', 'rel_diff_failed': '{num_rel_failed}' "
                 num_error = num_error + 1
 
-            result.status = f" 'abs_diff_failed': '{num_abs_failed}', 'rel_diff_failed': '{num_rel_failed}' "
+
             results.append(result)
 
             # check if memleak detects from valgrind run (need to replace "mpirun" -> valgrind --leak-check=yes mpirun")
@@ -1556,12 +1566,10 @@ def has_markers(inputFileName):
     for result in all_results:
         #print(f"{result.name}: {result.status}")
         case = TestCase(name=result.name, classname=result.name)
-        if result.status == "failed":
-            case.add_failure_info(message="Actual values did not match expected ones.")
-        if result.status == "skipped":
+        if "passed" not in result.status:
+            case.add_failure_info(message=result.status)
+        if "skipped" in result.status:
             case.add_skipped_info(message="Test was skipped.")
-        if result.status == "error":
-            case.add_skipped_info(message="Test run had errors.")
         test_cases.append(case)
 
     current_timestamp = datetime.datetime.now()