diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 32f6eca..7da6991 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -19,8 +19,9 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
-  pymakeCI-os-compiler:
+  pymake-schedule:
     name: pymake CI gcc on different OSs
+    if: ${{ github.event_name == 'schedule' }}
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
@@ -28,15 +29,80 @@ jobs:
         include:
           # test latest gcc and python
           - {os: ubuntu-latest, FC: gcc, FC_V: 13}
+          - {os: macos-latest, FC: gcc, FC_V: 13}
           - {os: windows-latest, FC: gcc, FC_V: 13}
+          # test latest python and intel
+          - {os: ubuntu-latest, FC: intel-classic, FC_V: 2021.7}
+          - {os: macos-13, FC: intel-classic, FC_V: 2021.7}
+          - {os: windows-2019, FC: intel-classic, FC_V: 2021.7}
+    defaults:
+      run:
+        shell: bash
+
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v4
+
+      - uses: maxim-lobanov/setup-xcode@v1
+        if: ${{ (runner.os == 'macOS') && (matrix.FC == 'intel-classic') }}
+        with:
+          xcode-version: "14.3.1"
+
+      - name: Setup Graphviz on Linux
+        if: runner.os == 'Linux'
+        uses: ts-graphviz/setup-graphviz@v2
+
+      - name: Setup pixi
+        uses: prefix-dev/setup-pixi@v0.8.1
+        with:
+          pixi-version: v0.19.1
+          manifest-path: "pixi.toml"
+
+      - name: pixi post-install
+        working-directory: pymake
+        run: |
+          pixi run postinstall
+
+      - name: Setup ${{ matrix.FC }} ${{ matrix.FC_V }} on ${{ matrix.os }}
+        uses: fortran-lang/setup-fortran@main
+        with:
+          compiler: ${{ matrix.FC }}
+          version: ${{ matrix.FC_V }}
+
+      - name: Install make
+        if: runner.os == 'Windows'
+        run: choco install make
+
+      - name: Run scheduled tests
+        run: |
+          pixi run autotest-schedule
+
+      - name: Upload failed test output
+        if: failure()
+        uses: actions/upload-artifact@v4
+        with:
+          name: failed-schedule-${{ matrix.os }}-${{ matrix.FC }}-${{ matrix.FC_V }}
+          path: ./autotest/.failed
+
+
+  pymake-os-compiler:
+    name: pymake CI gcc on different OSs
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          # test latest gcc and python
+          - {os: ubuntu-latest, FC: gcc, FC_V: 13}
           - {os: macos-latest, FC: gcc, FC_V: 13}
+          - {os: windows-latest, FC: gcc, FC_V: 13}
+          # test latest python and intel-classic
+          - {os: ubuntu-latest, FC: intel-classic, FC_V: 2021.7}
+          - {os: macos-13, FC: intel-classic, FC_V: 2021.7}
+          - {os: windows-2019, FC: intel-classic, FC_V: 2021.7}
           # test latest python and previous gcc
           - {os: ubuntu-latest, FC: gcc, FC_V: 12}
           - {os: ubuntu-latest, FC: gcc, FC_V: 11}
-          # test latest python and intel
-          - {os: ubuntu-latest, FC: intel-classic, FC_V: 2021.7}
-          - {os: windows-latest, FC: intel-classic, FC_V: 2021.7}
-          - {os: macos-13, FC: intel-classic, FC_V: 2021.7}
     defaults:
       run:
         shell: bash
@@ -65,7 +131,7 @@ jobs:
         run: |
           pixi run postinstall
-      - name: Setup ${{ env.FC }} ${{ env.FC_V }}
+      - name: Setup ${{ matrix.FC }} ${{ matrix.FC_V }} on ${{ matrix.os }}
         uses: fortran-lang/setup-fortran@main
         with:
           compiler: ${{ matrix.FC }}
           version: ${{ matrix.FC_V }}
@@ -96,14 +162,5 @@ jobs:
         with:
           name: failed-${{ matrix.os }}-${{ matrix.FC }}-${{ matrix.FC_V }}
           path: ./autotest/.failed
-
-      - name: Print coverage report before upload
-        run: |
-          pixi run coverage-report
-
-      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v4
-        env:
-          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
-        with:
-          file: ./autotest/coverage.xml
+
diff --git a/.github/workflows/pymake-requests.yml b/.github/workflows/pymake-requests.yml
index 514d0e1..472efb9 100644
--- a/.github/workflows/pymake-requests.yml
+++ b/.github/workflows/pymake-requests.yml
@@ -30,32 +30,32 @@ jobs:
       - name: Checkout repo
         uses: actions/checkout@v4
 
-      - name: Set up Python
-        uses: actions/setup-python@v5
+      - name: Setup pixi
+        uses: prefix-dev/setup-pixi@v0.8.1
         with:
-          python-version: "3.12"
+          pixi-version: v0.19.1
+          manifest-path: "pixi.toml"
 
-      - name: Install python packages
+      - name: pixi post-install
+        working-directory: pymake
         run: |
-          python -m pip install --upgrade pip
-          pip install ".[test]"
+          pixi run postinstall
 
       - name: Run pytest
-        working-directory: ./autotest
         run: |
-          pytest -v -n=auto -m requests --durations=0 --cov=pymake --cov-report=xml
+          pixi run autotest-request
 
-      - name: Run scheduled tests
-        if: ${{ github.event_name == 'schedule' }}
-        working-directory: ./autotest
-        run: |
-          pytest -v -m="schedule" --durations=0 --cov=pymake --cov-report=xml
+      - name: Upload failed test output
+        if: failure()
+        uses: actions/upload-artifact@v4
+        with:
+          name: failed-requests
+          path: ./autotest/.failed
 
       - name: Print coverage report before upload
-        working-directory: ./autotest
         run: |
-          coverage report
-
+          pixi run coverage-report
+
       - name: Upload coverage to Codecov
         uses: codecov/codecov-action@v4
         env:
diff --git a/autotest/test_build.py b/autotest/test_build.py
index 1e02c2e..85a8b5d 100644
--- a/autotest/test_build.py
+++ b/autotest/test_build.py
@@ -12,11 +12,6 @@ RERUNS = 3
 
 targets = pymake.usgs_program_data.get_keys(current=True)
-targets_make = [
-    t
-    for t in targets
-    if t not in ("libmf6", "gridgen", "mf2000", "swtv4", "mflgr")
-]
 test_ostag = get_ostag()
 test_fc_env = os.environ.get("FC")
 if "win" in test_ostag:
@@ -27,6 +22,15 @@ meson_exclude = ("sutra",)
 targets_meson = [t for t in targets if t not in meson_exclude]
 
+if "win" in test_ostag and test_fc_env in ("ifort",):
+    targets_make = []
+else:
+    targets_make = [
+        t
+        for t in targets
+        if t not in ("libmf6", "gridgen", "mf2000", "swtv4", "mflgr")
+    ]
+
 
 def build_with_makefile(target, path, fc):
     success = True
@@ -103,7 +107,6 @@ def test_meson_build(function_tmpdir, target: str) -> None:
 
 @pytest.mark.base
 @flaky(max_runs=RERUNS)
-@pytest.mark.skipif(sys.platform == "win32", reason="do not run on Windows")
 @pytest.mark.parametrize("target", targets_make)
 def test_makefile_build(function_tmpdir, target: str) -> None:
     pm = pymake.Pymake(verbose=True)
diff --git a/autotest/test_cli_cmds.py b/autotest/test_cli_cmds.py
index f31e0a3..6adb089 100644
--- a/autotest/test_cli_cmds.py
+++ b/autotest/test_cli_cmds.py
@@ -46,43 +46,46 @@ def run_cli_cmd(cmd: list) -> None:
 @pytest.mark.base
 @pytest.mark.parametrize("target", targets)
 def test_make_program(function_tmpdir, target: str) -> None:
-    cmd = [
-        "make-program",
-        target,
-        "--appdir",
-        str(function_tmpdir),
-        "--verbose",
-    ]
-    run_cli_cmd(cmd)
+    with set_dir(function_tmpdir):
+        cmd = [
+            "make-program",
+            target,
+            "--appdir",
+            str(function_tmpdir),
+            "--verbose",
+        ]
+        run_cli_cmd(cmd)
 
 
 @flaky(max_runs=RERUNS)
 @pytest.mark.dependency(name="make_program")
 @pytest.mark.base
 def test_make_program_double(function_tmpdir) -> None:
-    cmd = [
-        "make-program",
-        "mf2005",
-        "--double",
-        "--verbose",
-        "--appdir",
-        str(function_tmpdir),
-    ]
-    run_cli_cmd(cmd)
+    with set_dir(function_tmpdir):
+        cmd = [
+            "make-program",
+            "mf2005",
+            "--double",
+            "--verbose",
+            "--appdir",
+            str(function_tmpdir),
+        ]
+        run_cli_cmd(cmd)
 
 
 @pytest.mark.dependency(name="make_program_all")
 @pytest.mark.schedule
 def test_make_program_all(module_tmpdir) -> None:
-    cmd = [
-        "make-program",
-        ":",
-        "--appdir",
-        str(module_tmpdir / "all"),
-        "--verbose",
-        "--dryrun",
-    ]
-    run_cli_cmd(cmd)
+    with set_dir(module_tmpdir):
+        cmd = [
+            "make-program",
+            ":",
+            "--appdir",
+            str(module_tmpdir / "all"),
+            "--verbose",
+            "--dryrun",
+        ]
+        run_cli_cmd(cmd)
 
 
 @flaky(max_runs=RERUNS)
diff --git a/autotest/test_gridgen.py b/autotest/test_gridgen.py
index 1c69312..8f6194b 100644
--- a/autotest/test_gridgen.py
+++ b/autotest/test_gridgen.py
@@ -59,14 +59,14 @@ def run_gridgen(cmd, ws, exe):
 
 
 @pytest.mark.dependency(name="download")
-@pytest.mark.base
+@pytest.mark.regression
 def test_download(pm, module_tmpdir, target):
     pm.download_target(target, download_path=module_tmpdir)
     assert pm.download, f"could not download {target} distribution"
 
 
 @pytest.mark.dependency(name="build", depends=["download"])
-@pytest.mark.base
+@pytest.mark.regression
 def test_compile(pm, target):
     assert pm.build() == 0, f"could not compile {target}"
 
diff --git a/autotest/test_mf2005.py b/autotest/test_mf2005.py
index dbf361e..51887fa 100644
--- a/autotest/test_mf2005.py
+++ b/autotest/test_mf2005.py
@@ -43,14 +43,14 @@ def run_mf2005(namefile, ws, exe):
 
 
 @pytest.mark.dependency(name="download")
-@pytest.mark.base
+@pytest.mark.regression
 def test_download(pm, module_tmpdir, target):
     pm.download_target(target, download_path=module_tmpdir)
     assert pm.download, f"could not download {target}"
 
 
 @pytest.mark.dependency(name="build", depends=["download"])
-@pytest.mark.base
+@pytest.mark.regression
 def test_compile(pm, target):
     assert pm.build() == 0, f"could not compile {target}"
 
diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py
index b9470f6..3ae73c1 100644
--- a/autotest/test_mf6.py
+++ b/autotest/test_mf6.py
@@ -100,14 +100,14 @@ def build_with_makefile(pm, workspace, exe):
 
 
 @pytest.mark.dependency(name="download")
-@pytest.mark.base
+@pytest.mark.regression
 def test_download(pm, module_tmpdir, target):
     pm.download_target(target, download_path=module_tmpdir)
     assert pm.download, f"could not download {target} distribution"
 
 
 @pytest.mark.dependency(name="build", depends=["download"])
-@pytest.mark.base
+@pytest.mark.regression
 def test_compile(pm, target):
     assert pm.build() == 0, f"could not compile {target}"
 
@@ -118,39 +118,3 @@ def test_compile(pm, target):
 def test_mf6(ws, target):
     success, _ = flopy.run_model(target, None, model_ws=ws, silent=False)
     assert success, f"could not run {ws}"
-
-
-@pytest.mark.dependency(name="makefile", depends=["build"])
-@pytest.mark.base
-def test_makefile(pm, module_tmpdir, target):
-    assert build_with_makefile(
-        pm, module_tmpdir, target
-    ), f"could not compile {target} with makefile"
-
-
-@pytest.mark.dependency(name="shared", depends=["makefile"])
-@pytest.mark.base
-def test_sharedobject(pm, module_tmpdir, workspace, target_so, prog_data):
-    # reconfigure pymake object
-    pm.target = str(target_so)
-    pm.appdir = module_tmpdir
-    pm.srcdir = workspace / prog_data.srcdir
-    pm.srcdir2 = workspace / "src"
-    pm.excludefiles = [os.path.join(pm.srcdir2, "mf6.f90")]
-    pm.makefile = True
-    pm.makeclean = True
-    pm.sharedobject = True
-    pm.inplace = True
-    pm.dryrun = False
-
-    # build the target
-    assert pm.build() == 0, f"could not compile {pm.target}"
-    assert target_so.is_file()
-
-
-@pytest.mark.dependency(name="shared_makefile", depends=["shared", "makefile"])
-@pytest.mark.base
-def test_sharedobject_makefile(pm, module_tmpdir, target_so):
-    assert build_with_makefile(
-        pm, module_tmpdir, target_so
-    ), f"could not compile {target_so} with makefile"
diff --git a/autotest/test_mfusg.py b/autotest/test_mfusg.py
index 0f13c3e..5488b90 100644
--- a/autotest/test_mfusg.py
+++ b/autotest/test_mfusg.py
@@ -68,7 +68,7 @@ def run_mfusg(fn, exe):
 
 
 @pytest.mark.dependency(name="download")
-@pytest.mark.base
+@pytest.mark.regression
 def test_download(pm, pm_gsi, module_tmpdir, targets):
     pm.download_target(targets[0], download_path=module_tmpdir)
     assert pm.download, f"could not download {targets[0]}"
@@ -78,7 +78,7 @@ def test_download(pm, pm_gsi, module_tmpdir, targets):
 
 
 @pytest.mark.dependency(name="build", depends=["download"])
-@pytest.mark.base
+@pytest.mark.regression
 def test_compile(pm, pm_gsi, targets):
     assert pm.build() == 0, f"could not compile {targets[0]}"
     assert (targets[0]).is_file()
@@ -87,7 +87,7 @@ def test_compile(pm, pm_gsi, targets):
     assert targets[1].is_file()
 
 
-@pytest.mark.dependency(name="test", depends=["build"])
+@pytest.mark.dependency(name="test", depends=["download", "build"])
 @pytest.mark.regression
 @pytest.mark.parametrize(
     "namefile",
diff --git a/autotest/test_misc_programs.py b/autotest/test_misc_programs.py
deleted file mode 100644
index 93d4e30..0000000
--- a/autotest/test_misc_programs.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import pytest
-
-import pymake
-
-targets = [
-    "crt",
-    "vs2dt",
-    "zonbud3",
-]
-
-
-@pytest.mark.base
-@pytest.mark.parametrize("target", targets)
-def test_compile(module_tmpdir, target):
-    bin_dir = module_tmpdir / "bin"
-    assert (
-        pymake.build_apps(
-            str(bin_dir / target),
-            download_dir=str(module_tmpdir),
-            appdir=str(bin_dir),
-            verbose=True,
-        )
-        == 0
-    ), f"could not compile {target}"
diff --git a/autotest/test_mp6.py b/autotest/test_mp6.py
index 51c1452..70dfc2b 100644
--- a/autotest/test_mp6.py
+++ b/autotest/test_mp6.py
@@ -60,14 +60,14 @@ def update_files(fn, workspace):
 
 
 @pytest.mark.dependency(name="download")
-@pytest.mark.base
+@pytest.mark.regression
 def test_download(pm, module_tmpdir, target):
     pm.download_target(target, download_path=module_tmpdir)
     assert pm.download, f"could not download {target} distribution"
 
 
 @pytest.mark.dependency(name="build", depends=["download"])
-@pytest.mark.base
+@pytest.mark.regression
 def test_compile(pm, target):
     assert pm.build() == 0, f"could not compile {target}"
 
diff --git a/autotest/test_mp7.py b/autotest/test_mp7.py
index 24b1c66..5b4265f 100644
--- a/autotest/test_mp7.py
+++ b/autotest/test_mp7.py
@@ -143,14 +143,14 @@ def run_modpath7(namefile, mp7_exe, mf2005_exe, mfusg_exe, mf6_exe):
 
 
 @pytest.mark.dependency(name="download")
-@pytest.mark.base
+@pytest.mark.regression
 def test_download(pm, module_tmpdir, target):
     pm.download_target(target, download_path=module_tmpdir)
     assert pm.download, f"could not download {target} distribution"
 
 
 @pytest.mark.dependency(name="build", depends=["download"])
-@pytest.mark.base
+@pytest.mark.regression
 def test_compile(pm, target):
     assert pm.build() == 0, f"could not compile {target}"
 
diff --git a/autotest/test_mt3d.py b/autotest/test_mt3d.py
index f9c2ea0..fd09c89 100644
--- a/autotest/test_mt3d.py
+++ b/autotest/test_mt3d.py
@@ -88,7 +88,7 @@ def run_mt3dusgs(workspace, mt3dms_exe, mfnwt_exe, mf6_exe):
 
 
 @pytest.mark.dependency(name="download_mt3dms")
-@pytest.mark.base
+@pytest.mark.regression
 def test_download_mt3dms(pm, module_tmpdir):
     pm.target = "mt3dms"
     pm.download_target(pm.target, download_path=module_tmpdir)
@@ -96,13 +96,13 @@
 
 
 @pytest.mark.dependency(name="build_mt3dms", depends=["download_mt3dms"])
-@pytest.mark.base
+@pytest.mark.regression
 def test_compile_mt3dms(pm):
     assert pm.build() == 0, f"could not compile {pm.target}"
 
 
 @pytest.mark.dependency(name="download")
-@pytest.mark.base
+@pytest.mark.regression
 def test_download(pm, module_tmpdir, target):
     pm.reset(str(target))
     pm.download_target(target, download_path=module_tmpdir)
@@ -110,7 +110,7 @@
 
 
 @pytest.mark.dependency(name="build", depends=["download"])
-@pytest.mark.base
+@pytest.mark.regression
 def test_compile(pm, target):
     assert pm.build() == 0, f"could not compile {target}"
 
diff --git a/autotest/test_seawat.py b/autotest/test_seawat.py
index 1dfbd43..d64fc13 100644
--- a/autotest/test_seawat.py
+++ b/autotest/test_seawat.py
@@ -76,14 +76,14 @@ def build_seawat_dependency_graphs(src_path, dep_path):
 
 
 @pytest.mark.dependency(name="download")
-@pytest.mark.base
+@pytest.mark.regression
 def test_download(pm, module_tmpdir, target):
     pm.download_target(target, download_path=module_tmpdir)
     assert pm.download, f"could not download {target}"
 
 
 @pytest.mark.dependency(name="build", depends=["download"])
-@pytest.mark.base
+@pytest.mark.regression
 def test_compile(pm, target):
     assert pm.build() == 0, f"could not compile {target}"
 
diff --git a/pixi.toml b/pixi.toml
index 63f34b4..6a80254 100644
--- a/pixi.toml
+++ b/pixi.toml
@@ -45,6 +45,8 @@ download-examples = {cmd = "python ci_setup.py", cwd = "autotest"}
 autotest = { cmd = "pytest -v -n auto --dist=loadfile -m='base or regression' --durations 0 --cov=pymake --cov-report=xml --keep-failed .failed", cwd = "autotest" }
 autotest-base = { cmd = "pytest -v -n auto --dist=loadfile -m='base' --durations 0 --cov=pymake --cov-report=xml --keep-failed .failed", cwd = "autotest" }
 autotest-Windows = { cmd = "pytest -v -m='base' --durations 0 --cov=pymake --cov-report=xml --basetemp=$RUNNER_TEMP/pytest_temp --keep-failed .failed", cwd = "autotest" }
+autotest-request = { cmd = "pytest -v -n=auto -m='requests' --durations=0 --cov=pymake --cov-report=xml --keep-failed .failed", cwd = "autotest" }
+autotest-schedule = { cmd = "pytest -v -m='schedule' --durations=0 --cov=pymake --cov-report=xml --keep-failed .failed", cwd = "autotest" }
 
 # coverage report
 coverage-report = { cmd = "coverage report", cwd = "autotest"}
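
Note on the test_cli_cmds.py hunks above: each make-program invocation is now wrapped in set_dir(...), so the CLI runs from the pytest temporary directory rather than the repository root. A minimal sketch of such a context manager follows; it is an illustration only, and the module path (e.g. modflow_devtools.misc) and exact behavior of the real helper used by the tests are assumptions, since the import is not part of this diff.

# Sketch only: shows what "with set_dir(path):" is assumed to do in the tests
# above. The actual helper is expected to come from the shared test utilities
# (for example modflow_devtools.misc), not from this snippet.
import os
from contextlib import contextmanager
from pathlib import Path


@contextmanager
def set_dir(path):
    """Temporarily change the working directory, restoring it on exit."""
    prev = Path.cwd()    # remember where we started
    os.chdir(path)       # run the body of the with-block from `path`
    try:
        yield Path(path)
    finally:
        os.chdir(prev)   # always restore, even if the body raised

With this pattern, anything make-program writes relative to the current directory lands in function_tmpdir or module_tmpdir and is cleaned up with the rest of the test's temporary files.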