diff --git a/.mdformat.toml b/.mdformat.toml
new file mode 100644
index 00000000..972483a8
--- /dev/null
+++ b/.mdformat.toml
@@ -0,0 +1,3 @@
+wrap = 89
+number = true
+end_of_line = "lf"
diff --git a/doc/conf.py b/doc/conf.py
index 8be11da7..35f363f7 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -136,7 +136,10 @@
 # directories to ignore when looking for source files.
 exclude_patterns = [
     "_build",
-    "examples/clusters/*",
+    "examples/clusters/jean_zay",
+    "examples/clusters/legi",
+    "examples/clusters/licallo",
+    "examples/clusters/occigen",
     "examples/forcing_anisotropic_3d/toro2022/*",
 ]
 
diff --git a/doc/examples/clusters/adastra/README.md b/doc/examples/clusters/adastra/README.md
new file mode 100644
index 00000000..f77429c7
--- /dev/null
+++ b/doc/examples/clusters/adastra/README.md
@@ -0,0 +1,102 @@
+# Using Fluidsim on Adastra (CINES)
+
+We show in this directory
+()
+how to use Fluidsim on Adastra. The main documentation for this HPC platform is
+[here](https://dci.dci-gitlab.cines.fr/webextranet/index.html). We use modules produced
+by [Spack](https://spack.io/).
+
+## Get a login and setup ssh
+
+Get an account on .
+
+Set the alias
+
+```sh
+alias sshadastra='ssh -X augier@adastra.cines.fr'
+```
+
+## Setup Mercurial and clone fluidsim
+
+Ask for authorization to be able to clone the Fluidsim repository from
+ as explained
+[here](https://dci.dci-gitlab.cines.fr/webextranet/data_storage_and_transfers/index.html#authorizing-an-outbound-connection).
+
+Install and setup Mercurial as explained
+[here](https://fluidhowto.readthedocs.io/en/latest/mercurial/install-setup.html). Clone
+the Fluidsim repository in `$HOME/dev`.
+
+```{warning}
+The file `.bashrc` is not sourced at login, so the user has to source it manually
+to use pipx-installed applications.
+```
+
+```sh
+mkdir ~/dev
+cd ~/dev
+. ~/.bashrc
+hg clone https://foss.heptapod.net/fluiddyn/fluidsim
+cd ~/dev/fluidsim/doc/examples/clusters/adastra
+
+```
+
+## Setup a virtual environment
+
+Execute the script `setup_venv.sh`.
+
+```sh
+./setup_venv.sh
+```
+
+```{literalinclude} ./setup_venv.sh
+```
+
+Due to a bug in Meson (the build system used by a few fluidfft plugins, see
+https://github.com/mesonbuild/meson/pull/13619), we need to complete the installation:
+
+```sh
+module purge
+module load cpe/23.12
+module load craype-x86-genoa
+module load PrgEnv-gnu
+module load gcc/13.2.0
+module load cray-hdf5-parallel cray-fftw
+module load cray-python
+
+export LIBRARY_PATH=/opt/cray/pe/fftw/3.3.10.6/x86_genoa/lib
+export CFLAGS="-I/opt/cray/pe/fftw/3.3.10.6/x86_genoa/include"
+
+. ~/venv-fluidsim/bin/activate
+
+# --no-build-isolation because of the Meson bug
+
+# because of --no-build-isolation
+pip install meson-python ninja fluidfft-builder cython
+cd ~/dev
+hg clone https://github.com/paugier/meson.git
+cd ~/dev/meson
+hg up mpi-detection
+pip install -e .
+cd
+#
+
+pip install fluidfft-fftwmpi --no-binary fluidfft-fftwmpi --no-build-isolation --force-reinstall --no-cache-dir --no-deps -v
+```
+
+## Install Fluidsim from source
+
+```sh
+module purge
+module load cpe/23.12
+module load craype-x86-genoa
+module load PrgEnv-gnu
+module load gcc/13.2.0
+module load cray-hdf5-parallel cray-fftw
+module load cray-python
+
+. ~/venv-fluidsim/bin/activate
+
+cd ~/dev/fluidsim
+# update to the wanted commit
+pip install . -v -C setup-args=-Dnative=true
+```
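Before running large jobs on Adastra, it can be useful to check that the MPI-enabled packages built above actually work. The following sketch is only an illustration (the file name `check_env.py` and the launcher, `srun` or `mpirun` depending on the cluster configuration, are assumptions):

```python
# check_env.py: hypothetical sanity check for the virtual environment built above.
# Run on a compute node with, e.g., `srun -n 2 python check_env.py`
# (or `mpirun -np 2 ...`, depending on the cluster configuration).
from mpi4py import MPI
import h5py
import fluidsim

comm = MPI.COMM_WORLD
if comm.rank == 0:
    print("MPI library:", MPI.Get_library_version().strip())
    print("h5py built with MPI support:", h5py.get_config().mpi)
    print("fluidsim version:", fluidsim.__version__)

# write one value per process to check that parallel HDF5 really works
with h5py.File("check_env.h5", "w", driver="mpio", comm=comm) as file:
    dataset = file.create_dataset("ranks", (comm.size,), dtype="i")
    dataset[comm.rank] = comm.rank
```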
diff --git a/doc/examples/clusters/adastra/setup_venv.sh b/doc/examples/clusters/adastra/setup_venv.sh
new file mode 100755
index 00000000..5fd9039b
--- /dev/null
+++ b/doc/examples/clusters/adastra/setup_venv.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+set -e
+
+module purge
+module load cpe/23.12
+module load craype-x86-genoa
+module load PrgEnv-gnu
+module load gcc/13.2.0
+module load cray-hdf5-parallel cray-fftw
+module load cray-python
+
+cd $HOME
+python -m venv venv-fluidsim
+. ~/venv-fluidsim/bin/activate
+pip install --upgrade pip
+
+# install fluidsim and all dependencies from wheels!
+pip install "fluidsim[fft,test]"
+
+# fix/improve a few packages (force recompilation)
+pip install fluidfft --no-binary fluidfft -C setup-args=-Dnative=true --force-reinstall --no-cache-dir --no-deps -v
+
+CC=mpicc pip install mpi4py --no-binary mpi4py --force-reinstall --no-cache-dir --no-deps -v
+CC="mpicc" HDF5_MPI="ON" pip install h5py --no-binary=h5py --force-reinstall --no-cache-dir --no-deps -v
+
+export LIBRARY_PATH=/opt/cray/pe/fftw/3.3.10.6/x86_genoa/lib
+export CFLAGS="-I/opt/cray/pe/fftw/3.3.10.6/x86_genoa/include"
+export PYFFTW_LIB_DIR="/opt/cray/pe/fftw/3.3.10.6/x86_genoa/lib"
+
+pip install pyfftw --no-binary pyfftw --force-reinstall --no-cache-dir --no-deps -v
+
+# install fluidfft plugins
+pip install fluidfft-fftw --no-binary fluidfft-fftw --force-reinstall --no-cache-dir --no-deps -v
diff --git a/doc/examples/clusters/gricad/README.md b/doc/examples/clusters/gricad/README.md
index 44f648df..35d776cc 100644
--- a/doc/examples/clusters/gricad/README.md
+++ b/doc/examples/clusters/gricad/README.md
@@ -1,24 +1,25 @@
 # Using Fluidsim on Gricad clusters
 
-We show how to use Fluidsim on Gricad clusters. The main documentation for this
-HPC platform is [here](https://gricad-doc.univ-grenoble-alpes.fr/hpc/). We
-will use [Guix](https://gricad-doc.univ-grenoble-alpes.fr/hpc/softenv/guix/),
-which is one of the recommended package managers for this platform.
+We show in this directory
+()
+how to use Fluidsim on Gricad clusters. The main documentation for this HPC platform is
+[here](https://gricad-doc.univ-grenoble-alpes.fr/hpc/). We will use
+[Guix](https://gricad-doc.univ-grenoble-alpes.fr/hpc/softenv/guix/), which is one of the
+recommended package managers for this platform.
 
 ## Get a login and setup ssh
 
-Get an account on https://perseus.univ-grenoble-alpes.fr/
+Get an account on <https://perseus.univ-grenoble-alpes.fr/>.
 
 Set an ssh key and the alias
 
 ```sh
-alias sshdahu='ssh dahu.ciment'
+alias sshdahu='ssh -X dahu.ciment'
 ```
 
 ## Setup Guix
 
-The first thing to do, is to create the following file
-`~/.config/guix/channels.scm`:
+The first thing to do is to create the following file `~/.config/guix/channels.scm`:
 
 ```lisp
 (cons* (channel
@@ -35,11 +36,11 @@ source /applis/site/guix-start.sh
 guix pull  # This will take a while
 ```
 
-> You only need to update the guix environment (and thus run `guix pull`) when
-a package you want to use has been created or updated.
+You only need to update the guix environment (and thus run `guix pull`) when a package
+you want to use has been created or updated.
 
-After `guix pull`, you should run the following command to be sure you use the
-lastest `guix` command:
+After `guix pull`, you should run the following command to be sure you use the latest
+`guix` command:
 
 ```sh
 GUIX_PROFILE="$HOME/.config/guix/current"
@@ -48,7 +49,9 @@ GUIX_PROFILE="$HOME/.config/guix/current"
 
 ## Install Fluidsim from source
 
-Clone the Fluidsim repository in `$HOME/dev`.
+Install and setup Mercurial as explained
+[here](https://fluidhowto.readthedocs.io/en/latest/mercurial/install-setup.html). Clone
+the Fluidsim repository in `$HOME/dev`.
 
 ### Change the changeset used for the Guix environment
 
@@ -121,5 +124,5 @@ Submit with
 cd ~/dev/fluidsim/doc/examples/clusters/gricad
 source /applis/environments/conda.sh
 conda activate env-fluiddyn
-python3 submit_bench_fluidsim.py
+python submit_bench_fluidsim.py
 ```
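The last command above runs `submit_bench_fluidsim.py`, which relies on the `fluiddyn.clusters` API. As a rough illustration, such a launching script can look like the sketch below; the cluster class, the `commands_setting_env` attribute and the keyword arguments of `submit_script` should be checked against the `fluiddyn` documentation for the OAR scheduler used on Gricad, and the script name `bench_fluidsim.py` is only an example:

```python
# Sketch of a launching script based on the fluiddyn.clusters API (to be adapted).
from fluiddyn.clusters.oar import ClusterOAR  # assumption: Dahu uses the OAR scheduler

cluster = ClusterOAR()
cluster.nb_cores_per_node = 32  # assumption: depends on the Dahu nodes used
# commands executed by the job before starting the benchmark
cluster.commands_setting_env = [
    "source /applis/environments/conda.sh",
    "conda activate env-fluiddyn",
]

cluster.submit_script(
    "bench_fluidsim.py",  # hypothetical script actually running the benchmark
    name_run="bench_fluidsim",
    nb_cores_per_node=32,
    nb_mpi_processes=32,
    walltime="00:30:00",
)
```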
diff --git a/doc/index.md b/doc/index.md
index c8cd4f86..9fd33190 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -115,6 +115,7 @@
 install
 tutorials
 examples
 build-from-source
+install-clusters
 faq
 ```
diff --git a/doc/install-clusters.md b/doc/install-clusters.md
new file mode 100644
index 00000000..08097fd4
--- /dev/null
+++ b/doc/install-clusters.md
@@ -0,0 +1,38 @@
+# Fluidsim on clusters
+
+Computing clusters are sets of computers used for HPC. Installing on such machines in
+order to run very large simulations has some specific requirements since
+
+- Performance is key. With very large simulations, differences of a few percent in
+  performance can lead to significant differences in electricity consumption and CO₂
+  production.
+
+  With large simulations, a large proportion of the elapsed time is spent in number
+  crunching (concentrated in a few functions) and MPI communications. For
+  pseudo-spectral simulations based on Fourier transforms, the FFT functions and a few
+  other numerical kernels have to be very efficient. This is achieved by using advanced
+  FFT libraries and by compiling with special options like `-march=native` and `-Ofast`.
+
+- Parallelism is done through MPI on advanced hardware, so it is important to use the
+  right MPI implementation compiled with the right options.
+
+- The software environment is usually quite different from that on more standard
+  machines, with quite old operating systems and particular systems to provide other
+  software (modules, Guix, Spack, ...).
+
+- Computations are launched through a scheduler (like Slurm, OAR, ...) with a launching
+  script. In the Fluiddyn project, we tend to avoid writing the launching scripts by
+  hand (which is error prone and slow) and prefer to use the `fluiddyn.clusters` API,
+  which allows users to launch simulations with simple Python scripts.
+
+We present here a few examples of installation methods and launching scripts for
+different kinds of clusters:
+
+```{toctree}
+---
+caption: Examples
+maxdepth: 1
+---
+./examples/clusters/adastra/README.md
+./examples/clusters/gricad/README.md
+```
diff --git a/doc/install.md b/doc/install.md
index c25fcf1e..2bbae2d2 100644
--- a/doc/install.md
+++ b/doc/install.md
@@ -160,7 +160,7 @@ If the system has multiple MPI libraries, it is advised to explicitly mention th
 MPI command. For instance to use Intel MPI:
 
 ```sh
-CC=mpiicc pip install mpi4py --no-binary mpi4py
+CC=mpicc pip install mpi4py --no-binary mpi4py --force-reinstall --no-cache-dir --no-deps -v
 ```
 
 ````
@@ -176,7 +176,8 @@ time, this is what you want. However, you can install h5py from source and link
 to a hdf5 built with MPI support, as follows:
 
 ```bash
-CC="mpicc" HDF5_MPI="ON" HDF5_DIR=/path/to/parallel-hdf5 pip install --no-deps --no-binary=h5py h5py
+CC="mpicc" HDF5_MPI="ON" HDF5_DIR=/path/to/parallel-hdf5 \
+  pip install h5py --no-binary=h5py --force-reinstall --no-cache-dir --no-deps -v
 python -c 'import h5py; h5py.run_tests()'
 ```
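To illustrate the "simple Python scripts" mentioned in the new `install-clusters.md` page, here is a minimal sketch of a fluidsim simulation script of the kind that is submitted through `fluiddyn.clusters` (the solver, resolution and parameter values are arbitrary):

```python
# Minimal fluidsim script (parameter values are arbitrary). On a cluster it is
# typically run under MPI, e.g. `mpirun -np 16 python simul_ns3d.py`, from the
# job created by a fluiddyn.clusters launching script.
from fluidsim.solvers.ns3d.solver import Simul

params = Simul.create_default_params()
params.oper.nx = params.oper.ny = params.oper.nz = 128
params.nu_2 = 1e-3
params.init_fields.type = "noise"
params.time_stepping.t_end = 2.0
params.output.periods_save.phys_fields = 0.5

sim = Simul(params)
sim.time_stepping.start()
```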