From f1b66c69eab51d9b3b2e92360a9649ce8af5209f Mon Sep 17 00:00:00 2001 From: paugier Date: Wed, 4 Sep 2024 11:36:06 +0200 Subject: [PATCH] Doc install clusters (only Gricad) --- .mdformat.toml | 3 +++ doc/conf.py | 5 +++- doc/examples/clusters/gricad/README.md | 31 ++++++++++++---------- doc/index.md | 1 + doc/install-clusters.md | 36 ++++++++++++++++++++++++++ 5 files changed, 61 insertions(+), 15 deletions(-) create mode 100644 .mdformat.toml create mode 100644 doc/install-clusters.md diff --git a/.mdformat.toml b/.mdformat.toml new file mode 100644 index 00000000..972483a8 --- /dev/null +++ b/.mdformat.toml @@ -0,0 +1,3 @@ +wrap = 89 +number = true +end_of_line = "lf" diff --git a/doc/conf.py b/doc/conf.py index 8be11da7..35f363f7 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -136,7 +136,10 @@ # directories to ignore when looking for source files. exclude_patterns = [ "_build", - "examples/clusters/*", + "examples/clusters/jean_zay", + "examples/clusters/legi", + "examples/clusters/licallo", + "examples/clusters/occigen", "examples/forcing_anisotropic_3d/toro2022/*", ] diff --git a/doc/examples/clusters/gricad/README.md b/doc/examples/clusters/gricad/README.md index 44f648df..35d776cc 100644 --- a/doc/examples/clusters/gricad/README.md +++ b/doc/examples/clusters/gricad/README.md @@ -1,24 +1,25 @@ # Using Fluidsim on Gricad clusters -We show how to use Fluidsim on Gricad clusters. The main documentation for this -HPC platform is [here](https://gricad-doc.univ-grenoble-alpes.fr/hpc/). We -will use [Guix](https://gricad-doc.univ-grenoble-alpes.fr/hpc/softenv/guix/), -which is one of the recommended package managers for this platform. +We show in this directory +() +how to use Fluidsim on Gricad clusters. The main documentation for this HPC platform is +[here](https://gricad-doc.univ-grenoble-alpes.fr/hpc/). We will use +[Guix](https://gricad-doc.univ-grenoble-alpes.fr/hpc/softenv/guix/), which is one of the +recommended package managers for this platform. 
## Get a login and setup ssh -Get an account on https://perseus.univ-grenoble-alpes.fr/ +Get an account on . Set an ssh key and the alias ```sh -alias sshdahu='ssh dahu.ciment' +alias sshdahu='ssh -X dahu.ciment' ``` ## Setup Guix -The first thing to do, is to create the following file -`~/.config/guix/channels.scm`: +The first thing to do is to create the following file `~/.config/guix/channels.scm`: ```lisp (cons* (channel @@ -35,11 +36,11 @@ source /applis/site/guix-start.sh guix pull # This will take a while ``` -> You only need to update the guix environment (and thus run `guix pull`) when -a package you want to use has been created or updated. +You only need to update the guix environment (and thus run `guix pull`) when a package +you want to use has been created or updated. -After `guix pull`, you should run the following command to be sure you use the -lastest `guix` command: +After `guix pull`, you should run the following command to be sure you use the latest +`guix` command: ```sh GUIX_PROFILE="$HOME/.config/guix/current" @@ -48,7 +49,9 @@ GUIX_PROFILE="$HOME/.config/guix/current" ## Install Fluidsim from source -Clone the Fluidsim repository in `$HOME/dev`. +Install and setup Mercurial as explained +[here](https://fluidhowto.readthedocs.io/en/latest/mercurial/install-setup.html). Clone +the Fluidsim repository in `$HOME/dev`. 
### Change the changeset used for the Guix environment @@ -121,5 +124,5 @@ Submit with cd ~/dev/fluidsim/doc/examples/clusters/gricad source /applis/environments/conda.sh conda activate env-fluiddyn -python3 submit_bench_fluidsim.py +python submit_bench_fluidsim.py ``` diff --git a/doc/index.md b/doc/index.md index c8cd4f86..9fd33190 100644 --- a/doc/index.md +++ b/doc/index.md @@ -115,6 +115,7 @@ install tutorials examples build-from-source +install-clusters faq ``` diff --git a/doc/install-clusters.md b/doc/install-clusters.md new file mode 100644 index 00000000..35746b8d --- /dev/null +++ b/doc/install-clusters.md @@ -0,0 +1,36 @@ +# Fluidsim on clusters + +Computing clusters are sets of computers used for HPC. Installing on such machines in +order to run very large simulations is particular since + +- Performance is key. With very large simulations, differences of a few percent in + performance can lead to important differences in electricity consumption and CO₂ + production. + + With large simulations, a large proportion of elapsed time is spent crunching numbers + (concentrated in a few functions) and MPI communications. For pseudo-spectral simulations + based on Fourier transform, the FFT functions and a few other numerical kernels have to + be very efficient. This is achieved by using advanced FFT libraries and by compiling + with special options like `-march=native` and `-Ofast`. + +- Parallelism is done through MPI with advanced hardware so it's important to use the + right MPI implementation compiled with the right options. + +- The software environment is usually quite different than on more standard smaller + machines, with quite old operating systems and particular systems to use other software + (modules, Guix, Spack, ...). + +- Computations are launched through a scheduler (like Slurm, OAR, ...) with a launching + script. 
In the Fluiddyn project, we tend to avoid manually writing the launching + scripts (which is IMHO error-prone and slow) and prefer to use the `fluiddyn.clusters` + API, which allows users to launch simulations with simple Python scripts. + +We present here a few examples of installations on different kinds of clusters: + +```{toctree} --- caption: Examples maxdepth: 1 --- ./examples/clusters/gricad/README.md ```