diff --git a/anemoi_jobscript.sh b/anemoi_jobscript.sh
deleted file mode 100644
index 43cf4011..00000000
--- a/anemoi_jobscript.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-#SBATCH --qos=ng
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=4
-#SBATCH --gpus-per-node=4
-#SBATCH --cpus-per-task=32
-#SBATCH --mem=256G
-#SBATCH --time=1:00:00
-#SBATCH --job-name=aifs-dowa-model-test
-
-# debugging flags (optional)
-# export NCCL_DEBUG=INFO
-# export PYTHONFAULTHANDLER=1
-
-# on your cluster you might need these:
-# set the network interface
-# export NCCL_SOCKET_IFNAME=ib0,lo
-
-# generic settings
-CONDA_ENV=aifs
-GITDIR=/hpcperm/nld1247/aifs-lam-dowa
-WORKDIR=$GITDIR
-
-cd $WORKDIR
-module load conda
-conda activate $CONDA_ENV
-# srun python train_from_checkpoint.py
-srun anemoi-training train --config-name=stretched_grid.yaml
diff --git a/lumi_train.sh b/lumi_train.sh
deleted file mode 100644
index fd9a1fed..00000000
--- a/lumi_train.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-#SBATCH --output=/scratch/project_465001383/aifs/logs/name2.out
-#SBATCH --error=/scratch/project_465001383/aifs/logs/name2.err
-#SBATCH --nodes=1
-#SBATCH --ntasks-per-node=8
-#SBATCH --account=project_465001383
-#SBATCH --partition=dev-g
-#SBATCH --gpus-per-node=8
-#SBATCH --time=00:15:00
-#SBATCH --job-name=aifs
-#SBATCH --exclusive
-
-PROJECT_DIR=/pfs/lustrep4/scratch/$SLURM_JOB_ACCOUNT
-CONTAINER_SCRIPT=$PROJECT_DIR/aifs/run-pytorch/run-pytorch.sh
-
-#CHANGE THESE:
-CONTAINER=$PROJECT_DIR/aifs/container/containers/aifs-met-pytorch-2.2.0-rocm-5.6.1-py3.9-v2.0-new-correct-anemoi-models-sort-vars.sif
-PYTHON_SCRIPT=$PROJECT_DIR/haugenha/anemoi-training-setup/anemoi-training-config/anemoi-training/src/lumi_train.py
-VENV=/users/haugenha/work/.venv-anemoi-training
-
-
-module load LUMI/23.09 partition/G
-
-export SINGULARITYENV_LD_LIBRARY_PATH=/opt/ompi/lib:${EBROOTAWSMINOFIMINRCCL}/lib:/opt/cray/xpmem/2.4.4-2.3_9.1__gff0e1d9.shasta/lib64:${SINGULARITYENV_LD_LIBRARY_PATH}
-
-# MPI + OpenMP bindings: https://docs.lumi-supercomputer.eu/runjobs/scheduled-jobs/distribution-binding
-CPU_BIND="mask_cpu:fe000000000000,fe00000000000000,fe0000,fe000000,fe,fe00,fe00000000,fe0000000000"
-
-if [[ "$VENV" != "None" && -n "$VENV" ]]; then
-# Set this virtual environment
-    export VIRTUAL_ENV=$VENV
-
-# Ensure the virtual environment is loaded inside the container
-    export PYTHONUSERBASE=$VIRTUAL_ENV
-    export PATH=$PATH:$VIRTUAL_ENV/bin
-else
-    :
-fi
-
-# run run-pytorch.sh in singularity container like recommended
-# in LUMI doc: https://lumi-supercomputer.github.io/LUMI-EasyBuild-docs/p/PyTorch
-srun --cpu-bind=$CPU_BIND \
-    singularity exec -B /pfs:/pfs \
-        -B /var/spool/slurmd \
-        -B /opt/cray \
-        -B /usr/lib64 \
-        -B /usr/lib64/libjansson.so.4 \
-        $CONTAINER $CONTAINER_SCRIPT $PYTHON_SCRIPT
diff --git a/src/anemoi/training/config/data/zarr.yaml b/src/anemoi/training/config/data/zarr.yaml
index ba8b1117..1657861f 100644
--- a/src/anemoi/training/config/data/zarr.yaml
+++ b/src/anemoi/training/config/data/zarr.yaml
@@ -18,23 +18,23 @@ forcing:
 - "sin_local_time"
 - "insolation"
 - "lsm"
-#- "sdor"
-#- "slor"
-#- "z"
+- "sdor"
+- "slor"
+- "z"
 
 # features that are only part of the forecast state
 # but are not used as the input to the model
 diagnostic:
 - tp
-#- cp
+- cp
 remapped:
 normalizer:
   default: "mean-std"
   min-max:
   max:
-#  - "sdor"
-#  - "slor"
-#  - "z"
+  - "sdor"
+  - "slor"
+  - "z"
   none:
   - "cos_latitude"
   - "cos_longitude"
diff --git a/src/anemoi/training/config/debug_dowa.yaml b/src/anemoi/training/config/debug_dowa.yaml
deleted file mode 100644
index 12679aa5..00000000
--- a/src/anemoi/training/config/debug_dowa.yaml
+++ /dev/null
@@ -1,199 +0,0 @@
-defaults:
-  - hardware: atos_slurm
-  - data: zarr
-  - dataloader: default
-  - model: graphtransformer
-  - training: default
-  - graphs: stretched_grid
-  - diagnostics: eval_rollout
-  - override hydra/job_logging: none
-  - override hydra/hydra_logging: none
-  - _self_
-
-training:
-#provide original parent run if resuming (not latest parent run)
-# run_id: ''
-  max_epochs: 75
-  multistep_input: 2
-  lr:
-    #increase default learning rate by a factor 8, based on previous experiments
-    rate: 5e-4 #8 * 0.625e-4
-    # rate: 0.4e-5
-    # min: 2.4e-6 #8 * 3e-7
-    min: 3e-7
-    iterations: 150000
-data:
-  resolution: 5p2.5km
-  imputer:
-    mean:
-      - q_1000
-      - w_1000
-      - z_1000
-      - t_1000
-      - u_1000
-      - v_1000
-      - q_925
-      - w_925
-      - z_925
-      - t_925
-      - u_925
-      - v_925
-      - q_850
-      - w_850
-      - z_850
-      - t_850
-      - u_850
-      - v_850
-      - z_700
-      - w_700
-      - q_700
-      - u_700
-      - v_700
-      - t_700
-      - sst
-
-dataloader:
-  limit_batches:
-    training: 2000
-    validation: 700
-  training:
-    cutout:
-      - dataset: ${hardware.paths.localdata}/${hardware.files.localdata}
-        start: "2008-02-04"
-        end: "2009-10-30"
-        drop: ["sst"]
-        # missing_dates: ["2009-03-31T18", "2009-01-31T00", "2009-01-31T06", "2009-03-31T00", "2009-01-31T12", "2009-03-31T06", "2009-01-31T18", "2009-03-31T12"]
-        skip_missing_dates: True
-        expected_access: 2
-        reorder: ['q_50', 'q_100', 'q_150', 'q_200', 'q_250', 'q_300', 'q_400', 'q_500', 'q_600', 'q_700', 'q_850', 'q_925', 'q_1000', 'z_50', 'z_100', 'z_150', 'z_200', 'z_250', 'z_300', 'z_400', 'z_500', 'z_600', 'z_700', 'z_850', 'z_925', 'z_1000', 'sp', 'msl', 't_50', 't_100', 't_150', 't_200', 't_250', 't_300', 't_400', 't_500', 't_600', 't_700', 't_850', 't_925', 't_1000', '2t', 'u_50', 'u_100', 'u_150', 'u_200', 'u_250', 'u_300', 'u_400', 'u_500', 'u_600', 'u_700', 'u_850', 'u_925', 'u_1000', '10u', 'v_50', 'v_100', 'v_150', 'v_200', 'v_250', 'v_300', 'v_400', 'v_500', 'v_600', 'v_700', 'v_850', 'v_925', 'v_1000', '10v', 'w_50', 'w_100', 'w_150', 'w_200', 'w_250', 'w_300', 'w_400', 'w_500', 'w_600', 'w_700', 'w_850', 'w_925', 'w_1000', '2d', 'sdor', 'slor', 'cp', 'z', 'cos_latitude', 'cos_longitude', 'sin_latitude', 'sin_longitude', 'cos_julian_day', 'cos_local_time', 'sin_julian_day', 'sin_local_time', 'insolation', 'tcw', 'skt', 'lsm']
-      - dataset: ${hardware.paths.data}/${hardware.files.dataset}
-        start: "2008-02-04"
-        end: "2009-10-30"
-        drop: ["tp"]
-        missing_dates: ["2009-03-31T18", "2009-01-31T06", "2009-01-31T12", "2009-03-31T06", "2009-01-31T18", "2009-03-31T12"]
-        skip_missing_dates: True
-        expected_access: 2
-        reorder: ['q_50', 'q_100', 'q_150', 'q_200', 'q_250', 'q_300', 'q_400', 'q_500', 'q_600', 'q_700', 'q_850', 'q_925', 'q_1000', 'z_50', 'z_100', 'z_150', 'z_200', 'z_250', 'z_300', 'z_400', 'z_500', 'z_600', 'z_700', 'z_850', 'z_925', 'z_1000', 'sp', 'msl', 't_50', 't_100', 't_150', 't_200', 't_250', 't_300', 't_400', 't_500', 't_600', 't_700', 't_850', 't_925', 't_1000', '2t', 'u_50', 'u_100', 'u_150', 'u_200', 'u_250', 'u_300', 'u_400', 'u_500', 'u_600', 'u_700', 'u_850', 'u_925', 'u_1000', '10u', 'v_50', 'v_100', 'v_150', 'v_200', 'v_250', 'v_300', 'v_400', 'v_500', 'v_600', 'v_700', 'v_850', 'v_925', 'v_1000', '10v', 'w_50', 'w_100', 'w_150', 'w_200', 'w_250', 'w_300', 'w_400', 'w_500', 'w_600', 'w_700', 'w_850', 'w_925', 'w_1000', '2d', 'sdor', 'slor', 'cp', 'z', 'cos_latitude', 'cos_longitude', 'sin_latitude', 'sin_longitude', 'cos_julian_day', 'cos_local_time', 'sin_julian_day', 'sin_local_time', 'insolation', 'tcw', 'skt', 'lsm']
-    start: "2008-02-04"
-    end: "2009-10-30"
-    frequency: ${data.frequency}
-    drop: ["tcw", "t_500", "q_500", "z_500", "u_500", "v_500", "w_500"]
-
-  validation:
-    cutout:
-      - dataset: ${hardware.paths.localdata}/${hardware.files.localdata}
-        start: "2009-12-01"
-        end: "2009-12-15"
-        drop: ["sst"]
-        reorder: ['q_50', 'q_100', 'q_150', 'q_200', 'q_250', 'q_300', 'q_400', 'q_500', 'q_600', 'q_700', 'q_850', 'q_925', 'q_1000', 'z_50', 'z_100', 'z_150', 'z_200', 'z_250', 'z_300', 'z_400', 'z_500', 'z_600', 'z_700', 'z_850', 'z_925', 'z_1000', 'sp', 'msl', 't_50', 't_100', 't_150', 't_200', 't_250', 't_300', 't_400', 't_500', 't_600', 't_700', 't_850', 't_925', 't_1000', '2t', 'u_50', 'u_100', 'u_150', 'u_200', 'u_250', 'u_300', 'u_400', 'u_500', 'u_600', 'u_700', 'u_850', 'u_925', 'u_1000', '10u', 'v_50', 'v_100', 'v_150', 'v_200', 'v_250', 'v_300', 'v_400', 'v_500', 'v_600', 'v_700', 'v_850', 'v_925', 'v_1000', '10v', 'w_50', 'w_100', 'w_150', 'w_200', 'w_250', 'w_300', 'w_400', 'w_500', 'w_600', 'w_700', 'w_850', 'w_925', 'w_1000', '2d', 'sdor', 'slor', 'cp', 'z', 'cos_latitude', 'cos_longitude', 'sin_latitude', 'sin_longitude', 'cos_julian_day', 'cos_local_time', 'sin_julian_day', 'sin_local_time', 'insolation', 'tcw', 'skt', 'lsm']
-      - dataset: ${hardware.paths.data}/${hardware.files.dataset}
-        start: "2009-12-01"
-        end: "2009-12-15"
-        reorder: ['q_50', 'q_100', 'q_150', 'q_200', 'q_250', 'q_300', 'q_400', 'q_500', 'q_600', 'q_700', 'q_850', 'q_925', 'q_1000', 'z_50', 'z_100', 'z_150', 'z_200', 'z_250', 'z_300', 'z_400', 'z_500', 'z_600', 'z_700', 'z_850', 'z_925', 'z_1000', 'sp', 'msl', 't_50', 't_100', 't_150', 't_200', 't_250', 't_300', 't_400', 't_500', 't_600', 't_700', 't_850', 't_925', 't_1000', '2t', 'u_50', 'u_100', 'u_150', 'u_200', 'u_250', 'u_300', 'u_400', 'u_500', 'u_600', 'u_700', 'u_850', 'u_925', 'u_1000', '10u', 'v_50', 'v_100', 'v_150', 'v_200', 'v_250', 'v_300', 'v_400', 'v_500', 'v_600', 'v_700', 'v_850', 'v_925', 'v_1000', '10v', 'w_50', 'w_100', 'w_150', 'w_200', 'w_250', 'w_300', 'w_400', 'w_500', 'w_600', 'w_700', 'w_850', 'w_925', 'w_1000', '2d', 'sdor', 'slor', 'cp', 'z', 'cos_latitude', 'cos_longitude', 'sin_latitude', 'sin_longitude', 'cos_julian_day', 'cos_local_time', 'sin_julian_day', 'sin_local_time', 'insolation', 'tcw', 'skt', 'lsm']
-        drop: ["tp"]
-    start: "2009-12-01"
-    end: "2009-12-20"
-    frequency: ${data.frequency}
-    drop: ["tcw", "t_500", "q_500", "z_500", "u_500", "v_500", "w_500"]
-
-  test:
-    cutout:
-      - dataset: ${hardware.paths.localdata}/${hardware.files.localdata}
-        start: "2009-12-01"
-        end: "2009-12-15"
-        drop: ["sst"]
-        reorder: ['q_50', 'q_100', 'q_150', 'q_200', 'q_250', 'q_300', 'q_400', 'q_500', 'q_600', 'q_700', 'q_850', 'q_925', 'q_1000', 'z_50', 'z_100', 'z_150', 'z_200', 'z_250', 'z_300', 'z_400', 'z_500', 'z_600', 'z_700', 'z_850', 'z_925', 'z_1000', 'sp', 'msl', 't_50', 't_100', 't_150', 't_200', 't_250', 't_300', 't_400', 't_500', 't_600', 't_700', 't_850', 't_925', 't_1000', '2t', 'u_50', 'u_100', 'u_150', 'u_200', 'u_250', 'u_300', 'u_400', 'u_500', 'u_600', 'u_700', 'u_850', 'u_925', 'u_1000', '10u', 'v_50', 'v_100', 'v_150', 'v_200', 'v_250', 'v_300', 'v_400', 'v_500', 'v_600', 'v_700', 'v_850', 'v_925', 'v_1000', '10v', 'w_50', 'w_100', 'w_150', 'w_200', 'w_250', 'w_300', 'w_400', 'w_500', 'w_600', 'w_700', 'w_850', 'w_925', 'w_1000', '2d', 'sdor', 'slor', 'cp', 'z', 'cos_latitude', 'cos_longitude', 'sin_latitude', 'sin_longitude', 'cos_julian_day', 'cos_local_time', 'sin_julian_day', 'sin_local_time', 'insolation', 'tcw', 'skt', 'lsm']
-      - dataset: ${hardware.paths.data}/${hardware.files.dataset}
-        start: "2009-12-01"
-        end: "2009-12-15"
-        reorder: ['q_50', 'q_100', 'q_150', 'q_200', 'q_250', 'q_300', 'q_400', 'q_500', 'q_600', 'q_700', 'q_850', 'q_925', 'q_1000', 'z_50', 'z_100', 'z_150', 'z_200', 'z_250', 'z_300', 'z_400', 'z_500', 'z_600', 'z_700', 'z_850', 'z_925', 'z_1000', 'sp', 'msl', 't_50', 't_100', 't_150', 't_200', 't_250', 't_300', 't_400', 't_500', 't_600', 't_700', 't_850', 't_925', 't_1000', '2t', 'u_50', 'u_100', 'u_150', 'u_200', 'u_250', 'u_300', 'u_400', 'u_500', 'u_600', 'u_700', 'u_850', 'u_925', 'u_1000', '10u', 'v_50', 'v_100', 'v_150', 'v_200', 'v_250', 'v_300', 'v_400', 'v_500', 'v_600', 'v_700', 'v_850', 'v_925', 'v_1000', '10v', 'w_50', 'w_100', 'w_150', 'w_200', 'w_250', 'w_300', 'w_400', 'w_500', 'w_600', 'w_700', 'w_850', 'w_925', 'w_1000', '2d', 'sdor', 'slor', 'cp', 'z', 'cos_latitude', 'cos_longitude', 'sin_latitude', 'sin_longitude', 'cos_julian_day', 'cos_local_time', 'sin_julian_day', 'sin_local_time', 'insolation', 'tcw', 'skt', 'lsm']
-        drop: ["tp"]
-    frequency: ${data.frequency}
-    start: "2009-12-01"
-    end: "2009-12-15"
-    drop: ["tcw", "t_500", "q_500", "z_500", "u_500", "v_500", "w_500"]
-  batch_size:
-    training: 1
-    validation: 1
-    test: 1
-    predict: 1
-
-hardware:
-  paths:
-    data: /home/mlx/ai-ml/datasets/
-    checkpoints: ${oc.env:SCRATCH}/aifs-output/${data.resolution}/checkpoint/
-    localdata: /ec/res4/scratch/nld1247/output
-    # checkpoint: /hpcperm/nld1247/checkpoint_metno
-    # checkpoints:
-    #   parent: ${oc.env:SCRATCH}/aifs-output/${data.resolution}/checkpoint/
-  files:
-    dataset: aifs-ea-an-oper-0001-mars-o96-1979-2022-6h-v6.zarr
-# dataset: aifs-ea-an-oper-0001-mars-n320-1979-2022-6h-v6.zarr
-    dataset_lam: dowa_2008-2012.zarr
-    localdata: dowa_2008-2012.zarr
-    warm_start: aifs-by_time-epoch_049-step_098709.ckpt
-  num_gpus_per_model: 4
-
-diagnostics:
-  log:
-    interval: 100
-    mlflow:
-      enabled: True
-      offline: False
-      experiment_name: 'sophie'
-    wandb:
-      enabled: False
-  plot:
-    enabled: True
-    learned_features: False
-    parameters:
-      # - z_500
-      - t_850
-      - u_850
-      - v_850
-      - 2t
-      - sp
-      - msl
-    parameters_histogram: []
-      # - z_500
-      # - 2t
-    parameters_spectrum: []
-  print_memory_summary: False
-  show_entire_globe: False
-
-graphs:
-  save_graph_plots: False
-  clobber: True
-  data_mesh:
-    - name: meps
-      from: zarr
-      dataset: ${dataloader.training}
-      node_weights_type: area # options: area, uniform
-      node_weights_norm: unit-max # options: l1, l2, unit-max, unit-sum, unit-std
-  hidden_mesh:
-    resolution: [5,9]
-    padding: 16 # km of padding in processor mesh around refined limited areas. Some padding needed with knn encoder to avoid grid orphans
-    limited_areas: #list of paths to limited areas to fetch lat/lon from
-      - ${hardware.paths.localdata}/${hardware.files.localdata}
-  encoders:
-    - src_mesh: meps
-      method: knn # options: knn, cutoff
-      nearest_neighbours: 10 # only for knn method
-      add_directional_features: True
-      weight_norm: global-max
-  decoders:
-    - dst_mesh: meps
-      method: knn # options: knn, cutoff
-      nearest_neighbours: 3 # only for knn method
-      add_directional_features: True
-      weight_norm: global-max
-
-model:
-  run_id:
-  trainable_parameters:
-    data: 0
-    hidden: 0
-    data2hidden: 0
-    hidden2data: 0
-    hidden2hidden: 0
-  num_channels: 256
diff --git a/src/anemoi/training/config/diagnostics/eval_rollout.yaml b/src/anemoi/training/config/diagnostics/eval_rollout.yaml
index 2d033960..a8e9a845 100644
--- a/src/anemoi/training/config/diagnostics/eval_rollout.yaml
+++ b/src/anemoi/training/config/diagnostics/eval_rollout.yaml
@@ -5,7 +5,7 @@ eval:
   rollout: 12
   frequency: 20
 plot:
-  enabled: False
+  enabled: True
   asynchronous: True
   frequency: 750
   sample_idx: 0
@@ -20,7 +20,7 @@ plot:
   - 10v
   - sp
   - tp
-#  - cp
+  - cp
   #Defining the accumulation levels for precipitation related fields and the colormap
   accumulation_levels_plot: [0, 0.05, 0.1, 0.25, 0.5, 1, 1.5, 2, 3, 4, 5, 6, 7, 100] # in mm
   cmap_accumulation: ["#ffffff", "#04e9e7", "#019ff4", "#0300f4", "#02fd02", "#01c501", "#008e00", "#fdf802", "#e5bc00", "#fd9500", "#fd0000", "#d40000", "#bc0000", "#f800fd"]
@@ -81,6 +81,7 @@ log:
     log_model: False
     project: 'Anemoi'
     entity: ???
+    # logger options (these probably come with some overhead)
     gradients: False
     parameters: False
   tensorboard:
diff --git a/src/anemoi/training/config/stretched_grid.yaml b/src/anemoi/training/config/stretched_grid.yaml
deleted file mode 100644
index f2bd9548..00000000
--- a/src/anemoi/training/config/stretched_grid.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-defaults:
-  - data: zarr
-  - dataloader: native_grid
-  - diagnostics: eval_rollout
-  - hardware: slurm
-  - graph: stretched_grid
-  - model: graphtransformer
-  - training: default
-  - _self_
-
-dataloader:
-  num_workers:
-    training: 2
-    validation: 2
-    test: 2
-    predict: 2
-  batch_size:
-    training: 1
-    validation: 1
-    test: 1
-    predict: 1
-
-  dataset:
-    cutout:
-      - dataset: ${hardware.paths.data}/${hardware.files.dataset_lam}
-      - dataset: ${hardware.paths.data}/${hardware.files.dataset}
-    adjust: all
-
-  limit_batches:
-    training: 20
-    validation: 20
-
-  training:
-    start: 2020-02-05
-    end: 2022-05-31 #15
-    #drop: [sdor, slor, cp] #, u_600, v_600, z_600, t_600, q_600, w_600]
-    statistics: ${hardware.paths.data}/ERA5/aifs-od-an-oper-0001-mars-n320-2019-2023-6h-v6.zarr
-# sort_vars: True
-  validation:
-    start: 2022-06-01
-    end: 2023-05-31
-    #drop: [sdor, slor, cp] #, u_600, v_600, z_600, t_600, q_600, w_600]
-    statistics: ${hardware.paths.data}/ERA5/aifs-od-an-oper-0001-mars-n320-2019-2023-6h-v6.zarr
-# sort_vars: True
-  test:
-    start: 2022-06-01
-    end: 2023-05-31
-    #drop: [sdor, slor, cp] #, u_600, v_600, z_600, t_600, q_600, w_600]
-    statistics: ${hardware.paths.data}/ERA5/aifs-od-an-oper-0001-mars-n320-2019-2023-6h-v6.zarr
-# sort_vars: True
-
-hardware: #change these to lumi paths
-  num_gpus_per_node: 8
-  num_nodes: 1
-  num_gpus_per_model: 1
-  paths:
-    data: /pfs/lustrep4/scratch/project_465001383/aifs/dataset/
-# output_base: /pfs/lustrep4/scratch/project_465000899/aifs/experiments/ #/lustre/storeB/project/nwp/aifs/test_output/
-    output: /pfs/lustrep4/scratch/project_465001383/aifs/experiments/test-anemoi-training/ #do not change this, it will be modified in code to be output_base + run_id.
-    graph: /pfs/lustrep4/scratch/project_465001383/aifs/graphs/ #/lustre/storeB/project/nwp/aifs/test_graphs/
-  files:
-    dataset: ERA5/aifs-od-an-oper-0001-mars-o96-2016-2023-6h-v6.zarr #aifs-od-an-oper-0001-mars-o96-2016-2023-6h-v6.zarr
-    dataset_lam: MEPS/aifs-meps-10km-2020-2024-6h-v6.zarr
-    graph: test-anemoi-training.pt
-    warm_start: null #specific checkpoint to start from, defaults to last.ckpt
-
-data:
-  resolution: None
-
-model:
-  num_channels: 512
-  trainable_parameters:
-    data: 0
-    hidden: 0
-    data2hidden: 0
-    hidden2data: 0
-    hidden2hidden: 0 # GNN and GraphTransformer Processor only
-
-graphs:
-  output_path: ${hardware.paths.graph}${hardware.files.graph}
-  save_graph_plots: False
-
-training:
-  run_id: null #path to store the experiment in with output_base as root, null for random name, =fork_run_id to continue training in the same folder.
-  fork_run_id: null #path to the experiment to fork from with output_base as root
-  load_weights_only: False #loads entire model if False, loads only weights if True
-  max_epochs: 50
-  lr:
-    rate: 5.0e-6
-    iterations: 10000
-    min: 8.0e-6
diff --git a/src/anemoi/training/config/training/default.yaml b/src/anemoi/training/config/training/default.yaml
index 6bd90dbf..870eeb7a 100644
--- a/src/anemoi/training/config/training/default.yaml
+++ b/src/anemoi/training/config/training/default.yaml
@@ -71,7 +71,7 @@ loss_scaling:
     10v: 0.1
     2d: 0.5
     tp: 0.025
-#    cp: 0.0025
+    cp: 0.0025
 
 metrics:
 - z_500
diff --git a/src/lumi_train.py b/src/lumi_train.py
deleted file mode 100644
index c058e854..00000000
--- a/src/lumi_train.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# (C) Copyright 2024 Anemoi contributors
-# This software is licensed under the terms of the Apache Licence Version 2.0
-# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
-# In applying this licence, ECMWF does not waive the privileges and immunities
-# granted to it by virtue of its status as an intergovernmental organisation
-# nor does it submit to any jurisdiction.
-
-from hydra import compose
-from hydra import initialize
-
-from anemoi.training.train.train import AnemoiTrainer
-
-with initialize(version_base=None, config_path="anemoi/training/config"):
-    config = compose(config_name="stretched_grid")
-
-T = AnemoiTrainer(config)
-
-T.train()