diff --git a/conf/common/build.yml b/conf/common/build.yml index 8f4e190796cb6c6167930f15645765adecd8a57a..9875df32417fb2fc99f5dd38a437bc3945d3eccf 100644 --- a/conf/common/build.yml +++ b/conf/common/build.yml @@ -1,20 +1,24 @@ spack: - #url: git@gitlab.physik.uni-muenchen.de:LDAP_rbg/spack.git - #branch: lmu/ubuntu20.04-icon - #compiler: gcc@11.3.0 - init: "" # command to load spack environment, e.g. module load spack, use spack/setup-env.sh if empty - url: https://github.com/spack/spack.git # url to download spack if necessary - branch: develop # if downloaded, branch name to use - externals: "" # list of packages we try to find with spack external find - compiler: "gcc@12.2.0" # desired compiler for spack - root: "%HPCROOTDIR%/spack" # path to a spack install, will be downloaded to if not present - user_cache_path: "%HPCROOTDIR%/spack_user_cache_path" # spack puts data here when bootstrapping, leave empty to use home folder + init: "" # command to load spack environment, e.g. module load spack, + # use spack/setup-env.sh if empty + url: https://github.com/spack/spack.git # url to download spack if necessary + branch: develop # if downloaded, branch name to use + externals: "" # list of packages we try to find with spack external find + # In LMU we need to include slurm to this list. 
+ compiler: "gcc@12.2.0" # desired compiler for spack + root: "%HPCROOTDIR%/spack" # path to a spack install, will be downloaded to if not present + user_cache_path: "%HPCROOTDIR%/spack_user_cache_path" # spack puts data here when bootstrapping, leave empty to use home folder user_config_path: "%HPCROOTDIR%/spack_user_config_path" # spack puts data here when bootstrapping, leave empty to use home folder - disable_local_config: false # if true, spack installs into spack source dir + disable_local_config: false # if true, spack installs into spack source dir icon: - build_cmd: "icon-nwp@%ICON.VERSION%% %SPACK.COMPILER% ^openmpi+pmi+legacylaunchers schedulers=slurm fabrics=ucx ucx+dc+dm+ib_hw_tm+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs" - version: master + # The command that will be used to build icon-nwp with spack + build_cmd: "icon-nwp@%ICON.VERSION%% %SPACK.COMPILER%" + # In LRZ we used the following command to point to a specific version of openmpi: + # build_cmd: "icon-nwp@%ICON.VERSION%% %SPACK.COMPILER% ^openmpi/amct7nx" + # In LMU we used the following command to build a version of openmpi that works with slurm: + # build_cmd: "icon-nwp@%ICON.VERSION%% %SPACK.COMPILER% ^openmpi+pmi+legacylaunchers schedulers=slurm fabrics=ucx ucx+dc+dm+ib_hw_tm+mlx5_dv+rc+rdmacm+thread_multiple+ud+verbs" + version: master # The latest release at the moment of creating this file was 2.6.5-nwp0 python_environment: # Name of the virtual environment in the remote platform experiment folder @@ -23,9 +27,7 @@ python_environment: requirements: # Because there's an issue with numba, for now we need to keep a specific version of numpy - numpy==1.23 + # Using enstools-compression to compress the outputs. - enstools-compression - # Just to try a library from a git repository. - - git+https://gitlab.physik.uni-muenchen.de/Oriol.Tinto/otils.git + # We are using the f90nml library to manipulate name lists. 
- f90nml - -LOGIN: "LOGIN" diff --git a/conf/common/icon.yml b/conf/common/icon.yml deleted file mode 100644 index 387ab0d43b2c651198891dddc005db4ff12dce65..0000000000000000000000000000000000000000 --- a/conf/common/icon.yml +++ /dev/null @@ -1,2 +0,0 @@ -icon: - version: 2.6.5-nwp0 diff --git a/conf/common/platforms.yml b/conf/common/platforms.yml index 2452b8ac5af53699b031f78a3be00453ad9f4ed4..a1ebe8ee80a73fa569066038761f60fab61f36ba 100644 --- a/conf/common/platforms.yml +++ b/conf/common/platforms.yml @@ -1,32 +1,54 @@ +UserInformation: + LMU: + user: FILL_USER + host: FILL_HOST + project: FILL_PROJECT + scratch: FILL_SCRATCH + LRZ: + user: FILL_USER + host: FILL_HOST + project: FILL_PROJECT + scratch: FILL_SCRATCH + Platforms: LMU: TYPE: slurm - HOST: FILL_HOST - PROJECT: FILL_PROJECT - USER: FILL_USER - SCRATCH_DIR: FILL_SCRATCH + HOST: %UserInformation.LMU.host% + PROJECT: %UserInformation.LMU.project% + USER: %UserInformation.LMU.user% + SCRATCH_DIR: %UserInformation.LMU.scratch% ADD_PROJECT_TO_HOST: False MAX_WALLCLOCK: '48:00' TEMP_DIR: '' - # CUSTOM_DIRECTIVES: ["--export=ALL,OMPI_MCA_btl_tcp_if_include=\"10.0.0.0/8\""] CUSTOM_DIRECTIVES: "#SBATCH --export=ALL,OMPI_MCA_btl_tcp_if_include=10.0.0.0/8" - LOGIN: + + LMU_LOGIN: TYPE: ps - HOST: FILL_HOST - PROJECT: FILL_PROJECT - USER: FILL_USER - SCRATCH_DIR: FILL_SCRATCH + HOST: %UserInformation.LMU.host% + PROJECT: %UserInformation.LMU.project% + USER: %UserInformation.LMU.user% + SCRATCH_DIR: %UserInformation.LMU.scratch% ADD_PROJECT_TO_HOST: False MAX_WALLCLOCK: '48:00' TEMP_DIR: '' LRZ: - # TYPE: slurm + TYPE: slurm + HOST: %UserInformation.LRZ.host% + PROJECT: %UserInformation.LRZ.project% + USER: %UserInformation.LRZ.user% + SCRATCH_DIR: %UserInformation.LRZ.scratch% + ADD_PROJECT_TO_HOST: False + MAX_WALLCLOCK: '48:00' + TEMP_DIR: '' + + LRZ_LOGIN: TYPE: ps - HOST: FILL_HOST - PROJECT: FILL_PROJECT - USER: FILL_USER - SCRATCH_DIR: FILL_SCRATCH + HOST: %UserInformation.LRZ.host% + PROJECT: 
%UserInformation.LRZ.project% + USER: %UserInformation.LRZ.user% + SCRATCH_DIR: %UserInformation.LRZ.scratch% ADD_PROJECT_TO_HOST: False MAX_WALLCLOCK: '48:00' TEMP_DIR: '' + diff --git a/conf/real-from-dwd-ana/jobs.yml b/conf/real-from-dwd-ana/jobs.yml index 594eba29f14ab552f28c02329afc9ecd5fc86a0b..04e57d429e8dee534b1b7e7c51edc9c279a06378 100644 --- a/conf/real-from-dwd-ana/jobs.yml +++ b/conf/real-from-dwd-ana/jobs.yml @@ -1,61 +1,4 @@ -# Example job with all options specified JOBS: - ## Job name - # JOBNAME: - ## Script to execute. If not specified, job will be omitted from workflow. "You can also specify additional files separated by a ",". - # Note: The post-processed additional_files will be sent to %HPCROOT%/LOG_%EXPID% - ## Path relative to the project directory - # FILE: - ## Platform to execute the job. If not specified, defaults to HPCARCH in expedf file. - ## LOCAL is always defined and refers to current machine - # PLATFORM: - ## Queue to add the job to. If not specified, uses PLATFORM default. - # QUEUE: - ## Defines dependencies from job as a list of parents jobs separated by spaces. - ## Dependencies to jobs in previous chunk, member o startdate, use -(DISTANCE) - # DEPENDENCIES:INI SIM-1 CLEAN-2 - ## Define if jobs runs once, once per stardate, once per member or once per chunk. Options: once, date, member, chunk. - ## If not specified, defaults to once - # RUNNING:once - ## Specifies that job has only to be run after X dates, members or chunk. A job will always be created for the last - ## If not specified, defaults to 1 - # FREQUENCY:3 - ## On a job with FREQUENCY > 1, if True, the dependencies are evaluated against all - ## jobs in the frequency interval, otherwise only evaluate dependencies against current - ## iteration. - ## If not specified, defaults to True - # WAIT:False - ## Defines if job is only to be executed in reruns. If not specified, defaults to false. 
- # RERUN_ONLY:False - ## Wallclock to be submitted to the HPC queue in format HH:MM - # WALLCLOCK:00:05 - - ## Processors number to be submitted to the HPC. If not specified, defaults to 1. - ## Wallclock chunk increase (WALLCLOCK will be increased according to the formula WALLCLOCK + WCHUNKINC * (chunk - 1)). - ## Ideal for sequences of jobs that change their expected running time according to the current chunk. - # WCHUNKINC: 00:01 - # PROCESSORS: 1 - ## Threads number to be submitted to the HPC. If not specified, defaults to 1. - # THREADS: 1 - ## Enables hyper-threading. If not specified, defaults to false. - # HYPERTHREADING: false - ## Tasks number to be submitted to the HPC. If not specified, defaults to 1. - # Tasks: 1 - ## Memory requirements for the job in MB - # MEMORY: 4096 - ## Number of retrials if a job fails. If not specified, defaults to the value given on experiment's autosubmit.yml - # RETRIALS: 4 - ## Allows to put a delay between retries, of retrials if a job fails. If not specified, it will be static - # DELAY_RETRY_TIME: 11 - # DELAY_RETRY_TIME: +11 # will wait 11,22,33,44... - # DELAY_RETRY_TIME: *11 # will wait 11,110,1110,11110... - ## Some jobs can not be checked before running previous jobs. Set this option to false if that is the case - # CHECK: False - ## Select the interpreter that will run the job. Options: bash, python, r Default: bash - # TYPE: bash - ## Specify the path to the interpreter. If empty, use system default based on job type . 
Default: empty - # EXECUTABLE: /my_python_env/python3 - TRANSFER_PROJECT: FILE: templates/common/transfer_project.sh PLATFORM: LOCAL @@ -65,7 +8,7 @@ JOBS: DEPENDENCIES: TRANSFER_PROJECT WALLCLOCK: 04:00 PROCESSORS: 16 - RETRIALS: 2 # retry beause spack downloads sometimes timeout + RETRIALS: 2 # retry because spack downloads sometimes timeout NODES: 1 BUILD_PYTHON_ENVIRONMENT: @@ -100,8 +43,8 @@ JOBS: RUNNING: member WALLCLOCK: 01:00 - PREPARE_CHUNK: - FILE: templates/real-from-dwd-ana/prepare_chunk.py + PREPARE_NAMELIST: + FILE: templates/real-from-dwd-ana/prepare_namelist.py DEPENDENCIES: TRANSFER_PROJECT BUILD_PYTHON_ENVIRONMENT PREPARE_MEMBER RUN_ICON-1 WALLCLOCK: 00:05 RUNNING: chunk @@ -112,12 +55,12 @@ JOBS: RUN_ICON: FILE: templates/common/run_icon.sh - DEPENDENCIES: PREPARE_CHUNK + DEPENDENCIES: PREPARE_NAMELIST WALLCLOCK: 04:00 RUNNING: chunk PROCESSORS: 64 MEMORY: 81920 - CUSTOM_DIRECTIVES: [ "#SBATCH --exclusive"] + CUSTOM_DIRECTIVES: [ "#SBATCH --exclusive" ] COMPRESS: FILE: templates/common/compress.py diff --git a/conf/real-from-ideal/jobs.yml b/conf/real-from-ideal/jobs.yml index 46473987aa0aaf0dc6c708e770ca4a12fc4886b6..a098d920334182e985989dfe0b8d87466e36293b 100644 --- a/conf/real-from-ideal/jobs.yml +++ b/conf/real-from-ideal/jobs.yml @@ -1,60 +1,4 @@ -# Example job with all options specified JOBS: - ## Job name - # JOBNAME: - ## Script to execute. If not specified, job will be omitted from workflow. "You can also specify additional files separated by a ",". - # Note: The post-processed additional_files will be sent to %HPCROOT%/LOG_%EXPID% - ## Path relative to the project directory - # FILE: - ## Platform to execute the job. If not specified, defaults to HPCARCH in expedf file. - ## LOCAL is always defined and refers to current machine - # PLATFORM: - ## Queue to add the job to. If not specified, uses PLATFORM default. - # QUEUE: - ## Defines dependencies from job as a list of parents jobs separated by spaces. 
- ## Dependencies to jobs in previous chunk, member o startdate, use -(DISTANCE) - # DEPENDENCIES:INI SIM-1 CLEAN-2 - ## Define if jobs runs once, once per stardate, once per member or once per chunk. Options: once, date, member, chunk. - ## If not specified, defaults to once - # RUNNING:once - ## Specifies that job has only to be run after X dates, members or chunk. A job will always be created for the last - ## If not specified, defaults to 1 - # FREQUENCY:3 - ## On a job with FREQUENCY > 1, if True, the dependencies are evaluated against all - ## jobs in the frequency interval, otherwise only evaluate dependencies against current - ## iteration. - ## If not specified, defaults to True - # WAIT:False - ## Defines if job is only to be executed in reruns. If not specified, defaults to false. - # RERUN_ONLY:False - ## Wallclock to be submitted to the HPC queue in format HH:MM - # WALLCLOCK:00:05 - - ## Processors number to be submitted to the HPC. If not specified, defaults to 1. - ## Wallclock chunk increase (WALLCLOCK will be increased according to the formula WALLCLOCK + WCHUNKINC * (chunk - 1)). - ## Ideal for sequences of jobs that change their expected running time according to the current chunk. - # WCHUNKINC: 00:01 - # PROCESSORS: 1 - ## Threads number to be submitted to the HPC. If not specified, defaults to 1. - # THREADS: 1 - ## Enables hyper-threading. If not specified, defaults to false. - # HYPERTHREADING: false - ## Tasks number to be submitted to the HPC. If not specified, defaults to 1. - # Tasks: 1 - ## Memory requirements for the job in MB - # MEMORY: 4096 - ## Number of retrials if a job fails. If not specified, defaults to the value given on experiment's autosubmit.yml - # RETRIALS: 4 - ## Allows to put a delay between retries, of retrials if a job fails. If not specified, it will be static - # DELAY_RETRY_TIME: 11 - # DELAY_RETRY_TIME: +11 # will wait 11,22,33,44... - # DELAY_RETRY_TIME: *11 # will wait 11,110,1110,11110... 
- ## Some jobs can not be checked before running previous jobs. Set this option to false if that is the case - # CHECK: False - ## Select the interpreter that will run the job. Options: bash, python, r Default: bash - # TYPE: bash - ## Specify the path to the interpreter. If empty, use system default based on job type . Default: empty - # EXECUTABLE: /my_python_env/python3 TRANSFER_PROJECT: FILE: templates/common/transfer_project.sh PLATFORM: LOCAL @@ -64,7 +8,7 @@ JOBS: DEPENDENCIES: TRANSFER_PROJECT WALLCLOCK: 04:00 PROCESSORS: 16 - RETRIALS: 2 # retry beause spack downloads sometimes timeout + RETRIALS: 2 # retry because spack downloads sometimes timeout NODES: 1 BUILD_PYTHON_ENVIRONMENT: @@ -128,8 +72,8 @@ JOBS: WALLCLOCK: 01:00 PLATFORM: "%DEFAULT.HPCARCH%_LOGIN" - PREPARE_CHUNK: - FILE: templates/real-from-ideal/prepare_chunk.py + PREPARE_NAMELIST: + FILE: templates/real-from-ideal/prepare_namelist.py DEPENDENCIES: TRANSFER_PROJECT BUILD_PYTHON_ENVIRONMENT PREPARE_MEMBER RUN_ICON-1 WALLCLOCK: 00:05 RUNNING: chunk @@ -139,7 +83,7 @@ JOBS: RUN_ICON: FILE: templates/common/run_icon.sh - DEPENDENCIES: PREPARE_CHUNK + DEPENDENCIES: PREPARE_NAMELIST WALLCLOCK: 01:00 RUNNING: chunk PROCESSORS: 16 diff --git a/templates/real-from-dwd-ana/prepare_chunk.py b/templates/real-from-dwd-ana/prepare_namelist.py similarity index 100% rename from templates/real-from-dwd-ana/prepare_chunk.py rename to templates/real-from-dwd-ana/prepare_namelist.py diff --git a/templates/real-from-ideal/prepare_chunk.py b/templates/real-from-ideal/prepare_namelist.py similarity index 100% rename from templates/real-from-ideal/prepare_chunk.py rename to templates/real-from-ideal/prepare_namelist.py