Differences
This shows you the differences between two versions of the page.
Both sides previous revision Previous revision Next revision | Previous revision Next revisionBoth sides next revision | ||
doku:comsol [2021/10/22 09:46] – [Possible IO-Error] ir | doku:comsol [2024/04/03 07:55] – [Job script] amelic | ||
---|---|---|---|
Line 1: | Line 1: | ||
====== COMSOL ====== | ====== COMSOL ====== | ||
- | The following case is provided here including the directories-structure\\ | + | The following case is provided here including the directories-structure and the appropriate batch-file: {{ : |
- | and the appropriate batch-file: {{ : | + | |
===== Module ===== | ===== Module ===== | ||
Line 9: | Line 9: | ||
< | < | ||
- | module avail 2>&1 | grep -i comsol | + | module avail Comsol |
</ | </ | ||
- | Currently on VSC-4, these versions can be loaded: | + | Currently on VSC-4 and VSC-5, these versions can be loaded: |
* Comsol/5.5 | * Comsol/5.5 | ||
* Comsol/5.6 | * Comsol/5.6 | ||
+ | * Comsol/6.1 | ||
< | < | ||
Line 36: | Line 37: | ||
#SBATCH --nodes=1 | #SBATCH --nodes=1 | ||
- | #SBATCH --ntasks-per-node=24 | + | #SBATCH --ntasks-per-node=4 |
#SBATCH --job-name=" | #SBATCH --job-name=" | ||
- | #SBATCH --partition=mem_0384 | + | #SBATCH --partition=zen3_0512 |
- | #SBATCH --qos=mem_0384 | + | #SBATCH --qos=zen3_0512_devel |
+ | |||
+ | export I_MPI_PIN_RESPECT_CPUSET=0 | ||
+ | export I_MPI_PIN_PROCESSOR_LIST=0-3 | ||
module purge | module purge | ||
- | module load Comsol/5.6 | + | module load intel-mpi/2021.5.0 |
+ | module load Comsol/6.1 | ||
MODELTOCOMPUTE=" | MODELTOCOMPUTE=" | ||
Line 59: | Line 64: | ||
echo "and the usual slurm...out" | echo "and the usual slurm...out" | ||
- | # COMSOL' | + | # Example |
- | comsol | + | |
+ | comsol | ||
+ | |||
</ | </ | ||
Line 79: | Line 87: | ||
==== Using a shared node ==== | ==== Using a shared node ==== | ||
- | If your case isn't that demanding | + | If your case isn't that demanding |
- | On these nodes you have to tell SLURM how much memory (RAM) your case would need. This value should be less than the maximum of 96GB these nodes provide. Otherwise your job needs a whole node anyway. | + | |
+ | On these nodes you have to tell SLURM, | ||
Here we use --mem=20G to dedicate 20GB of memory. | Here we use --mem=20G to dedicate 20GB of memory. | ||
Line 90: | Line 99: | ||
#SBATCH --ntasks-per-node=1 | #SBATCH --ntasks-per-node=1 | ||
#SBATCH --job-name=" | #SBATCH --job-name=" | ||
- | #SBATCH --qos=mem_0096 | + | #SBATCH --qos=skylake_0096 |
#SBATCH --mem=20G | #SBATCH --mem=20G | ||