==== VSC-4 ====

Sample job script for running multiple MPI jobs on a single VSC-4 node.

Note: "mem_per_task" has to be chosen such that all tasks together fit into the available memory of the node:

<code>
mem_per_task * mytasks < available memory of the node
</code>

The complete job script:

<code>
#!/bin/bash
#SBATCH -J many
#SBATCH -N 1

export SLURM_STEP_GRES=none

mytasks=4
cmd="..."                # MPI program to run in each task
mem_per_task=10G

for i in `seq 1 $mytasks`
do
  # start each MPI job as its own job step in the background
  srun --mem=$mem_per_task --cpus-per-task=2 --ntasks=1 $cmd &
done
wait                     # wait for all background job steps to finish
</code>

The available memory is roughly 2 GB less than the installed memory, because the operating system itself is kept in memory. For a standard node with 96 GB of memory this means, e.g.:

<code>
23 GB * 4 = 92 GB < 94 GB
</code>
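
The same arithmetic can also be done inside the job script instead of hard-coding "mem_per_task". A minimal sketch, assuming a 96 GB node and the hypothetical helper variable "avail_gb":

<code>
mytasks=4
# roughly 2 GB of the 96 GB are reserved for the operating system
avail_gb=94
mem_per_task="$(( avail_gb / mytasks ))G"   # 94 / 4 = 23  ->  "23G"
</code>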

==== VSC-3 ====

=== With srun: ===

<code>
#!/bin/bash
#SBATCH -J test
#SBATCH -N 1
#SBATCH --ntasks-per-core=1
#SBATCH --ntasks-per-node=2

export SLURM_STEP_GRES=none

module load intel/18 intel-mpi/2018

for i in 0 8
do
  j=$(($i+1))
  # bind the two ranks of each job to neighbouring cores i and i+1
  srun -n 2 --cpu_bind=map_cpu:$i,$j ./hello_world_intelmpi2018 &
done
wait

exit 0
</code>
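
For illustration, the two iterations of the loop above expand to roughly the following calls, i.e. one two-rank MPI job on cores 0 and 1 and a second one on cores 8 and 9:

<code>
srun -n 2 --cpu_bind=map_cpu:0,1 ./hello_world_intelmpi2018 &   # i=0, j=1
srun -n 2 --cpu_bind=map_cpu:8,9 ./hello_world_intelmpi2018 &   # i=8, j=9
wait
</code>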

=== With mpirun (Intel MPI): ===

<code>
#!/bin/bash
#SBATCH -J test
#SBATCH -N 1
#SBATCH --ntasks-per-core=1
#SBATCH --ntasks-per-node=2
export SLURM_STEP_GRES=none
module load intel/18 intel-mpi/2018

for i in 0 8
do
  j=$(($i+1))
  # pin the two ranks of each job to cores i and i+1 (Intel MPI pinning)
  mpirun -env I_MPI_PIN_PROCESSOR_LIST $i,$j -np 2 ./hello_world_intelmpi2018 &
done
wait
exit 0
</code>
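
With Intel MPI the pinning can also be set through the I_MPI_PIN_PROCESSOR_LIST environment variable; a minimal sketch for a single two-rank job on cores 0 and 1:

<code>
export I_MPI_PIN_PROCESSOR_LIST=0,1
mpirun -np 2 ./hello_world_intelmpi2018
</code>

Note that an exported value applies to every subsequent mpirun in the script, so when several jobs with different core lists are started from one loop, the value has to be set for each call individually.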

You can download the C code example here: {{ :

Compile it e.g. with:

<code>
# module load intel/18 intel-mpi/2018
# mpiicc -lhwloc hello_world.c -o hello_world_intelmpi2018
</code>
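
Any of the job scripts above is submitted to SLURM with sbatch; assuming it was saved as multi_mpi.sh (a placeholder name):

<code>
sbatch multi_mpi.sh
squeue -u $USER      # check the state of the submitted job
</code>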