#!/bin/sh
#< 2 nodes, 2 MPI processes per node, 6 OpenMP threads per MPI process
#SBATCH --partition=cpu
#SBATCH --qos=cpu
#SBATCH --job-name="mpi_hello-GNU_compiler"
#SBATCH --output=%x.o%j
##SBATCH --error=%x.e%j          #< If --error is not specified, stderr is redirected to stdout
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=2      #< Number of MPI tasks per node
#SBATCH --cpus-per-task=6        #< Number of OpenMP threads per MPI task

## Uncomment the following line if you need an amount of memory other than the default (512MB)
##SBATCH --mem=2G

## Uncomment the following line if your job needs a wall clock time other than the default (1 hour)
## Please note that the priority of a queued job decreases as the requested time increases
##SBATCH --time=0-00:30:00

## Uncomment the following line if you want to use an account other than your default account
##SBATCH --account=

echo "# SLURM_JOB_NODELIST : $SLURM_JOB_NODELIST"
echo "# SLURM_CPUS_PER_TASK: $SLURM_CPUS_PER_TASK"

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

module load gnu openmpi
mpicc -fopenmp mpiomp_hello.c -o mpiomp_hello
mpirun ./mpiomp_hello

## Uncomment the following lines to compile and run with the Intel compiler instead
#module load intel impi
#mpicc -qopenmp mpiomp_hello.c -o mpiomp_hello_intel
#mpirun ./mpiomp_hello_intel
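The source file mpiomp_hello.c is not shown here. A minimal sketch of a hybrid MPI+OpenMP hello-world that is consistent with the compile and run lines above (the actual program used on the cluster may differ) could look like this:

/* mpiomp_hello.c - minimal hybrid MPI+OpenMP example (illustrative sketch) */
#include <mpi.h>
#include <omp.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int rank, size;

    /* MPI calls stay outside the OpenMP parallel region, so plain MPI_Init suffices */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Each MPI task spawns OMP_NUM_THREADS threads (set from SLURM_CPUS_PER_TASK in the script) */
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int nthreads = omp_get_num_threads();
        printf("Hello from thread %d/%d of MPI rank %d/%d\n",
               tid, nthreads, rank, size);
    }

    MPI_Finalize();
    return 0;
}

With the resources requested above (2 nodes, 2 tasks per node, 6 CPUs per task), this prints one line per thread: 4 MPI ranks times 6 OpenMP threads, 24 lines in total.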