#!/bin/bash --login
#SBATCH --job-name=mdrun_mpi_omp
#SBATCH --output=%x.o%j
#SBATCH --error=%x.e%j
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=6
#SBATCH --exclusive
#SBATCH --time=0-24:00:00
#SBATCH --mem=120G
#SBATCH --partition=cpu
#SBATCH --qos=cpu
#SBATCH --account=

# Abort unless running as a login shell inside a Slurm allocation.
shopt -q login_shell || exit 1
test -n "$SLURM_NODELIST" || exit 1

module load gnu
module load openmpi
module load gromacs/5.1.5-cpu

# Set OMP_NUM_THREADS to the same value as --cpus-per-task with a fallback in
# case it isn't set. SLURM_CPUS_PER_TASK is set to the value of --cpus-per-task,
# but only if --cpus-per-task is explicitly set.
if [ -n "$SLURM_CPUS_PER_TASK" ]; then
    OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
elif [ -n "$SLURM_NTASKS_PER_NODE" ]; then
    OMP_NUM_THREADS=$((SLURM_CPUS_ON_NODE / SLURM_NTASKS_PER_NODE))
elif [ -n "$SLURM_NTASKS" ] && [ -n "$SLURM_NNODES" ]; then
    OMP_NUM_THREADS=$((SLURM_CPUS_ON_NODE / (SLURM_NTASKS / SLURM_NNODES)))
else
    OMP_NUM_THREADS=1
fi
export OMP_NUM_THREADS

# Run GROMACS with one MPI rank per Slurm task and OMP_NUM_THREADS OpenMP
# threads per rank; -pin on pins mdrun's threads to cores.
mpirun gmx mdrun -deffnm topology -pin on
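
# Usage sketch (assumptions not stated above: the script is saved as
# mdrun_mpi_omp.sh, a valid project code is supplied for --account, and the
# input file topology.tpr is present in the submission directory):
#
#   sbatch --account=<project> mdrun_mpi_omp.sh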