#!/bin/bash --login
#SBATCH --job-name=mdrun_omp_cuda
#SBATCH --output=%x.o%j
#SBATCH --error=%x.e%j
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --gres=gpu:1
#SBATCH --time=0-24:00:00
#SBATCH --mem=16G
#SBATCH --partition=gpu
#SBATCH --qos=gpu
#SBATCH --account=

# Abort if we are not in a login shell or not running under a SLURM allocation.
shopt -q login_shell || exit 1
test -n "$SLURM_NODELIST" || exit 1

# Load the compiler, MPI, and GPU-enabled GROMACS modules.
module load gnu
module load openmpi
module load gromacs/2016.6-gpu

# Set OMP_NUM_THREADS to the same value as --cpus-per-task, with a fallback in
# case it isn't set. SLURM_CPUS_PER_TASK is set to the value of --cpus-per-task,
# but only if --cpus-per-task is explicitly given.
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}

gmx mdrun -deffnm topology -pin on -dlb auto -plumed plumed.dat
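
# Submission sketch (assumptions: the script is saved as mdrun_omp_cuda.sh, the
# "gpu" partition/QOS above exist on the target cluster, and the empty
# --account= value is filled in, either in the script or on the command line):
#
#   sbatch --account=<project> mdrun_omp_cuda.sh
#
# With these settings, a single mdrun process runs with OMP_NUM_THREADS (4 here)
# OpenMP threads pinned to cores (-pin on), dynamic load balancing left to
# GROMACS (-dlb auto), one GPU from the allocation, and PLUMED reading
# plumed.dat (the -plumed flag requires a PLUMED-patched GROMACS build, as
# provided by the gromacs/2016.6-gpu module here).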