====== Gromacs ======
^ Gromacs ^ Plumed ^ Compiler ^ MPI ^
| 4.5.7 | | GNU 5.4.0 | OpenMPI 1.10.7 |
| 5.1.4 | 2.3.8 | GNU 5.4.0 | OpenMPI 1.10.7 |
| 5.1.5 | 2.3.8 | GNU 5.4.0 | OpenMPI 1.10.7 |
| 2016.4 | | | |
| 2016.6 | | | |
| 2018.6 | | | |
| 2018.6 | | | |
| 2018.8 | | | |
| 2018.8 | | | |
| 2019.4 | | | |
| 2019.4 | | | |
| 2019.6 | | | |
| 2019.6 | | | |
| 2020.7 | | | |
| 2021.4 | | | |
| 2021.7 | | | |
| 2022.6 | | | |
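
Which of these builds are actually installed can be listed on a login node (a quick sketch; the exact output depends on the module system configuration):

<code bash>
module avail gromacs
</code>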
+ | |||
+ | < | ||
+ | If required, the Plumed ed CUDA modules are automatically loaded. | ||
+ | </ | ||
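
For example, after loading one of the builds from the table above, ''module list'' should show the matching Plumed and CUDA modules among the loaded ones (the version used here is just an example):

<code bash>
module load gromacs/2022.6
module list
</code>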
=== GMXLIB environment variable ===
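
A minimal sketch of how the variable is typically used, assuming custom force-field and topology files are kept under ''$HOME/gromacs/top'' (Gromacs searches the directories listed in ''GMXLIB'' for shared topology and force-field files):

<code bash>
# Make personal force-field/topology files visible to grompp and pdb2gmx
export GMXLIB=$HOME/gromacs/top
</code>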
<note>
This will initiate a single process and will result in suboptimal performance.
</note>
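
The warning presumably refers to launching ''gmx mdrun'' without telling it how many threads it may use; a sketch of the difference (option values are illustrative):

<code bash>
gmx mdrun -deffnm topology                                    # may run as a single process
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}
gmx mdrun -deffnm topology -ntomp "$OMP_NUM_THREADS" -pin on  # uses the allocated cores
</code>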
+ | |||
+ | === Gromacs 4.5.7 === | ||
+ | |||
+ | < | ||
+ | Loading the gromacs/ | ||
+ | tools energy2bfac, | ||
</ | </ | ||
=== Gromacs 5.1.4 ===

<note>
Loading the gromacs/5.1.4 module also makes available the tools energy2bfac and g_mmpbsa.
</note>
+ | |||
+ | === Gromacs 5.1.5 === | ||
+ | |||
+ | < | ||
+ | Loading the gromacs/ | ||
+ | tools energy2bfac, | ||
+ | </ | ||
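
As a quick check that these extra tools are on the ''PATH'' after loading one of the modules above (a minimal sketch; ''-h'' only prints the tool's help text):

<code bash>
module load gromacs/4.5.7
energy2bfac -h
</code>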
Script ''mdrun-omp.sh'':

<code bash mdrun-omp.sh>
#!/bin/bash -l
#SBATCH --job-name=mdrun_omp
#SBATCH --output=%x.o%j
#SBATCH --cpus-per-task=<cores>
#SBATCH --mem=120G
#SBATCH --partition=cpu
#SBATCH --qos=cpu
#SBATCH --account=<account>

shopt -q login_shell || exit 1
test -n "$SLURM_JOB_ID" || exit 1

module load gnu
module load openmpi
module load gromacs/5.1.5

# Set OMP_NUM_THREADS to the same value as --cpus-per-task with a fallback in
# case it is not set
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}

gmx mdrun -deffnm topology -pin on
</code>
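
The script is submitted from a login node in the usual way:

<code bash>
sbatch mdrun-omp.sh
</code>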
<code bash mdrun-omp.sh>
#!/bin/bash -l
#SBATCH --job-name=mdrun_omp
#SBATCH --output=%x.o%j
#SBATCH --cpus-per-task=<cores>
#SBATCH --mem=120G
#SBATCH --partition=cpu
#SBATCH --qos=cpu
#SBATCH --account=<account>

shopt -q login_shell || exit 1
test -n "$SLURM_JOB_ID" || exit 1

module load gnu
module load openmpi
module load gromacs/<version>

# Set OMP_NUM_THREADS to the same value as --cpus-per-task with a fallback in
# case it is not set
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}

gmx mdrun -deffnm topology -pin on -plumed plumed.dat
</code>
==== Job Gromacs MPI OpenMP ====

=== Gromacs 5.1.5 ===
Script ''mdrun-mpi-omp.sh'':

<code bash mdrun-mpi-omp.sh>
#!/bin/bash -l
#SBATCH --job-name=mdrun_mpi_omp
#SBATCH --output=%x.o%j
#SBATCH --ntasks=<tasks>
#SBATCH --cpus-per-task=<cores>
#SBATCH --mem=120G
#SBATCH --partition=cpu
#SBATCH --qos=cpu
#SBATCH --account=<account>

shopt -q login_shell || exit 1
test -n "$SLURM_JOB_ID" || exit 1

module load gnu
module load openmpi
module load gromacs/5.1.5

# Set OMP_NUM_THREADS to the same value as --cpus-per-task with a fallback in
# case it is not set
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}

mpirun gmx mdrun -deffnm topology -pin on
</code>
<code bash mdrun-mpi-omp.sh>
#!/bin/bash -l
#SBATCH --job-name=mdrun_mpi_omp
#SBATCH --output=%x.o%j
#SBATCH --ntasks=<tasks>
#SBATCH --cpus-per-task=<cores>
#SBATCH --mem=120G
#SBATCH --partition=cpu
#SBATCH --qos=cpu
#SBATCH --account=<account>

shopt -q login_shell || exit 1
test -n "$SLURM_JOB_ID" || exit 1

module load gnu
module load openmpi
module load gromacs/<version>

# Set OMP_NUM_THREADS to the same value as --cpus-per-task with a fallback in
# case it is not set
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}

mpirun gmx mdrun -deffnm topology -pin on -plumed plumed.dat
</code>
Script ''mdrun-mpi-omp-9x3.sh'':

<code bash mdrun-mpi-omp-9x3.sh>
#!/bin/bash -l
#SBATCH --job-name=mdrun_mpi_omp_9x3
#SBATCH --output=%x.o%j
#SBATCH --ntasks=9
#SBATCH --cpus-per-task=3
#SBATCH --time=0-24:00:00
#SBATCH --mem=80G
#SBATCH --partition=cpu
#SBATCH --qos=cpu
#SBATCH --account=<account>

shopt -q login_shell || exit 1
test -n "$SLURM_JOB_ID" || exit 1

module load gnu7
module load openmpi3
module load gromacs/<version>

module list

export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}

mpirun -np 9 gmx mdrun -deffnm meta -pin on -plumed plumed.dat
</code>

<note>
The Plumed module is automatically loaded.
</note>

Script ''mdrun-mpi-omp-9x3.sh'':

<code bash mdrun-mpi-omp-9x3.sh>
#!/bin/bash -l
#SBATCH --job-name=mdrun_mpi_omp_9x3
#SBATCH --output=%x.o%j
#SBATCH --ntasks=9
#SBATCH --cpus-per-task=3
#SBATCH --time=0-24:00:00
#SBATCH --mem=80G
#SBATCH --partition=cpu
#SBATCH --qos=cpu
#SBATCH --account=<account>

shopt -q login_shell || exit 1
test -n "$SLURM_JOB_ID" || exit 1

module load gnu8
module load openmpi3
module load gromacs/<version>

module list

export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}

mpirun -np 9 gmx mdrun -deffnm meta -pin on -plumed plumed.dat
</code>
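
The ''9x3'' suffix encodes the intended decomposition: 9 MPI ranks with 3 OpenMP threads each, 27 cores in total. Inside the job the layout can be sanity-checked with the standard Slurm variables (a sketch):

<code bash>
echo "ranks=$SLURM_NTASKS threads=$OMP_NUM_THREADS cores=$((SLURM_NTASKS * OMP_NUM_THREADS))"
</code>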
- | |||
- | < | ||
- | Plumed module is automatically loaded. | ||
- | </ | ||
- | |||
- | < | ||
- | Partition may be indifferently cpu or knl. | ||
- | </ | ||
==== Job Gromacs OpenMP GPU ====

Script ''mdrun-omp-cuda.sh'':

<code bash mdrun-omp-cuda.sh>
#!/bin/bash -l
#SBATCH --job-name=mdrun_omp_cuda
#SBATCH --output=%x.o%j
#SBATCH --cpus-per-task=<cores>
#SBATCH --gres=gpu:1
#SBATCH --mem=16G
#SBATCH --partition=gpu
#SBATCH --qos=gpu
#SBATCH --account=<account>

shopt -q login_shell || exit 1
test -n "$SLURM_JOB_ID" || exit 1

module load gnu
module load gromacs/<version>

export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}

gmx mdrun -deffnm topology -pin on -dlb auto -plumed plumed.dat
</code>
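
On current Gromacs versions the nonbonded work can also be assigned to the GPU explicitly, instead of relying on the automatic heuristics; a sketch, not site-specific guidance:

<code bash>
gmx mdrun -deffnm topology -nb gpu -pin on -dlb auto -plumed plumed.dat
</code>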
- | |||
- | < | ||
- | Plumed ed CUDA modules are automatically loaded. | ||
- | </ | ||
==== Job Gromacs MPI GPU ====

Script ''mdrun-mpi-omp-gpu-4x2.sh'':

<code bash mdrun-mpi-omp-gpu-4x2.sh>
#!/bin/bash -l
#SBATCH --job-name=mdrun_mpi_omp_gpu
#SBATCH --output=%x.o%j
#SBATCH --ntasks=4
#SBATCH --cpus-per-task=2
#SBATCH --gres=gpu:1
#SBATCH --mem=16G
#SBATCH --partition=gpu
#SBATCH --qos=gpu
#SBATCH --account=<account>

shopt -q login_shell || exit 1
test -n "$SLURM_JOB_ID" || exit 1

module load gnu7
module load openmpi3
module load gromacs/<version>

export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}

mpirun -np 4 gmx mdrun -deffnm meta -dlb auto -plumed plumed.dat
</code>
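
To record which GPUs the four ranks actually see, a quick check can be added to the job script before the ''mpirun'' line (a sketch; assumes ''nvidia-smi'' is available on the gpu partition nodes):

<code bash>
nvidia-smi -L    # list the GPUs visible to this job
</code>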
- | |||
- | < | ||
- | Plumed ed CUDA modules are automatically loaded. | ||
- | </ | ||
=== Gromacs 2021.7 ===

Script ''mdrun-mpi-omp-gpu-4x2.sh'':

<code bash mdrun-mpi-omp-gpu-4x2.sh>
#!/bin/bash -l
#SBATCH --job-name=mdrun_mpi_omp_gpu
#SBATCH --output=%x.o%j
#SBATCH --ntasks=4
#SBATCH --cpus-per-task=2
#SBATCH --gres=gpu:1
#SBATCH --mem=16G
#SBATCH --partition=gpu
#SBATCH --qos=gpu
#SBATCH --account=<account>

shopt -q login_shell || exit 1
test -n "$SLURM_JOB_ID" || exit 1

module load gnu8
module load openmpi3
module load gromacs/2021.7

export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}

mpirun -np 4 gmx mdrun -deffnm meta -dlb auto -plumed plumed.dat
</code>
- | |||
- | < | ||
- | Plumed ed CUDA modules are automatically loaded. | ||
- | </ | ||
calcoloscientifico/userguide/gromacs.1695312198.txt.gz · Ultima modifica: 21/09/2023 18:03 da fabio.spataro