dfnWorks is a parallelized computational suite for generating three-dimensional discrete fracture networks (DFNs) and simulating flow and transport through them. Developed at Los Alamos National Laboratory, it has been used to study flow and transport in fractured media at scales ranging from millimeters to kilometers.
Example script slurm-dfnworks-demo.sh to run the bundled 4_user_rects demo on 1 node of the cpu partition using 4 cores and at most 2 GB of memory. The script copies the example out of the container and rewrites its ncpu setting to match the number of Slurm tasks:
#!/bin/bash --login
#SBATCH --job-name=dfnworks
#SBATCH --output=%x.o%j
#SBATCH --error=%x.e%j
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
#SBATCH --mem=2G
#SBATCH --time=0-00:05:00
#SBATCH --partition=cpu
#SBATCH --qos=cpu
#SBATCH --account=<account>

shopt -q login_shell || exit 1
test -n "$SLURM_NODELIST" || exit 1
test $SLURM_NNODES -eq 1 || exit 1

module load apptainer
module load dfnworks/2.9.6
test -n "$DFNWORKS_CONTAINER" || exit 1

rm -rf demo
apptainer exec \
    "$DFNWORKS_CONTAINER" \
    cp -a /dfnWorks/examples/4_user_rects demo
cd demo

sed -r -e "s/ncpu=[0-9]+/ncpu=int\(os.environ\['SLURM_NTASKS'\]\)/" driver.py > driver-slurm-aware.py

apptainer exec \
    "$DFNWORKS_CONTAINER" \
    python3 driver-slurm-aware.py
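Submit the script with sbatch and monitor it with the usual Slurm tools. A minimal sketch, assuming the script above was saved as slurm-dfnworks-demo.sh in the current directory and that <account> has been replaced with a valid project account:

sbatch slurm-dfnworks-demo.sh     # prints the job ID
squeue -u $USER --name=dfnworks   # watch the job until it leaves the queue
ls dfnworks.o* dfnworks.e*        # stdout/stderr files, named by --output=%x.o%j and --error=%x.e%j
ls demo                           # the copied 4_user_rects example and its results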
Example script slurm-dfnworks.sh to run a dfnWorks driver.py from the current working directory on 1 node of the cpu partition using 8 cores and at most 16 GB of memory. As in the demo script, ncpu is rewritten to match the number of Slurm tasks:
#!/bin/bash --login
#SBATCH --job-name=dfnworks
#SBATCH --output=%x.o%j
#SBATCH --error=%x.e%j
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=8
#SBATCH --mem=16G
#SBATCH --time=0-06:00:00
#SBATCH --partition=cpu
#SBATCH --qos=cpu
#SBATCH --account=<account>

shopt -q login_shell || exit 1
test -n "$SLURM_NODELIST" || exit 1
test $SLURM_NNODES -eq 1 || exit 1

module load apptainer
module load dfnworks/2.9.6
test -n "$DFNWORKS_CONTAINER" || exit 1

sed -r -e "s/ncpu=[0-9]+/ncpu=int\(os.environ\['SLURM_NTASKS'\]\)/" driver.py > driver-slurm-aware.py

apptainer exec \
    "$DFNWORKS_CONTAINER" \
    python3 driver-slurm-aware.py
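This script rewrites whichever driver.py sits in the directory the job is submitted from, so it can help to stage a working directory and check the ncpu substitution before submitting. A hedged sketch, assuming an input deck laid out like the bundled examples; the directory name my-dfn-run and the path ../slurm-dfnworks.sh are placeholders:

# On a login node, load the same modules as the job script so that
# $DFNWORKS_CONTAINER points at the dfnWorks image.
module load apptainer
module load dfnworks/2.9.6

# Stage a working copy of an example (or your own input deck) outside the container.
apptainer exec "$DFNWORKS_CONTAINER" \
    cp -a /dfnWorks/examples/4_user_rects my-dfn-run
cd my-dfn-run

# Apply the same substitution the job script performs and confirm that the only
# change is ncpu=<N> becoming ncpu=int(os.environ['SLURM_NTASKS']).
sed -r -e "s/ncpu=[0-9]+/ncpu=int\(os.environ\['SLURM_NTASKS'\]\)/" driver.py > driver-slurm-aware.py
diff driver.py driver-slurm-aware.py

# Submit from this directory once the diff looks right.
sbatch ../slurm-dfnworks.sh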