}}
=== QM/MM distributed multi-GPU job ===
The example below requests eight GPUs.
{{File
|name=pmemd_MPI.sh
|lang="bash"
|contents=
#!/bin/bash
#SBATCH --ntasks=8            # one MPI task per GPU
#SBATCH --cpus-per-task=1
#SBATCH --gpus-per-task=1
#SBATCH --mem-per-cpu=4000
#SBATCH --time=1:00:00
module load StdEnv/2020 gcc/9.3.0 cuda/11.4 openmpi/4.0.3 ambertools/21
# Set up the AmberTools runtime environment
source $EBROOTAMBERTOOLS/amber.sh
# Run the GPU-accelerated QM/MM engine with one MPI rank per GPU
srun sander.quick.cuda.MPI -O -i input.in -p topol.parm7 -c coord.rst7 -o output.mdout -r restart.rst7
}}
<!--T:6-->
You can modify the script to fit your job's requirements for computing resources. See [[Running jobs]].
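A minimal sketch of one such modification, scaled down to four GPUs with a longer wall time (the file name and the specific resource values below are illustrative assumptions, not tested recommendations):

{{File
|name=pmemd_MPI_4gpu.sh
|lang="bash"
|contents=
#!/bin/bash
#SBATCH --ntasks=4            # illustrative: four MPI tasks, one per GPU
#SBATCH --cpus-per-task=1
#SBATCH --gpus-per-task=1
#SBATCH --mem-per-cpu=4000
#SBATCH --time=6:00:00        # illustrative: longer wall time
module load StdEnv/2020 gcc/9.3.0 cuda/11.4 openmpi/4.0.3 ambertools/21
source $EBROOTAMBERTOOLS/amber.sh
srun sander.quick.cuda.MPI -O -i input.in -p topol.parm7 -c coord.rst7 -o output.mdout -r restart.rst7
}}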