srun sander.quick.cuda.MPI -O -i input.in -p topol.parm7 -c coord.rst7 -o output.mdout -r restart.rst7
}}

=== Parallel MMPBSA job ===
The example below uses 32 MPI processes. MMPBSA scales linearly because each trajectory frame is processed independently; note that you cannot use more MPI processes than there are frames in the trajectory.
{{File
|name=mmpbsa_MPI.sh
|lang="bash"
|contents=
#!/bin/bash
#SBATCH --ntasks=32
#SBATCH --mem-per-cpu=4000
#SBATCH --time=1:00:00
module load StdEnv/2020 gcc/9.3.0 openmpi/4.0.3 amber/20.9-20.15 scipy-stack
srun MMPBSA.py.MPI -O -i mmpbsa.in -o mmpbsa.dat -sp solvated_complex.parm7 -cp complex.parm7 -rp receptor.parm7 -lp ligand.parm7 -y trajectory.nc
}}
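The <code>mmpbsa.in</code> file passed to <code>-i</code> is an MMPBSA.py input file, not a job script. A minimal sketch, assuming a generalized Born calculation over the first 100 frames (the frame range and the <code>igb</code>/<code>saltcon</code> values are illustrative and should be adjusted for your system):
{{File
|name=mmpbsa.in
|lang="text"
|contents=
Sample MMPBSA.py input: GB calculation over frames 1-100 (illustrative values)
&general
  startframe=1, endframe=100, interval=1,
  verbose=1,
/
&gb
  igb=5, saltcon=0.150,
/
}}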
<!--T:6-->
You can modify the script to fit your job's requirements for computing resources. See [[Running jobs]].