=== CPU-only parallel MPI job === <!--T:47-->
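Each tab below requests four whole nodes; <code>--ntasks-per-node</code> is set to the number of CPU cores per node on that cluster (32 on Graham, 48 on Cedar, 40 on Béluga and Niagara, 64 on Narval).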
<tabs>
<tab name="Graham">
{{File
|name=pmemd_MPI_job_graham.sh
|lang="sh"
|contents=
#!/bin/bash
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=32
#SBATCH --mem-per-cpu=2000
#SBATCH --time=1:00:00
module purge
module load StdEnv/2023 gcc/12.3 openmpi/4.1.5 amber/22
srun pmemd.MPI -O -i input.in -p topol.parm7 -c coord.rst7 -o output.mdout -r restart.rst7
}}</tab>
<tab name="Cedar">
{{File
|name=pmemd_MPI_job_cedar.sh
|lang="sh"
|contents=
#!/bin/bash
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=48
#SBATCH --mem-per-cpu=2000
#SBATCH --time=1:00:00
module purge
module load StdEnv/2023 gcc/12.3 openmpi/4.1.5 amber/22
srun pmemd.MPI -O -i input.in -p topol.parm7 -c coord.rst7 -o output.mdout -r restart.rst7
}}</tab>
<tab name="Béluga">
{{File
|name=pmemd_MPI_job_beluga.sh
|lang="sh"
|contents=
#!/bin/bash
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=40
#SBATCH --mem-per-cpu=2000
#SBATCH --time=1:00:00
module purge
module load StdEnv/2023 gcc/12.3 openmpi/4.1.5 amber/22
srun pmemd.MPI -O -i input.in -p topol.parm7 -c coord.rst7 -o output.mdout -r restart.rst7
}}</tab>
<tab name="Narval">
{{File
|name=pmemd_MPI_job_narval.sh
|lang="sh"
|contents=
#!/bin/bash
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=64
#SBATCH --mem-per-cpu=2000
#SBATCH --time=1:00:00
module purge
module load StdEnv/2023 gcc/12.3 openmpi/4.1.5 amber/22
srun pmemd.MPI -O -i input.in -p topol.parm7 -c coord.rst7 -o output.mdout -r restart.rst7
}}</tab>
<tab name="Niagara">
{{File
|name=pmemd_MPI_job_niagara.sh
|lang="sh"
|contents=
#!/bin/bash
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=40
#SBATCH --mem-per-cpu=2000
#SBATCH --time=1:00:00
module purge
module load CCEnv StdEnv/2023 gcc/12.3 openmpi/4.1.5 amber/22
srun pmemd.MPI -O -i input.in -p topol.parm7 -c coord.rst7 -o output.mdout -r restart.rst7
}}</tab>
</tabs>
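Submit the script for your cluster with <code>sbatch</code>, for example on Graham:
{{Command|sbatch pmemd_MPI_job_graham.sh}}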
=== QM/MM distributed multi-GPU job === <!--T:48-->