(→Submission Scripts: MPI instructions)
Line 62:
#SBATCH -o slurm.%N.%j.out # STDOUT
#SBATCH -t 0:20:00 # time (HH:MM:SS)
#SBATCH --account=def-specifyaccount
module load namd-multicore/2.12
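Before picking a module, you can list the NAMD variants installed on a cluster (a minimal aside, assuming the Lmod-based module system used on these clusters):

 # list all installed NAMD flavours and versions (Lmod)
 module spider namd
 # show how to load one specific version
 module spider namd-multicore/2.12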
Line 85:
#SBATCH -o slurm.%N.%j.out # STDOUT
#SBATCH -t 0:05:00 # time (HH:MM:SS)
#SBATCH --account=def-specifyaccount
cat << EOF > nodefile.py
Line 117:
NAMD2=`which namd2`
$CHARMRUN ++p $P ++ppn $OMP_NUM_THREADS ++nodelist $NODEFILE $NAMD2 +idlepoll apoa1.namd
}}
==== MPI Job ====
'''NOTE''': Use this only on Cedar, where the verbs version will not work.
{{File
|name=mpi_namd_job.sh
|lang="sh"
|contents=
#!/bin/bash
#
#SBATCH --ntasks 64 # number of MPI tasks
#SBATCH --nodes=2
#SBATCH --mem 4024 # memory per node (MB)
#SBATCH -o slurm.%N.%j.out # STDOUT
#SBATCH -t 0:05:00 # time (HH:MM:SS)
#SBATCH --account=def-specifyaccount
module load namd-mpi/2.12
NAMD2=`which namd2`
srun $NAMD2 apoa1.namd
}}
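As a usage sketch (assuming the script above is saved as mpi_namd_job.sh in the same directory as the apoa1.namd input file), the job is submitted and monitored with the standard SLURM commands:

 # submit the MPI job to the scheduler
 sbatch mpi_namd_job.sh
 # check the state of your queued and running jobs
 squeue -u $USER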
Line 152:
#SBATCH -t 0:05:00 # time (HH:MM:SS)
#SBATCH --gres=gpu:1
#SBATCH --account=def-specifyaccount
module load cuda/8.0.44