==== Verbs Job ====
These provisional instructions will be refined once this configuration can be fully tested on the new clusters.
This example uses 64 processes in total on 2 nodes, with each node running 32 processes and thus fully utilizing its 32 cores. The script assumes full nodes are used, so the ratio of ntasks to nodes should be 32 on Graham, which has 32 cores per node. For best performance, NAMD jobs should use full nodes.
{{File
|name=verbs_namd_job.sh
|lang="sh"
|contents=
#!/bin/bash
#
#SBATCH --ntasks=64           # total number of tasks (2 nodes x 32 cores)
#SBATCH --nodes=2
#SBATCH --mem=1024            # memory per node (in MB)
#SBATCH -o slurm.%N.%j.out    # STDOUT
#SBATCH -t 0:05:00            # time limit (HH:MM:SS)
cat << EOF > nodefile.py
#!/usr/bin/python
# Convert the compact SLURM node list (e.g. "gra[100-102,105]") into a
# charmrun nodelist file with one "host <nodename>" line per node.
import sys
a = sys.argv[1]
nodefile = open("nodefile.dat", "w")
cluster = a[0:3]  # assumes a 3-letter node-name prefix, as on graham ("gra")
for st in a[len(cluster):].strip("[]").split(","):
    d = st.split("-")
    start = int(d[0])
    finish = start
    if len(d) == 2:
        finish = int(d[1])
    for i in range(start, finish + 1):
        nodefile.write("host " + cluster + str(i) + "\n")
nodefile.close()
EOF
# Generate nodefile.dat from the node list assigned by the scheduler
python nodefile.py $SLURM_NODELIST
NODEFILE=nodefile.dat
OMP_NUM_THREADS=32   # processes per node
P=$SLURM_NTASKS      # total number of processes

module load namd-verbs/2.12
CHARMRUN=`which charmrun`
NAMD2=`which namd2`
$CHARMRUN ++p $P ++ppn $OMP_NUM_THREADS ++nodelist $NODEFILE $NAMD2 +idlepoll apoa1.namd
}}
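For a job that is assigned nodes gra100 and gra101, for example, the embedded Python helper above writes a <code>nodefile.dat</code> of the form:

 host gra100
 host gra101

Assuming the input file <code>apoa1.namd</code> (and any files it references) is present in the submission directory, the script is submitted as a normal batch job:

 sbatch verbs_namd_job.sh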
==== GPU Job ====