<!--T:4-->
Neither IBM Platform MPI nor Intel MPI is tightly coupled with our scheduler, so you must tell <tt>starccm+</tt> which hosts to use by means of a file containing the list of available hosts. To produce this file, we provide the <tt>slurm_hl2hl.py</tt> script, which outputs the list of hosts when called with the option <tt>--format STAR-CCM+</tt>; this list can then be written to a file and read by STAR-CCM+. For the same reason, use <tt>--ntasks-per-node=1</tt> and set <tt>--cpus-per-task</tt> to use all cores, as shown in the scripts below.
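For example, a job script can generate the machinefile and compute the matching core count as follows (a minimal sketch; the machinefile name is arbitrary, and both lines mirror the full scripts below):

<pre>
# Write the list of hosts allocated by Slurm, in STAR-CCM+ format, to a machinefile
slurm_hl2hl.py --format STAR-CCM+ > machinefile-$SLURM_JOB_ID

# With --ntasks-per-node=1, the total core count is nodes times cpus-per-task
NCORE=$((SLURM_NTASKS * SLURM_CPUS_PER_TASK))
</pre>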
<!--T:5-->
#SBATCH --account=def-group # Specify some account
#SBATCH --time=00-01:00 # Time limit: dd-hh:mm
#SBATCH --nodes=1 # Specify 1 or more nodes
#SBATCH --cpus-per-task=40 # Request all cores per node
#SBATCH --mem=0 # Request all memory per node
#SBATCH --account=def-group # Specify some account
#SBATCH --time=00-01:00 # Time limit: dd-hh:mm
#SBATCH --nodes=1 # Specify 1 or more nodes
#SBATCH --cpus-per-task=48 # Request all cores per node (48 or 32)
#SBATCH --mem=0 # Request all memory per node
#SBATCH --account=def-group # Specify some account
#SBATCH --time=00-01:00 # Time limit: dd-hh:mm
#SBATCH --nodes=1 # Specify 1 or more nodes
#SBATCH --cpus-per-task=32 # Request all cores per node (32 or 44)
#SBATCH --mem=0 # Request all memory per node
#SBATCH --account=def-group # Specify some account
#SBATCH --time=00-01:00 # Time limit: dd-hh:mm
#SBATCH --nodes=1 # Specify 1 or more nodes
#SBATCH --cpus-per-task=64 # Request all cores per node
#SBATCH --mem=0 # Request all memory per node
NCORE=$((SLURM_NTASKS * SLURM_CPUS_PER_TASK))
starccm+ -jvmargs -Xmx4G -jvmargs -Djava.io.tmpdir=$SLURM_TMPDIR -batch -power -np $NCORE -podkey $LM_PROJECT -licpath $CDLMD_LICENSE_FILE -machinefile $PWD/machinefile-$SLURM_JOB_ID $PWD/your-file.sim -mpi openmpi
}}
</tab>
<tab name="Niagara" > | <tab name="Niagara" > | ||
{{File | {{File | ||
Line 176: | Line 175: | ||
|lang="bash"
|contents=
#!/bin/bash
#SBATCH --time=0-00:30 # Time limit: d-hh:mm
#SBATCH --nodes=1 # Specify 1 or more nodes
#SBATCH --cpus-per-task=40 # Request all cores per node
#SBATCH --ntasks-per-node=1 # Do not change this value
# Retry the run until it exits successfully, up to 5 attempts
i=1
RET=-1
while [ $i -le 5 ] && [ $RET -ne 0 ]; do
  [ $i -eq 1 ] {{!}}{{!}} sleep 5
  echo "Attempt number: "$i
  starccm+ -batch -power -np $NCORE -podkey $LM_PROJECT -machinefile $SLURM_SUBMIT_DIR/machinefile_$SLURM_JOB_ID $SLURM_SUBMIT_DIR/your-simulation-file.java $SLURM_SUBMIT_DIR/your-simulation-file.sim > $SLURM_JOB_ID.results
  RET=$?
  i=$((i+1))
done
exit $RET
}}
</tab>
</tabs>
<translate>