No input file modifications are required to restart the analysis.
<!--T:20899-->
</tab>
<tab name="temporary directory script">
#SBATCH --nodes=1 # do not change
<!--T:20900-->
module load abaqus/2021
<!--T:20901-->
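# clear SLURM_GTIDS, which can interfere with the MPI runtime bundled with Abaqus,
# and restrict that runtime to the TCP interconnect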
unset SLURM_GTIDS
export MPI_IC_ORDER='tcp'
echo "SLURM_TMPDIR = " $SLURM_TMPDIR | echo "SLURM_TMPDIR = " $SLURM_TMPDIR | ||
<!--T:20902--> | |||
rm -f testet1* testet2* | rm -f testet1* testet2* | ||
cd $SLURM_TMPDIR | cd $SLURM_TMPDIR | ||
egrep -i "step|restart" testet*.com testet*.msg testet*.sta
<!--T:20903-->
</tab>
<tab name="temporary directory restart script">
#SBATCH --nodes=1 # do not change
<!--T:20904-->
module load abaqus/2021
<!--T:20905-->
unset SLURM_GTIDS
export MPI_IC_ORDER='tcp'
echo "SLURM_TMPDIR = " $SLURM_TMPDIR | echo "SLURM_TMPDIR = " $SLURM_TMPDIR | ||
<!--T:20906--> | |||
rm -f testet2* testet1.lck | rm -f testet2* testet1.lck | ||
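# copy the previous job's testet1* restart files into the temporary directory,
# renaming them to the testet2 prefix used by the restart run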
for f in testet1*; do cp -a "$f" $SLURM_TMPDIR/"testet2${f#testet1}"; done
No input file modifications are required to restart the analysis.
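Once the restart job completes, the continuation can be confirmed from the submission directory by searching the restart job's output files (named testet2 in the scripts above) for step and restart information, mirroring the check already done inside the scripts:
{{Commands
|egrep -i "step{{!}}restart" testet2*.msg testet2*.sta
}}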
<!--T:20907-->
</tab>
</tabs>
=== Multiple node computing === <!--T:20839-->
<!--T:20908-->
{{File
|name="scriptep1-mpi.txt"
#SBATCH --cpus-per-task=1 # Do not change !
<!--T:20909-->
module load abaqus/2021
<!--T:20910-->
unset SLURM_GTIDS
export MPI_IC_ORDER='tcp'
echo "ABAQUSLM_LICENSE_FILE=$ABAQUSLM_LICENSE_FILE" | echo "ABAQUSLM_LICENSE_FILE=$ABAQUSLM_LICENSE_FILE" | ||
<!--T:20911--> | |||
rm -f testep1-mpi* | rm -f testep1-mpi* | ||
<!--T:20912--> | |||
unset hostlist | unset hostlist | ||
nodes="$(slurm_hl2hl.py --format MPIHOSTLIST {{!}} xargs)" | nodes="$(slurm_hl2hl.py --format MPIHOSTLIST {{!}} xargs)" | ||
echo "$mphostlist" > abaqus_v6.env | echo "$mphostlist" > abaqus_v6.env | ||
<!--T:20913-->
abaqus job=testep1-mpi input=myexp-sim.inp \
scratch=$SCRATCH cpus=$SLURM_NTASKS interactive mp_mode=mpi
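# cpus equals the total Slurm task count so the solver starts one MPI process per allocated core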
1) ssh into a cluster, obtain an allocation on a compute node (such as gra100) and run Abaqus interactively, for example:
<!--T:20914-->
{{Commands
|salloc --time=0:30:00 --cpus-per-task=8 --mem=64G --account=def-piname
2) ssh into the cluster again in a second terminal, then ssh into the compute node reserved by the allocation and run top, for example:
<!--T:20915-->
{{Commands|ssh gra100
|top -u $USER}}
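Depending on the cluster's Slurm configuration, a monitoring command can also be attached directly to the running allocation with srun instead of opening a second ssh session onto the node; a minimal sketch (the job ID is a placeholder):
{{Commands
|srun --jobid 123456 --pty top -u $USER
}}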