Ansys

<!--T:2306-->
module load StdEnv/2023
module load ansys/2023R2      # or newer module versions
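Other versions may be installed besides 2023R2. As a quick check (assuming the standard Lmod tooling available on these clusters) you can list the installed Ansys modules before loading one:

module spider ansys           # list all installed ansys module versions
module spider ansys/2023R2    # show what is needed to load a specific version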


<!--T:4734-->
<!--T:2206-->
module load StdEnv/2023
module load ansys/2023R2      # or newer module versions
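After loading, you can confirm exactly which versions ended up in your environment (module list is a standard Lmod command, shown here only as a sanity check):

module list                   # show the currently loaded modules, including ansys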


<!--T:4737-->
#SBATCH --time=00-03:00      # Specify time limit dd-hh:mm
#SBATCH --nodes=1            # Specify number of compute nodes (1 or more)
#SBATCH --ntasks-per-node=64  # Specify number of cores per node (narval 64 or less)
#SBATCH --mem=0              # Do not change (allocates all memory per compute node)
#SBATCH --cpus-per-task=1    # Do not change
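# Usage sketch (not part of the original script; the filename below is
# hypothetical): save this header into a job script, then submit and
# monitor it with the standard Slurm commands:
#   sbatch script-flu-bynode.sh   # submit the job to the scheduler
#   squeue -u $USER               # check your pending and running jobs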
<!--T:5306-->
module load StdEnv/2023
module load ansys/2023R2      # only this version


<!--T:5735-->
<!--T:6306-->
module load StdEnv/2023
module load ansys/2023R2      # only this version


<!--T:6735-->


<!--T:6739-->
<tab name="Multinode (by node, niagara)">
{{File
|name=script-flu-bynode-intel-nia.sh
|lang="bash"
|contents=
#!/bin/bash
<!--T:6740-->
#SBATCH --account=def-group      # Specify account name
#SBATCH --time=00-03:00          # Specify time limit dd-hh:mm
#SBATCH --nodes=1                # Specify number of compute nodes (1 or more)
#SBATCH --ntasks-per-node=80    # Specify number of cores per node (niagara 80 or less)
#SBATCH --mem=0                  # Do not change (allocate all memory per compute node)
#SBATCH --cpus-per-task=1        # Do not change (required parameter)
##SBATCH -p debug                # Uncomment for debug jobs (less than 1hr)
<!--T:6741-->
rm -f cleanup* core*
<!--T:6742-->
module load CCEnv StdEnv/2023    # Do not change
module load ansys/2023R2        # or newer module versions
<!--T:6743-->
# Customize this section to specify your remote license server
# These settings are used instead of your ~/.licenses/ansys.lic
LICSERVER=license3.sharcnet.ca  # Specify license server hostname
FLEXPORT=1055                    # Specify server flex port
INTEPORT=2325                    # Specify server interconnect port
VENDPORT=1793                    # Specify server vendor port
<!--T:6744-->
ssh nia-gw -fNL $FLEXPORT:$LICSERVER:$FLEXPORT      # Do not change
ssh nia-gw -fNL $INTEPORT:$LICSERVER:$INTEPORT      # Do not change
ssh nia-gw -fNL $VENDPORT:$LICSERVER:$VENDPORT      # Do not change
export ANSYSLMD_LICENSE_FILE=$FLEXPORT@localhost    # Do not change
export ANSYSLI_SERVERS=$INTEPORT@localhost          # Do not change
<!--T:6745-->
slurm_hl2hl.py --format ANSYS-FLUENT > /tmp/machinefile-$SLURM_JOB_ID  # Write allocated hostnames to a machinefile
NCORES=$((SLURM_NNODES * SLURM_NTASKS_PER_NODE * SLURM_CPUS_PER_TASK)) # Total cores = nodes x tasks-per-node x cpus-per-task
<!--T:6746-->
if [ ! -L "$HOME/.ansys" ]; then
  echo "ERROR: A link to a writable .ansys directory does not exist."
  echo 'Remove ~/.ansys if one exists and then run: ln -s $SCRATCH/.ansys ~/.ansys'
  echo "Then try submitting your job again. Aborting the current job now!"
elif [ ! -L "$HOME/.fluentconf" ]; then
  echo "ERROR: A link to a writable .fluentconf directory does not exist."
  echo 'Remove ~/.fluentconf if one exists and run: ln -s $SCRATCH/.fluentconf ~/.fluentconf'
  echo "Then try submitting your job again. Aborting the current job now!"
elif [ ! -L "$HOME/.flrecent" ]; then
  echo "ERROR: A link to a writable .flrecent file does not exist."
  echo 'Remove ~/.flrecent if one exists and then run: ln -s $SCRATCH/.flrecent ~/.flrecent'
  echo "Then try submitting your job again. Aborting the current job now!"
else
  mkdir -pv $SCRATCH/.ansys
  mkdir -pv $SCRATCH/.fluentconf
  touch $SCRATCH/.flrecent
  # Specify 2d, 2ddp, 3d or 3ddp and replace sample with your journal filename
  if [ "$SLURM_NNODES" == 1 ]; then
    fluent -g 2ddp -t $NCORES -affinity=0 -i cavity.jou
  else
    fluent -g 2ddp -t $NCORES -affinity=0 -cnf=/tmp/machinefile-$SLURM_JOB_ID -mpi=intel -ssh -i cavity.jou
  fi
fi
}}
</tab>
</tabs>
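The symlink checks in the niagara script above assume a one-time setup that relocates the Ansys state files to scratch. Based on the script's own error messages, that setup is:

rm -rf ~/.ansys ~/.fluentconf ~/.flrecent     # remove any existing copies first
ln -s $SCRATCH/.ansys ~/.ansys
ln -s $SCRATCH/.fluentconf ~/.fluentconf
ln -s $SCRATCH/.flrecent ~/.flrecent

If you use a default license server instead of the remote-server settings in the script, the ~/.licenses/ansys.lic file it mentions typically contains two lines of this form (the server and ports here simply reuse the SHARCNET values from the script, as an illustration):

setenv("ANSYSLMD_LICENSE_FILE", "1055@license3.sharcnet.ca")
setenv("ANSYSLI_SERVERS", "2325@license3.sharcnet.ca")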

