=== Slurm scripts === <!--T:781-->

<tabs>
<!--T:2833-->
<tab name="Single node">
{{File
|name=script-cfx-local.sh
|lang="bash"
|contents=
#!/bin/bash

<!--T:1647-->
#SBATCH --account=def-group   # Specify account name
#SBATCH --time=00-03:00       # Specify time limit dd-hh:mm
#SBATCH --nodes=1             # Specify single compute node (do not change)
#SBATCH --ntasks-per-node=    # Specify number of cores (maximum values: graham 32 or 44, cedar 32 or 48, beluga 40, narval 64)
#SBATCH --mem=16G             # Specify node memory (optionally set to 0 to allocate all node memory)
#SBATCH --cpus-per-task=1     # Do not change

<!--T:167-->
module load StdEnv/2023       # Applies to: beluga, cedar, graham, narval
module load ansys/2023R2      # Specify 2021R1 or newer module versions

<!--T:1646-->
# append additional cfx5solve command line options as required
if [[ "$CC_CLUSTER" = cedar || "$CC_CLUSTER" = narval ]]; then
   cfx5solve -def YOURFILE.def -start-method "Open MPI Local Parallel" -part $SLURM_CPUS_ON_NODE
else
   cfx5solve -def YOURFILE.def -start-method "Intel MPI Local Parallel" -part $SLURM_CPUS_ON_NODE
fi
}}</tab>

<!--T:2832-->
<tab name="Multinode">
{{File
|name=script-cfx-mult.sh
|lang="bash"
|contents=
#!/bin/bash

<!--T:1643-->
#SBATCH --account=def-group   # Specify account name
#SBATCH --time=00-03:00       # Specify time limit dd-hh:mm
#SBATCH --nodes=2             # Specify multiple compute nodes (2 or more)
#SBATCH --ntasks-per-node=64  # Specify all cores per node (graham 32 or 44, cedar 32 or 48, beluga 40, narval 64)
#SBATCH --mem=0               # Use all memory per compute node (do not change)
#SBATCH --cpus-per-task=1     # Do not change

<!--T:166-->
module load StdEnv/2023       # Applies to: beluga, cedar, graham, narval
module load ansys/2023R2      # Specify 2021R1 or newer module versions

<!--T:4771-->
NNODES=$(slurm_hl2hl.py --format ANSYS-CFX)

<!--T:1644-->
# append additional cfx5solve command line options as required
if [[ "$CC_CLUSTER" = cedar || "$CC_CLUSTER" = narval ]]; then
   cfx5solve -def YOURFILE.def -start-method "Open MPI Distributed Parallel" -par-dist $NNODES
else
   cfx5solve -def YOURFILE.def -start-method "Intel MPI Distributed Parallel" -par-dist $NNODES
fi
<!--T:82-->
}}</tab>
</tabs>
<!--T:84-->
Note: You may see the following error message in your output file; it does not appear to affect the computation: <i>/etc/tmi.conf: No such file or directory</i>.
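
Once the chosen script and your CFX definition file are in the same directory (and YOURFILE.def in the script has been replaced with the real file name), the job can be submitted to the scheduler with sbatch. A minimal sketch, assuming the single node script above was saved as script-cfx-local.sh:

 sbatch script-cfx-local.sh   # submit the job to the Slurm scheduler
 squeue -u $USER              # check whether the job is pending or running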


== Workbench == <!--T:280-->