#!/bin/bash
#
#
# Run hybrid WARP3D on Linux (MPI + OpenMP)
# Built with Intel Fortran + MPI system
# =========================================
#
# Last updated: March 7, 2019 (RHD)
#
# This bash script runs the "hybrid" version of WARP3D that
# combines MPI and OpenMP. The Intel MKL libraries
# are used by the hypre, Pardiso, and Cluster Pardiso solvers.
#
echo " "
echo ">> Running MPI + OpenMP (hybrid) version of WARP3D on Linux (64)..."
echo " "
#
if [ $# -ne 2 ]; then
echo ">> Usage:"; echo " "
echo " warp3d_script_linux_Intel_hybrid <parms> ( < input ) ( > output )"; echo " "
echo " Requirements:"
echo " "
echo " - model must have domain decomposition w/ domains assigned in blocking"
echo " - or automatic domain assignment in the blocking command"
echo " "
echo " There are 2 parameters:"
echo " "
echo " (1) number of MPI processes (ranks) = # domains defined in blocking"
echo " "
echo " (2) number of OpenMP & MKL threads per rank (these will be set equal)"
echo " "
echo " Guidelines:"
echo " -----------"
echo " "
echo " 1. number of MPI ranks (NP) must = number of domains defined in model input"
echo " We recommend using NP = number of physical processors available"
echo " unless runs for testing purposes"
echo " "
echo " 2. number of OpenMP/MKL threads = number of cores to use on each rank"
echo " "
exit 1
fi
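#
# example invocation (file names and counts are illustrative:
# a model with 4 domains in blocking, 8 threads per rank):
#
#    warp3d_script_linux_Intel_hybrid 4 8 < model.inp > model.out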
#
if [ -z "$WARP3D_HOME" ]; then
printf "[ERROR]\n"
printf "... An environment variable WARP3D_HOME is not set.\n"
printf "Quitting...\n"
exit 1
fi
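#
# WARP3D_HOME is typically exported in the shell startup file,
# e.g. (path shown is illustrative):
#
#    export WARP3D_HOME=$HOME/warp3d_distribution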
#
# set executable file for MPI + OpenMP
#
warp3d_exe="$WARP3D_HOME/run_linux/warp3d_intel.mpi_omp"
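#
# optional sanity check: make sure the executable exists and is
# runnable before attempting the MPI launch
#
if [ ! -x "$warp3d_exe" ]; then
  printf "[ERROR]\n"
  printf "... WARP3D executable not found: %s\n" "$warp3d_exe"
  printf "Quitting...\n"
  exit 1
fi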
#
# set LD_LIBRARY_PATH. Examine the ordering...
#
# Note: at runtime, WARP3D will use the MKL library
# files located in linux_packages/lib included in the
# WARP3D distribution.
# ** This applies even if you have the
# ** Intel compiler system installed on your machine.
# We distribute the most current versions of the
# required MKL libraries. These are backwards
# compatible with older Intel processors.
#
# Intel makes this library freely available
#
# The Intel MPI system must be installed
#
export LD_LIBRARY_PATH=$WARP3D_HOME/linux_packages/lib:$LD_LIBRARY_PATH
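#
# diagnostic (optional): confirm which MKL libraries the runtime
# loader resolves for the executable, e.g.
#
#    ldd "$warp3d_exe" | grep mkl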
#
export NUM_WARP3D_RANKS=$1
export OMP_NUM_THREADS=$2
export MKL_NUM_THREADS=$2
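#
# example: on a node with 2 sockets x 16 cores, a typical hybrid
# layout is 2 ranks x 16 threads = 32 cores (sizes illustrative)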
#
if [ $1 = "0" -o $2 = "0" ]; then
echo " "
echo "[ERROR]"
echo " "
echo "... values for 2 parameters must be > 0 ..."
echo " "
echo "Quitting..."; echo " "
exit 1
fi
#
mpi2019=$(mpirun --version | grep 2019 | wc --lines)
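# (the version banner printed by Intel MPI 2019 contains the
#  string "2019", so mpi2019 = 1 when that runtime is present)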
#
# increase the allowable size of the runtime stack. needed
# for MPI jobs that also use threads
#
ulimit -s 100000
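#
# the per-thread OpenMP stack can be enlarged separately if
# thread stack overflows occur (size shown is illustrative):
#
#    export OMP_STACKSIZE=100m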
#
# set up for Intel MPI 2019
#
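# (I_MPI_PIN_DOMAIN=omp sizes each rank's pinning domain from
#  OMP_NUM_THREADS; I_MPI_FABRICS=shm:ofi uses shared memory
#  within a node and OFI between nodes; I_MPI_DEBUG=1 prints
#  basic startup/pinning information)
#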
if [ $mpi2019 -eq 1 ]; then
export MPI_TYPE_MAX=4096
export I_MPI_PIN_DOMAIN=omp
export I_MPI_FABRICS=shm:ofi
export I_MPI_DEBUG=1
else
echo " "
echo ">>> Must have Intel MPI 2019.0.3 or newer"
echo ">>> Execution not started "
exit 1
fi
#
# touch a core file and make it non-writable so an aborted
# run cannot dump a (possibly huge) core image
#
touch core &> /dev/null
chmod ugo-rwx core
#
# start WARP3D on each of the MPI processes using message
# passing based on shared memory
#
echo " Starting WARP3D with MPI + OpenMP hybrid execution..."
echo " o Number MPI processes: " $NUM_WARP3D_RANKS
echo " o Number OpenMP threads each rank: " $OMP_NUM_THREADS
#
# Let the OS sort out getting MPI daemons started
#
#
mpirun -np $NUM_WARP3D_RANKS "$warp3d_exe"
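#
# for multi-node runs, Intel MPI's mpirun also accepts a host
# file, e.g. (file name illustrative):
#
#    mpirun -np $NUM_WARP3D_RANKS -f hosts.list "$warp3d_exe"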
#
# Cleanup the core file (if it was created)
#
/bin/rm -f core
exit