-
Notifications
You must be signed in to change notification settings - Fork 4
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge branch 'sc2021' of https://github.com/exanauts/ExaTron.jl into …
…sc2021
- Loading branch information
Showing
16 changed files
with
1,357 additions
and
110 deletions.
There are no files selected for viewing
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,32 @@ | ||
#!/bin/bash
#
# This script describes how to reproduce the results of Figure 5.
# This is just an example for illustration purposes. Different platforms
# (such as Summit cluster) may require different setups.
#
# For each run of admm_standalone.jl, it will generate iteration logs
# and timing results. The relevant timing results for Figure 5 are printed
# at the end of its run and will be the following:
#
#   Branch/iter = %.2f (millisecs)
#
# The above timing results were used for Figure 5.
#
# Prerequisite:
#  - CUDA library files should be accessible before executing this script,
#    e.g., module load cuda/10.2.89.
#  - CUDA aware MPI should be available.

# Abort on use of unset variables; we deliberately do NOT use `set -e`
# so one failing case does not stop the remaining runs.
set -u

export JULIA_CUDA_VERBOSE=1
export JULIA_MPI_BINARY="system"

# Index-aligned per-case parameters passed to admm_standalone.jl
# (presumably penalty/step parameters and an iteration limit — see
# src/admm_standalone.jl for their exact meaning).
DATA=("case2868rte" "case6515rte" "case9241pegase" "case13659pegase" "case19402_goc")
PQ=(10 20 50 50 500)
VA=(1000 2000 5000 5000 50000)
ITER=(6000 15000 35000 45000 30000)

for i in "${!DATA[@]}"; do
  echo "Solving ${DATA[$i]} . . ."
  # Capture both stdout and stderr of each run in a per-case log file.
  julia --project ./src/admm_standalone.jl "./data/${DATA[$i]}" \
      "${PQ[$i]}" "${VA[$i]}" "${ITER[$i]}" true \
      > "output_gpu1_${DATA[$i]}.txt" 2>&1 \
    || echo "warning: run for ${DATA[$i]} exited with nonzero status" >&2
done
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,36 @@ | ||
#!/bin/bash
#
# This script describes how to reproduce the results of Figure 6.
# This is just an example for illustration purposes. Different platforms
# (such as Summit cluster) may require different setups.
#
# For each run of launch_mpi.jl, it will generate iteration logs
# and timing results. The relevant timing results for Figure 6 are printed
# at the end of its run and will be the following:
#
#   (Br+MPI)/iter = %.2f (millisecs)
#
# We divide the above timing results by the timing results obtained when
# we use a single GPU.
#
# Prerequisite:
#  - CUDA library files should be accessible before executing this script,
#    e.g., module load cuda/10.2.89.
#  - CUDA aware MPI should be available.

# Abort on use of unset variables; we deliberately do NOT use `set -e`
# so one failing case does not stop the remaining runs.
set -u

export JULIA_CUDA_VERBOSE=1
export JULIA_MPI_BINARY="system"

# Index-aligned per-case parameters passed to launch_mpi.jl (see that
# script for their exact meaning), plus the GPU counts to sweep over.
DATA=("case2868rte" "case6515rte" "case9241pegase" "case13659pegase" "case19402_goc")
PQ=(10 20 50 50 500)
VA=(1000 2000 5000 5000 50000)
ITER=(5648 13651 30927 41126 28358)
NGPU=(2 3 4 5 6)

for j in "${!NGPU[@]}"; do
  for i in "${!DATA[@]}"; do
    echo "Solving ${DATA[$i]} using ${NGPU[$j]} GPUs . . ."
    mpirun -np "${NGPU[$j]}" julia --project ./src/launch_mpi.jl \
        "./data/${DATA[$i]}" "${PQ[$i]}" "${VA[$i]}" "${ITER[$i]}" true \
        > "output_gpu${NGPU[$j]}_${DATA[$i]}.txt" 2>&1 \
      || echo "warning: run for ${DATA[$i]} on ${NGPU[$j]} GPUs failed" >&2
    # Each run writes br_time_gpu.txt; rename it per-case so the next run
    # does not clobber it. Guard the mv in case the run produced no file.
    if [[ -f br_time_gpu.txt ]]; then
      mv br_time_gpu.txt "br_time_gpu${NGPU[$j]}_${DATA[$i]}.txt"
    else
      echo "warning: br_time_gpu.txt missing for ${DATA[$i]} (${NGPU[$j]} GPUs)" >&2
    fi
  done
done
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,21 @@ | ||
#!/bin/bash
#
# This script describes how to reproduce the results of Figure 7.
# This is just an example for illustration purposes. Different platforms
# (such as Summit cluster) may require different setups.
#
# We need the br_time_13659pegase.txt file which is obtained when we run
# with 6 GPUs over the 13659pegase example. The file can be obtained by
# running figure6.sh.

# Abort on use of unset variables.
set -u

# Print usage to stderr (it is a diagnostic, not program output).
usage() {
  echo "Usage: ./figure7.sh case" >&2
  echo "  case: the case file containing branch computation time of each GPU" >&2
}

# Require exactly one argument; use a numeric comparison and exit with a
# nonzero status so callers can detect the usage error.
if [[ $# -ne 1 ]]; then
  usage
  exit 1
fi

julia --project ./src/heatmap.jl "$1"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,32 @@ | ||
#!/bin/bash
#
# This script describes how to reproduce the results of Figure 8.
# This is just an example for illustration purposes. Different platforms
# (such as Summit cluster) may require different setups.
#
# For each run of launch_mpi.jl, it will generate iteration logs
# and timing results. The relevant timing results for Figure 8 are printed
# at the end of its run and will be the following:
#
#   (Br+MPI)/iter = %.2f (millisecs)
#
# We use these numbers for the timings of 40 CPU cores and use the timings
# from Figure 6 for 6 GPUs.
#
# Prerequisite:
#  - CUDA library files should be accessible before executing this script,
#    e.g., module load cuda/10.2.89.
#  - CUDA aware MPI should be available.

# Abort on use of unset variables; we deliberately do NOT use `set -e`
# so one failing case does not stop the remaining runs.
set -u

export JULIA_CUDA_VERBOSE=1
export JULIA_MPI_BINARY="system"

# Index-aligned per-case parameters passed to launch_mpi.jl (see that
# script for their exact meaning). The trailing `false` selects the CPU
# code path (GPU runs in figure6.sh pass `true`).
DATA=("case2868rte" "case6515rte" "case9241pegase" "case13659pegase" "case19402_goc")
PQ=(10 20 50 50 500)
VA=(1000 2000 5000 5000 50000)
ITER=(5718 13640 30932 41140 28358)

for i in "${!DATA[@]}"; do
  echo "Solving ${DATA[$i]} using 40 CPU cores . . ."
  mpirun -np 40 julia --project ./src/launch_mpi.jl \
      "./data/${DATA[$i]}" "${PQ[$i]}" "${VA[$i]}" "${ITER[$i]}" false \
      > "output_cpu40_${DATA[$i]}.txt" 2>&1 \
    || echo "warning: run for ${DATA[$i]} exited with nonzero status" >&2
done
This file was deleted.
Oops, something went wrong.
Oops, something went wrong.