Commit

Scripts to reproduce experiments.
youngdae committed Jun 4, 2021
1 parent c139b82 commit 4bb2609
Showing 15 changed files with 1,196 additions and 45 deletions.
583 changes: 583 additions & 0 deletions Manifest.toml

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions Project.toml
@@ -10,4 +10,5 @@ DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
31 changes: 31 additions & 0 deletions figure5.sh
@@ -0,0 +1,31 @@
#!/bin/bash
#
# This script describes how to reproduce the results of Figure 5.
# This is just an example for illustration purposes. Different platforms
# (such as the Summit cluster) may require different setups.
#
# Each run of admm_standalone.jl generates iteration logs and timing results.
# The timing result relevant to Figure 5 is printed at the end of the run in
# the following form:
#
# Branch/iter = %.2f (millisecs)
#
# The above timing results were used for Figure 5.
#
# Prerequisite:
# - CUDA library files should be accessible before executing this script,
# e.g., module load cuda/10.2.89.
# - CUDA-aware MPI should be available.

export JULIA_CUDA_VERBOSE=1
export JULIA_MPI_BINARY="system"
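
# Editorial addition (hedged sketch, not part of the original commit): a quick
# sanity check that the CUDA toolkit and a GPU are visible before starting the
# long runs, assuming the project uses CUDA.jl.
julia --project -e 'using CUDA; CUDA.versioninfo()' || exit 1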

DATA=("case2868rte" "case6515rte" "case9241pegase" "case13659pegase" "case19402_goc")
PQ=(10 20 50 50 500)
VA=(1000 2000 5000 5000 50000)
ITER=(6000 15000 35000 45000 30000)

for i in ${!DATA[@]}; do
    julia --project ./src/admm_standalone.jl "./data/${DATA[$i]}" ${PQ[$i]} ${VA[$i]} ${ITER[$i]} true > output_gpu1_${DATA[$i]}.txt 2>&1
done
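
# Editorial addition (hedged sketch, not part of the original script): pull the
# final "Branch/iter" timing out of each log produced above; the grep pattern
# assumes the log format quoted in the header comment.
for i in ${!DATA[@]}; do
    echo -n "${DATA[$i]}: "
    grep "Branch/iter" "output_gpu1_${DATA[$i]}.txt" | tail -n 1
done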

35 changes: 35 additions & 0 deletions figure6.sh
@@ -0,0 +1,35 @@
#!/bin/bash
#
# This script describes how to reproduce the results of Figure 6.
# This is just an example for illustration purposes. Different platforms
# (such as the Summit cluster) may require different setups.
#
# Each run of launch_mpi.jl generates iteration logs and timing results.
# The timing result relevant to Figure 6 is printed at the end of the run in
# the following form:
#
# (Br+MPI)/iter = %.2f (millisecs)
#
# We divide the above timing results by the timing results obtained when
# we use a single GPU.
#
# Prerequisite:
# - CUDA library files should be accessible before executing this script,
# e.g., module load cuda/10.2.89.
# - CUDA-aware MPI should be available.

export JULIA_CUDA_VERBOSE=1
export JULIA_MPI_BINARY="system"
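
# Editorial addition (hedged sketch, not part of the original commit):
# JULIA_MPI_BINARY only takes effect when MPI.jl is (re)built, so rebuild it
# once against the system MPI, and (assuming Open MPI) check that this MPI was
# built with CUDA support.
julia --project -e 'using Pkg; Pkg.build("MPI")'
ompi_info --parsable --all | grep -q "mpi_built_with_cuda_support:value:true" \
    || echo "warning: CUDA-aware MPI support not detected"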

DATA=("case2868rte" "case6515rte" "case9241pegase" "case13659pegase" "case19402_goc")
PQ=(10 20 50 50 500)
VA=(1000 2000 5000 5000 50000)
ITER=(6000 15000 35000 45000 30000)
NGPU=(2 3 4 5 6)

for j in ${!NGPU[@]}; do
    for i in ${!DATA[@]}; do
        mpirun -np ${NGPU[$j]} julia --project ./src/launch_mpi.jl "./data/${DATA[$i]}" ${PQ[$i]} ${VA[$i]} ${ITER[$i]} true > output_gpu${NGPU[$j]}_${DATA[$i]}.txt 2>&1
        mv br_time_gpu.txt br_time_gpu${NGPU[$j]}_${DATA[$i]}.txt
    done
done
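
# Editorial addition (hedged sketch, not part of the original script): the
# division described in the header comment, computed for one case and one GPU
# count. It assumes figure5.sh already produced the single-GPU log and that the
# logs contain exactly the "Branch/iter = x.xx" and "(Br+MPI)/iter = x.xx"
# lines quoted above.
single=$(grep "Branch/iter" output_gpu1_case13659pegase.txt | tail -n 1 | awk '{print $3}')
multi=$(grep "(Br+MPI)/iter" output_gpu6_case13659pegase.txt | tail -n 1 | awk '{print $3}')
echo "case13659pegase, 6 GPUs vs 1 GPU: $(echo "scale=2; $multi / $single" | bc)"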
21 changes: 21 additions & 0 deletions figure7.sh
@@ -0,0 +1,21 @@
#!/bin/bash
#
# This script describes how to reproduce the results of Figure 7.
# This is just an example for illustration purposes. Different platforms
# (such as the Summit cluster) may require different setups.
#
# This script needs the br_time_13659pegase.txt file, which is produced by a
# run with 6 GPUs on the case13659pegase example. The file can be obtained by
# running figure6.sh.

function usage() {
    echo "Usage: ./figure7.sh case"
    echo "  case: the file containing the branch computation time of each GPU"
}

if [[ $# != 1 ]]; then
    usage
    exit 1
fi

julia --project ./src/heatmap.jl "$1"
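
# Editorial usage example (hedged, not part of the original script); the file
# name below assumes the naming produced by the figure6.sh sketch above for the
# 6-GPU case13659pegase run:
#   ./figure7.sh br_time_gpu6_case13659pegase.txt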
31 changes: 31 additions & 0 deletions figure8.sh
@@ -0,0 +1,31 @@
#!/bin/bash
#
# This script describes how to reproduce the results of Figure 8.
# This is just an example for illustration purposes. Different platforms
# (such as the Summit cluster) may require different setups.
#
# Each run of launch_mpi.jl generates iteration logs and timing results.
# The timing result relevant to Figure 8 is printed at the end of the run in
# the following form:
#
# (Br+MPI)/iter = %.2f (millisecs)
#
# We use these numbers for the 40-CPU-core timings and reuse the timings
# from Figure 6 for the 6-GPU case.
#
# Prerequisite:
# - CUDA library files should be accessible before executing this script,
# e.g., module load cuda/10.2.89.
# - CUDA-aware MPI should be available.

export JULIA_CUDA_VERBOSE=1
export JULIA_MPI_BINARY="system"

DATA=("case2868rte" "case6515rte" "case9241pegase" "case13659pegase" "case19402_goc")
PQ=(10 20 50 50 500)
VA=(1000 2000 5000 5000 50000)
ITER=(6000 15000 35000 45000 30000)

for i in ${!DATA[@]}; do
    mpirun -np 40 julia --project ./src/launch_mpi.jl "./data/${DATA[$i]}" ${PQ[$i]} ${VA[$i]} ${ITER[$i]} false > output_cpu40_${DATA[$i]}.txt 2>&1
done
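
# Editorial addition (hedged sketch, not part of the original script): gather
# the final "(Br+MPI)/iter" timing from each 40-core CPU log produced above;
# the pattern assumes the log format quoted in the header comment.
for i in ${!DATA[@]}; do
    echo -n "${DATA[$i]} (40 CPU cores): "
    grep "(Br+MPI)/iter" "output_cpu40_${DATA[$i]}.txt" | tail -n 1
done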
19 changes: 0 additions & 19 deletions launch_mpi.jl

This file was deleted.

