diff --git a/figure5.sh b/figure5.sh
deleted file mode 100755
index 3125bb2..0000000
--- a/figure5.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-#
-# This script describes how to reproduce the results of Figure 5.
-# This is just an example for illustration purposes. Different platforms
-# (such as the Summit cluster) may require different setups.
-#
-# Each run of admm_standalone.jl generates iteration logs and timing
-# results. The relevant timing result for Figure 5 is printed at the
-# end of the run in the following form:
-#
-# Branch/iter = %.2f (millisecs)
-#
-# These timing results were used for Figure 5.
-#
-# Prerequisites:
-# - CUDA library files should be accessible before executing this script,
-#   e.g., module load cuda/10.2.89.
-# - CUDA-aware MPI should be available.
-
-export JULIA_CUDA_VERBOSE=1
-export JULIA_MPI_BINARY="system"
-
-DATA=("case2868rte" "case6515rte" "case9241pegase" "case13659pegase" "case19402_goc")
-PQ=(10 20 50 50 500)
-VA=(1000 2000 5000 5000 50000)
-ITER=(6000 15000 35000 45000 30000)
-
-for i in ${!DATA[@]}; do
-    echo "Solving ${DATA[$i]} . . ."
-    julia --project ./src/admm_standalone.jl "./data/${DATA[$i]}" ${PQ[$i]} ${VA[$i]} ${ITER[$i]} true > output_gpu1_${DATA[$i]}.txt 2>&1
-done
-
diff --git a/figure6.sh b/figure6.sh
deleted file mode 100755
index 182fae0..0000000
--- a/figure6.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-#
-# This script describes how to reproduce the results of Figure 6.
-# This is just an example for illustration purposes. Different platforms
-# (such as the Summit cluster) may require different setups.
-#
-# Each run of launch_mpi.jl generates iteration logs and timing
-# results. The relevant timing result for Figure 6 is printed at the
-# end of the run in the following form:
-#
-# (Br+MPI)/iter = %.2f (millisecs)
-#
-# We divide these timing results by the timing results obtained
-# with a single GPU.
-#
-# Prerequisites:
-# - CUDA library files should be accessible before executing this script,
-#   e.g., module load cuda/10.2.89.
-# - CUDA-aware MPI should be available.
-
-export JULIA_CUDA_VERBOSE=1
-export JULIA_MPI_BINARY="system"
-
-DATA=("case2868rte" "case6515rte" "case9241pegase" "case13659pegase" "case19402_goc")
-PQ=(10 20 50 50 500)
-VA=(1000 2000 5000 5000 50000)
-ITER=(5648 13651 30927 41126 28358)
-NGPU=(2 3 4 5 6)
-
-for j in ${!NGPU[@]}; do
-    for i in ${!DATA[@]}; do
-        echo "Solving ${DATA[$i]} using ${NGPU[$j]} GPUs . . ."
-        mpirun -np ${NGPU[$j]} julia --project ./src/launch_mpi.jl "./data/${DATA[$i]}" ${PQ[$i]} ${VA[$i]} ${ITER[$i]} true > output_gpu${NGPU[$j]}_${DATA[$i]}.txt 2>&1
-        mv br_time_gpu.txt br_time_gpu${NGPU[$j]}_${DATA[$i]}.txt
-    done
-done
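The scripts only print the per-iteration timings; the Figure 6 speedup is obtained by dividing each multi-GPU (Br+MPI)/iter value by the single-GPU Branch/iter value from the figure5.sh runs. A minimal post-processing sketch of that division, assuming the output files from figure5.sh and figure6.sh are in the working directory and GNU grep (for -P) is available; the grep patterns match the log lines quoted in the script comments:

for case in case2868rte case6515rte case9241pegase case13659pegase case19402_goc; do
    # Single-GPU baseline from the figure5.sh log ("Branch/iter = %.2f (millisecs)").
    base=$(grep -oP 'Branch/iter = \K[0-9.]+' "output_gpu1_${case}.txt" | tail -n 1)
    for n in 2 3 4 5 6; do
        # Multi-GPU timing from the figure6.sh log ("(Br+MPI)/iter = %.2f (millisecs)").
        t=$(grep -oP '\(Br\+MPI\)/iter = \K[0-9.]+' "output_gpu${n}_${case}.txt" | tail -n 1)
        # Ratio of multi-GPU to single-GPU time per iteration.
        awk -v b="$base" -v t="$t" -v c="$case" -v n="$n" \
            'BEGIN { printf "%s, %d GPUs: %.3f of single-GPU time/iter\n", c, n, t / b }'
    done
done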
diff --git a/figure8.sh b/figure8.sh
deleted file mode 100755
index 2fa71ca..0000000
--- a/figure8.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-#
-# This script describes how to reproduce the results of Figure 8.
-# This is just an example for illustration purposes. Different platforms
-# (such as the Summit cluster) may require different setups.
-#
-# Each run of launch_mpi.jl generates iteration logs and timing
-# results. The relevant timing result for Figure 8 is printed at the
-# end of the run in the following form:
-#
-# (Br+MPI)/iter = %.2f (millisecs)
-#
-# We use these numbers for the timings of 40 CPU cores and use the
-# timings from Figure 6 for 6 GPUs.
-#
-# Prerequisites:
-# - CUDA library files should be accessible before executing this script,
-#   e.g., module load cuda/10.2.89.
-# - CUDA-aware MPI should be available.
-
-export JULIA_CUDA_VERBOSE=1
-export JULIA_MPI_BINARY="system"
-
-DATA=("case2868rte" "case6515rte" "case9241pegase" "case13659pegase" "case19402_goc")
-PQ=(10 20 50 50 500)
-VA=(1000 2000 5000 5000 50000)
-ITER=(5718 13640 30932 41140 28358)
-
-for i in ${!DATA[@]}; do
-    echo "Solving ${DATA[$i]} using 40 CPU cores . . ."
-    mpirun -np 40 julia --project ./src/launch_mpi.jl "./data/${DATA[$i]}" ${PQ[$i]} ${VA[$i]} ${ITER[$i]} false > output_cpu40_${DATA[$i]}.txt 2>&1
-done
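Figure 8 then contrasts these 40-core CPU timings with the 6-GPU timings from the Figure 6 runs. A hedged sketch of that comparison, under the same assumptions as above (both sets of logs present in the working directory, GNU grep):

for case in case2868rte case6515rte case9241pegase case13659pegase case19402_goc; do
    # 40-core CPU timing from the figure8.sh log and 6-GPU timing from the
    # figure6.sh log; both use the "(Br+MPI)/iter = %.2f (millisecs)" line.
    cpu=$(grep -oP '\(Br\+MPI\)/iter = \K[0-9.]+' "output_cpu40_${case}.txt" | tail -n 1)
    gpu=$(grep -oP '\(Br\+MPI\)/iter = \K[0-9.]+' "output_gpu6_${case}.txt" | tail -n 1)
    awk -v c="$cpu" -v g="$gpu" -v k="$case" \
        'BEGIN { printf "%s: 40 CPU cores = %.2f ms/iter, 6 GPUs = %.2f ms/iter\n", k, c, g }'
done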