Merge pull request #62 from HopkinsIDD/breaking-improvments
Improvements for consistency
jcblemai authored Apr 11, 2024
2 parents 0c30c23 + e5247d5 commit 312d0e3
Showing 251 changed files with 33,775 additions and 27,740 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/ci.yml
@@ -1,6 +1,7 @@
name: unit-tests

on:
workflow_dispatch:
push:
branches:
- main
@@ -30,7 +31,7 @@ jobs:
run: |
source /var/python/3.10/virtualenv/bin/activate
python -m pip install --upgrade pip
python -m pip install flepimop/gempyor_pkg/
python -m pip install "flepimop/gempyor_pkg[test]"
shell: bash
- name: Install local R packages
run: Rscript build/local_install.R
6 changes: 6 additions & 0 deletions .gitignore
@@ -65,3 +65,9 @@ Outcomes.egg-info/

# R package manuals
man/
flepimop/gempyor_pkg/.coverage
flepimop/gempyor_pkg/.coverage.kojis-mbp-8.sph.ad.jhsph.edu.6137.959542
flepimop/gempyor_pkg/get_value.prof
flepimop/gempyor_pkg/tests/seir/.coverage
flepimop/gempyor_pkg/tests/seir/.coverage.kojis-mbp-8.sph.ad.jhsph.edu.90615.974746
flepimop/gempyor_pkg/.coverage
65 changes: 55 additions & 10 deletions batch/AWS_inference_runner.sh
@@ -3,7 +3,7 @@
set -x

# Expected environment variables from AWS Batch env
# S3_MODEL_DATA_PATH location in S3 with the code, data, and dvc pipeline to run
# S3_MODEL_PROJECT_PATH location in S3 with the code, data, and dvc pipeline to run
# DVC_OUTPUTS the names of the directories with outputs to save in S3, separated by a space
# SIMS_PER_JOB is the number of sims to run per job
# JOB_NAME the name of the job
@@ -40,7 +40,7 @@ aws configure set default.s3.multipart_chunksize 8MB

# Copy the complete model + data package from S3 and
# install the local R packages
aws s3 cp --quiet $S3_MODEL_DATA_PATH model_data.tar.gz
aws s3 cp --quiet $S3_MODEL_PROJECT_PATH model_data.tar.gz
mkdir model_data
tar -xzf model_data.tar.gz -C model_data # chadi: removed v(erbose) option here as it floods the log with data we have anyway from the s3 bucket
cd model_data
@@ -106,9 +106,20 @@ if [ -n "$LAST_JOB_OUTPUT" ]; then # -n Checks if the length of a string is non
fi
for liketype in "global" "chimeric"
do
export OUT_FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name('$FLEPI_RUN_INDEX','$FLEPI_PREFIX/$FLEPI_RUN_INDEX/$liketype/intermediate/%09d.'% $FLEPI_SLOT_INDEX,$FLEPI_BLOCK_INDEX-1,'$filetype','$extension'))")
export OUT_FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name(run_id='$FLEPI_RUN_INDEX',
prefix='$FLEPI_PREFIX/$FLEPI_RUN_INDEX',
inference_filepath_suffix='$liketype/intermediate',
inference_filename_prefix='%09d.'% $FLEPI_SLOT_INDEX,
index=$FLEPI_BLOCK_INDEX-1,
ftype='$filetype',
extension='$extension'))")
if [ $FLEPI_BLOCK_INDEX -eq 1 ]; then
export IN_FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name('$RESUME_FLEPI_RUN_INDEX','$FLEPI_PREFIX/$RESUME_FLEPI_RUN_INDEX/$liketype/final/',$FLEPI_SLOT_INDEX,'$filetype','$extension'))")
export IN_FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name(run_id='$RESUME_FLEPI_RUN_INDEX',
prefix='$FLEPI_PREFIX/$RESUME_FLEPI_RUN_INDEX',
inference_filepath_suffix='$liketype/final',
index=$FLEPI_SLOT_INDEX,
ftype='$filetype',
extension='$extension'))")
else
export IN_FILENAME=$OUT_FILENAME
fi
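
Aside (not part of the commit): a minimal Python sketch of the keyword-argument call that the embedded `python -c` snippets above now use. The values below are placeholders standing in for the FLEPI_* environment variables; the path layout returned is whatever gempyor's `file_paths.create_file_name` produces.

```python
# Sketch only: mirrors the OUT_FILENAME / resume IN_FILENAME calls above,
# with placeholder values instead of the FLEPI_* environment variables.
from gempyor import file_paths

flepi_run_index = "20240411_120000"          # $FLEPI_RUN_INDEX (placeholder)
flepi_prefix = "USA"                         # $FLEPI_PREFIX (placeholder)
flepi_slot_index, flepi_block_index = 1, 1   # $FLEPI_SLOT_INDEX, $FLEPI_BLOCK_INDEX

# OUT_FILENAME: intermediate file for this block, name prefixed with the zero-padded slot.
out_filename = file_paths.create_file_name(
    run_id=flepi_run_index,
    prefix=f"{flepi_prefix}/{flepi_run_index}",
    inference_filepath_suffix="global/intermediate",
    inference_filename_prefix="%09d." % flepi_slot_index,
    index=flepi_block_index - 1,
    ftype="llik",                            # $filetype (placeholder)
    extension="parquet",                     # $extension (placeholder)
)

# IN_FILENAME when resuming (block 1): final file of the previous run, indexed by slot.
in_filename = file_paths.create_file_name(
    run_id="20240401_000000",                # $RESUME_FLEPI_RUN_INDEX (placeholder)
    prefix=f"{flepi_prefix}/20240401_000000",
    inference_filepath_suffix="global/final",
    index=flepi_slot_index,
    ftype="llik",
    extension="parquet",
)
print(out_filename, in_filename)
```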
@@ -146,32 +157,66 @@ echo "***************** DONE RUNNING inference_slot.R *****************"
echo "***************** UPLOADING RESULT TO S3 *****************"
for type in "seir" "hosp" "llik" "spar" "snpi" "hnpi" "hpar"
do
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name('$FLEPI_RUN_INDEX','$FLEPI_PREFIX/$FLEPI_RUN_INDEX/chimeric/intermediate/%09d.'% $FLEPI_SLOT_INDEX,$FLEPI_BLOCK_INDEX,'$type','parquet'))")
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name(run_id='$FLEPI_RUN_INDEX',
prefix='$FLEPI_PREFIX/$FLEPI_RUN_INDEX',
inference_filepath_suffix='chimeric/intermediate',
inference_filename_prefix='%09d.'% $FLEPI_SLOT_INDEX,
index=$FLEPI_BLOCK_INDEX,
ftype='$type',
extension='parquet'))")
aws s3 cp --quiet $FILENAME $S3_RESULTS_PATH/$FILENAME
done
for type in "seed"
do
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name('$FLEPI_RUN_INDEX','$FLEPI_PREFIX/$FLEPI_RUN_INDEX/chimeric/intermediate/%09d.'% $FLEPI_SLOT_INDEX,$FLEPI_BLOCK_INDEX,'$type','csv'))")
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name(run_id='$FLEPI_RUN_INDEX',
prefix='$FLEPI_PREFIX/$FLEPI_RUN_INDEX',
inference_filepath_suffix='chimeric/intermediate',
inference_filename_prefix='%09d.'% $FLEPI_SLOT_INDEX,
index=$FLEPI_BLOCK_INDEX,
ftype='$type',
extension='csv'))")
aws s3 cp --quiet $FILENAME $S3_RESULTS_PATH/$FILENAME
done
for type in "seed"
do
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name('$FLEPI_RUN_INDEX','$FLEPI_PREFIX/$FLEPI_RUN_INDEX/global/intermediate/%09d.'% $FLEPI_SLOT_INDEX,$FLEPI_BLOCK_INDEX,'$type','csv'))")
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name(run_id='$FLEPI_RUN_INDEX',
prefix='$FLEPI_PREFIX/$FLEPI_RUN_INDEX',
inference_filepath_suffix='global/intermediate',
inference_filename_prefix='%09d.'% $FLEPI_SLOT_INDEX,
index=$FLEPI_BLOCK_INDEX,
ftype='$type',
extension='csv'))")
aws s3 cp --quiet $FILENAME $S3_RESULTS_PATH/$FILENAME
done
for type in "seir" "hosp" "llik" "spar" "snpi" "hnpi" "hpar" "memprof"
do
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name('$FLEPI_RUN_INDEX','$FLEPI_PREFIX/$FLEPI_RUN_INDEX/global/intermediate/%09d.'% $FLEPI_SLOT_INDEX,$FLEPI_BLOCK_INDEX,'$type','parquet'))")
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name(run_id='$FLEPI_RUN_INDEX',
prefix='$FLEPI_PREFIX/$FLEPI_RUN_INDEX',
inference_filepath_suffix='global/intermediate',
inference_filename_prefix='%09d.'% $FLEPI_SLOT_INDEX,
index=$FLEPI_BLOCK_INDEX,
ftype='$type',
extension='parquet'))")
aws s3 cp --quiet $FILENAME $S3_RESULTS_PATH/$FILENAME
done
for type in "seir" "hosp" "llik" "spar" "snpi" "hnpi" "hpar" "memprof"
do
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name('$FLEPI_RUN_INDEX','$FLEPI_PREFIX/$FLEPI_RUN_INDEX/global/final/', $FLEPI_SLOT_INDEX,'$type','parquet'))")
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name(run_id='$FLEPI_RUN_INDEX',
prefix='$FLEPI_PREFIX/$FLEPI_RUN_INDEX',
inference_filepath_suffix='global/final',
index=$FLEPI_SLOT_INDEX,
ftype='$type',
extension='parquet'))")
aws s3 cp --quiet $FILENAME $S3_RESULTS_PATH/$FILENAME
done
for type in "seed"
do
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name('$FLEPI_RUN_INDEX','$FLEPI_PREFIX/$FLEPI_RUN_INDEX/global/final/', $FLEPI_SLOT_INDEX,'$type','csv'))")
export FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name(run_id='$FLEPI_RUN_INDEX',
prefix='$FLEPI_PREFIX/$FLEPI_RUN_INDEX',
inference_filepath_suffix='global/final',
index=$FLEPI_SLOT_INDEX,
ftype='$type',
extension='csv'))")
aws s3 cp --quiet $FILENAME $S3_RESULTS_PATH/$FILENAME
done
echo "***************** DONE UPLOADING RESULT TO S3 *****************"
21 changes: 17 additions & 4 deletions batch/AWS_postprocess_runner.sh
@@ -3,7 +3,7 @@
set -x

# Expected environment variables from AWS Batch env
# S3_MODEL_DATA_PATH location in S3 with the code, data, and dvc pipeline to run
# S3_MODEL_PROJECT_PATH location in S3 with the code, data, and dvc pipeline to run
# DVC_OUTPUTS the names of the directories with outputs to save in S3, separated by a space
# SIMS_PER_JOB is the number of sims to run per job
# JOB_NAME the name of the job
@@ -34,7 +34,7 @@ aws configure set default.s3.multipart_chunksize 8MB

# Copy the complete model + data package from S3 and
# install the local R packages
aws s3 cp --quiet $S3_MODEL_DATA_PATH model_data.tar.gz
aws s3 cp --quiet $S3_MODEL_PROJECT_PATH model_data.tar.gz
mkdir model_data
tar -xzf model_data.tar.gz -C model_data # chadi: removed v(erbose) option here as it floods the log with data we have anyway from the s3 bucket
cd model_data
@@ -100,9 +100,22 @@ if [ -n "$LAST_JOB_OUTPUT" ]; then # -n Checks if the length of a string is non
fi
for liketype in "global" "chimeric"
do
export OUT_FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name('$FLEPI_RUN_INDEX','$FLEPI_PREFIX/$FLEPI_RUN_INDEX/$liketype/intermediate/%09d.'% $FLEPI_SLOT_INDEX,$FLEPI_BLOCK_INDEX-1,'$filetype','$extension'))")
export OUT_FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name(
run_id='$FLEPI_RUN_INDEX',
prefix='$FLEPI_PREFIX/$FLEPI_RUN_INDEX',
inference_filepath_suffix='$liketype/intermediate',
inference_filename_prefix='%09d.'% $FLEPI_SLOT_INDEX,
index=$FLEPI_BLOCK_INDEX-1,
ftype='$filetype',
extension='$extension'))")
if [ $FLEPI_BLOCK_INDEX -eq 1 ]; then
export IN_FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name('$RESUME_FLEPI_RUN_INDEX','$FLEPI_PREFIX/$RESUME_FLEPI_RUN_INDEX/$liketype/final/',$FLEPI_SLOT_INDEX,'$filetype','$extension'))")
export IN_FILENAME=$(python -c "from gempyor import file_paths; print(file_paths.create_file_name(
run_id='$RESUME_FLEPI_RUN_INDEX',
prefix='$FLEPI_PREFIX/$RESUME_FLEPI_RUN_INDEX',
inference_filepath_suffix='$liketype/final',
index=$FLEPI_SLOT_INDEX,
ftype='$filetype',
extension='$extension'))")
else
export IN_FILENAME=$OUT_FILENAME
fi
4 changes: 2 additions & 2 deletions batch/AWS_scenario_runner.sh
@@ -3,7 +3,7 @@
set -x

# Expected environment variables from AWS Batch env
# S3_MODEL_DATA_PATH location in S3 with the code, data, and dvc pipeline to run
# S3_MODEL_PROJECT_PATH location in S3 with the code, data, and dvc pipeline to run
# DVC_TARGET the name of the dvc file in the model that should be reproduced locally.
# DVC_OUTPUTS the names of the directories with outputs to save in S3, separated by a space
# S3_RESULTS_PATH location in S3 to store the results
@@ -24,7 +24,7 @@ aws configure set default.s3.multipart_chunksize 8MB

# Copy the complete model + data package from S3 and
# install the local R packages
aws s3 cp --quiet $S3_MODEL_DATA_PATH model_data.tar.gz
aws s3 cp --quiet $S3_MODEL_PROJECT_PATH model_data.tar.gz
mkdir model_data
tar -xvzf model_data.tar.gz -C model_data
cd model_data