notepad .ssh/config
ssh staging
git clone https://github.com/HopkinsIDD/flepiMoP.git
# Clone the project/data repositories alongside flepiMoP.
git clone https://github.com/HopkinsIDD/Flu_USA.git
git clone https://github.com/HopkinsIDD/COVID19_USA.git
cd COVID19_USA
# A nested flepiMoP clone inside the project repo.
git clone https://github.com/HopkinsIDD/flepiMoP.git
cd ..
# or any other data directories
git config --global credential.helper store
git config --global user.name "{NAME SURNAME}"
git config --global user.email YOUREMAIL@EMAIL.COM
git config --global pull.rebase false # so you use merge as the default reconciliation method
cd COVID19_USA
# Cache git credentials in memory, then bring both repos up to date on main.
git config --global credential.helper cache
git pull
git checkout main
git pull
# The nested flepiMoP clone is updated the same way.
cd flepiMoP
git pull
git checkout main
git pull
cd ..
sudo docker pull hopkinsidd/flepimop:latest
# Start an interactive container from the flepimop image, bind-mounting the
# project repo, the flepiMoP repo, and the SSH keys into the container.
sudo docker run -it \
-v /home/ec2-user/COVID19_USA:/home/app/drp/COVID19_USA \
-v /home/ec2-user/flepiMoP:/home/app/drp/flepiMoP \
-v /home/ec2-user/.ssh:/home/app/.ssh \
hopkinsidd/flepimop:latest
cd ~/drp
# Environment variables read by the inference scripts below; replace the
# placeholder values in braces with your own.
export CENSUS_API_KEY={A CENSUS API KEY}
export FLEPI_RESET_CHIMERICS=TRUE
export COMPUTE_QUEUE="Compartment-JQ-1588569574"
export VALIDATION_DATE="2023-01-29"
# S3 location of a previous run to resume from.
export RESUME_LOCATION=s3://idd-inference-runs/USA-20230122T145824
export FLEPI_RUN_INDEX=FCH_R16_lowBoo_modVar_ContRes_blk4_Jan29_tsvacc
export CONFIG_PATH=config_FCH_R16_lowBoo_modVar_ContRes_blk4_Jan29_tsvacc.yml
export FLEPI_MEM_PROFILE=TRUE
export FLEPI_MEM_PROF_ITERS=50
cd ~/drp
export PROJECT_PATH=$(pwd)/COVID19_USA
export GT_DATA_SOURCE="csse_case, fluview_death, hhs_hosp"
cd ~/drp
export PROJECT_PATH=$(pwd)/Flu_USA
cd $PROJECT_PATH
export FLEPI_PATH=$(pwd)/flepiMoP
cd $FLEPI_PATH
git checkout main
git pull
# Cache git credentials in memory for 300000 seconds (~3.5 days).
git config --global credential.helper 'cache --timeout 300000'
#install gempyor and the R modules. There should be no error, please report if not.
# Sometimes you might need to run the next line two times because inference depends
# on report.generation, which is installed later, in alphabetical order.
# (or if you know R well enough to fix that 😊)
Rscript build/local_install.R # warnings are ok; there should be no error.
# NOTE(review): the trailing '&' runs each install in the background, so these
# three commands race each other — confirm this is intentional.
python -m pip install --upgrade pip &
pip install -e flepimop/gempyor_pkg/ &
pip install boto3 &
cd ..
cd $PROJECT_PATH
git pull
git checkout main
git reset --hard && git clean -f -d # this deletes everything that is not on github in this repo !!!
rm -rf model_output data/us_data.csv data-truth &&
rm -rf data/mobility_territories.csv data/geodata_territories.csv &&
rm -rf data/seeding_territories.csv &&
rm -rf data/seeding_territories_Level5.csv data/seeding_territories_Level67.csv
# don't delete model_output if you have another run in //
rm -rf $PROJECT_PATH/model_output
export CONFIG_PATH=config_FCH_R16_lowBoo_modVar_ContRes_blk4_Jan29_tsvacc.yml # if you haven't already done this
# Build the US model setup (writes generated data into the project repo).
Rscript $FLEPI_PATH/datasetup/build_US_setup.R
# For covid do
Rscript $FLEPI_PATH/datasetup/build_covid_data.R
# For Flu do
Rscript $FLEPI_PATH/datasetup/build_flu_data.R
flepimop-inference-main -c $CONFIG_PATH -j 1 -n 1 -k 1
rm -r model_output
aws configure
python $FLEPI_PATH/batch/inference_job_launcher.py --aws -c $CONFIG_PATH -q $COMPUTE_QUEUE
## FIX THIS TO REFLECT AWS OPTIONS
python $FLEPI_PATH/batch/inference_job_launcher.py --aws \
-c $CONFIG_PATH \
-p $FLEPI_PATH \
--data-path $PROJECT_PATH \
--upload-to-s3 True \
--id $FLEPI_RUN_INDEX \
--restart-from-location $RESUME_LOCATION
cd $PROJECT_PATH
$FLEPI_PATH/batch/inference_job_launcher.py --aws -c $CONFIG_PATH -q $COMPUTE_QUEUE
cd $PROJECT_PATH
$FLEPI_PATH/batch/inference_job_launcher.py --aws -c $CONFIG_PATH -q $COMPUTE_QUEUE -j 1 -k 1
cd $PROJECT_PATH
$FLEPI_PATH/batch/inference_job_launcher.py --aws -c $CONFIG_PATH -q $COMPUTE_QUEUE --resume-carry-seeding --restart-from-location $RESUME_LOCATION
cd $PROJECT_PATH
$FLEPI_PATH/batch/inference_job_launcher.py --aws -c $CONFIG_PATH -q $COMPUTE_QUEUE --resume-discard-seeding --restart-from-location $RESUME_LOCATION # was $COVID_PATH (old variable name); other examples here use $FLEPI_PATH
git add data/
# Identify yourself to git, then commit and push the generated data.
git config --global user.email "[email]"
git config --global user.name "[github username]"
git commit -m"scenario run initial"
# Extract the current branch name from `git branch` (the line marked with '*').
branch=$(git branch | sed -n -e 's/^\* \(.*\)/\1/p')
git push --set-upstream origin $branch
git checkout main
git pull

Launching USA-20230426T135628_inference_med on aws...
>> Job array: 300 slot(s) X 5 block(s) of 55 simulation(s) each.
>> Final output will be: s3://idd-inference-runs/USA-20230426T135628/model_output/
>> Run id is SMH_R17_noBoo_lowIE_phase1_blk1
>> config is config_SMH_R17_noBoo_lowIE_phase1_blk1.yml
>> FLEPIMOP branch is main with hash 3773ed8a20186e82accd6914bfaf907fd9c52002
>> DATA branch is R17 with hash 6f060fefa9784d3f98d88a313af6ce433b1ac913

cd $PROJECT_PATH
$FLEPI_PATH/batch/inference_job_launcher.py -c $CONFIG_PATH -q $COMPUTE_QUEUE --resume-carry-seeding --restart-from-location $RESUME_LOCATION # was $COVID_PATH (old variable name)

Tutorial on how to install and run flepiMoP on a supported HPC with slurm.
slurm-*_*.out

$ curl -LsSf -o flepimop-install-<cluster-name> https://raw.githubusercontent.com/HopkinsIDD/flepiMoP/refs/heads/main/bin/flepimop-install-<cluster-name>
$ chmod +x flepimop-install-<cluster-name>
$ ./flepimop-install-<cluster-name>
$ conda activate flepimop-env
$ rm flepimop-install-<cluster-name> flepimop-install
$ ./bin/flepimop-install-<cluster-name>
$ ./batch/hpc_init <cluster-name>
$ export PROJECT_PATH="$FLEPI_PATH/examples/tutorials/"
$ cd $PROJECT_PATH
$ flepimop batch-calibrate \
--blocks 1 \
--chains 4 \
--samples 20 \
--simulations 100 \
--time-limit 30min \
--slurm \
--nodes 4 \
--cpus 1 \
--memory 1G \
--extra 'partition=<your partition, if relevant>' \
--extra 'email=<your email, if relevant>' \
--skip-checkout \
-vvv \
config_sample_2pop_inference.yml

$ export PROJECT_PATH="$FLEPI_PATH/examples/simple_usa_statelevel/"
$ cd $PROJECT_PATH
$ flepimop batch-calibrate \
--blocks 1 \
--chains 4 \
--samples 20 \
--simulations 100 \
--time-limit 30min \
--slurm \
--nodes 1 \
--cpus 4 \
--memory 8G \
--extra 'partition=<your partition, if relevant>' \
--extra 'email=<your email, if relevant>' \
--skip-checkout \
-vvv \
simple_usa_statelevel.yml

$ flepimop batch-calibrate \
--blocks 1 \
--chains 4 \
--samples 20 \
--simulations 500 \
--time-limit 2hr \
--slurm \
--nodes 1 \
--cpus 4 \
--memory 24GB \
--extra 'partition=<your partition, if relevant>' \
--extra 'email=<your email, if relevant>' \
--skip-checkout \
--estimate \
--estimate-runs 6 \
--estimate-interval 0.8 \
--estimate-vary simulations \
--estimate-factors simulations \
--estimate-measurements time \
--estimate-measurements memory \
--estimate-scale-upper 5 \
--estimate-scale-lower 10 \
-vvv \
simple_usa_statelevel.yml > simple_usa_statelevel_estimation.log 2>&1 & disown

$ flepimop batch-calibrate \
--blocks 1 \
--chains 4 \
--samples 20 \
--simulations 500 \
--time-limit 2hr \
--slurm \
--nodes 1 \
--cpus 4 \
--memory 24GB \
--from-estimate USA_influpaint_resources.json \
--extra 'partition=<your partition, if relevant>' \
--extra 'email=<your email, if relevant>' \
--skip-checkout \
-vvv \
simple_usa_statelevel.yml

sync:
rsync-model-output:
type: rsync
source: model_output
target: /path/to/an/example-folder
s3-model-output:
type: s3sync
source: model_output
target: s3://my-bucket/and-sub-bucket

$ export PROJECT_PATH="$FLEPI_PATH/examples/tutorials/"
$ cd $PROJECT_PATH
$ flepimop batch-calibrate \
--blocks 1 \
--chains 4 \
--samples 20 \
--simulations 100 \
--time-limit 30min \
--slurm \
--nodes 4 \
--cpus 1 \
--memory 1G \
--extra 'partition=<your partition, if relevant>' \
--extra 'email=<your email, if relevant>' \
--skip-checkout \
--sync-protocol <your sync protocol, either rsync-model-output or s3-model-output in this case> \
-vvv \
config_sample_2pop_inference.yml

[twillard@longleaf-login6 tutorials]$ squeue -p jlessler
JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)
2374868 jlessler sync_sam twillard PD 0:00 1 (Dependency)
2374867_1 jlessler sample_2 twillard R 2:26 1 g1803jles01
2374867_2 jlessler sample_2 twillard R 2:26 1 g1803jles01
2374867_3 jlessler sample_2 twillard R 2:26 1 g1803jles01
2374867_4 jlessler sample_2 twillard R 2:26 1 g1803jles01

[twillard@longleaf-login6 sample_2pop-20250521T190823_Ro_all_test_limits]$ tree -L 4
.
├── manifest.json
└── model_output
└── sample_2pop_Ro_all_test_limits
└── sample_2pop-20250521T190823_Ro_all_test_limits
├── hnpi
├── hosp
├── hpar
├── init
├── llik
├── seir
├── snpi
└── spar
11 directories, 1 file

$ flepimop batch-calibrate \
--blocks 1 \
--chains 4 \
--samples 20 \
--simulations 100 \
--time-limit 30min \
--slurm \
--nodes 4 \
--cpus 1 \
--memory 1G \
--extra 'partition=<your partition, if relevant>' \
--extra 'email=<your email, if relevant>' \
--skip-checkout \
--sync-protocol s3-idd-inference-runs \
-vvv \
config_sample_2pop_inference.yml

$FLEPI_PATH/common/s3-idd-inference-runs.yml

<FLEPI_PATH>

docker pull hopkinsidd/flepimop:latest-dev
docker run -it \
-v <FLEPI_PATH>:/home/app/flepimop \
-v <PROJECT_PATH>:/home/app/drp \
hopkinsidd/flepimop:latest-dev
Go into the code directory and install the R and Python code packages.
```bash
cd $FLEPI_PATH # move to the flepimop directory
Rscript build/local_install.R # Install R packages
pip install --no-deps -e flepimop/gempyor_pkg/ # Install Python package gempyor
cd $PROJECT_PATH # goes to your project repository
rm -r model_output/ # delete the outputs of past run if there are
flepimop-inference-main -c config.yml
flepimop-inference-main -j 1 -n 1 -k 1 -c config.yml
docker pull hopkinsidd/flepimop:latest-dev
# Start the dev image interactively, mounting the flepiMoP source and the
# project directory; replace <FLEPI_PATH>/<PROJECT_PATH> with your local paths.
docker run -it \
-v <FLEPI_PATH>:/home/app/flepimop \
-v <PROJECT_PATH>:/home/app/drp \
hopkinsidd/flepimop:latest-dev
# Inside the container the mounts appear at these fixed locations.
export FLEPI_PATH=/home/app/flepimop/
export PROJECT_PATH=/home/app/drp/
# Install the R packages and the gempyor Python package from the mounted source.
cd $FLEPI_PATH
Rscript build/local_install.R
pip install --no-deps -e flepimop/gempyor_pkg/
cd $PROJECT_PATH
# Remove outputs of any previous run before starting a new one.
rm -rf model_output
flepimop-inference-main -j 1 -n 1 -k 1 -c config.yml
flepimop simulate config.yml
docker pull hopkinsidd/flepimop:latest-dev
# Start the dev image interactively, mounting the flepiMoP source and the
# project directory; replace <FLEPI_PATH>/<PROJECT_PATH> with your local paths.
docker run -it \
-v <FLEPI_PATH>:/home/app/flepimop \
-v <PROJECT_PATH>:/home/app/drp \
hopkinsidd/flepimop:latest-dev
# Inside the container the mounts appear at these fixed locations.
export FLEPI_PATH=/home/app/flepimop/
export PROJECT_PATH=/home/app/drp/
# Install the R packages and the gempyor Python package from the mounted source.
cd $FLEPI_PATH
Rscript build/local_install.R
pip install --no-deps -e flepimop/gempyor_pkg/
cd $PROJECT_PATH
# Remove outputs of any previous run before starting a new one.
rm -rf model_output
flepimop simulate config.yml
docker ps
> festive_feistel
docker start container_name
docker attach container_name