From aa0f6ea715d2b1fefb3151e184a20509c7075c62 Mon Sep 17 00:00:00 2001
From: Sueda Ciftci
Date: Wed, 22 May 2024 16:11:23 +0100
Subject: [PATCH 1/4] creates README and installation docs

---
 .../cleanair/cleanair_data/docs/README.md     | 47 ++++++++++++++-
 .../cleanair_data/docs/installation.md        | 59 +++++++++++++++++++
 .../cleanair/cleanair_data/requirements.txt   | 12 ++++
 3 files changed, 115 insertions(+), 3 deletions(-)
 create mode 100644 containers/cleanair/cleanair_data/docs/installation.md
 create mode 100644 containers/cleanair/cleanair_data/requirements.txt

diff --git a/containers/cleanair/cleanair_data/docs/README.md b/containers/cleanair/cleanair_data/docs/README.md
index ab48c563e..5bc71f967 100644
--- a/containers/cleanair/cleanair_data/docs/README.md
+++ b/containers/cleanair/cleanair_data/docs/README.md
@@ -1,4 +1,45 @@
-# Inistalation
+# London Air Quality Research Project
 
-conda create -n urbanairdata
-conda activate urbanairdata
+This README provides an overview of the data package for the London Air Quality project. The data package includes essential documentation and resources to help you get started with the installation, development, and usage of the data associated with this project.
+
+## Contents
+
+- [📥 Installation](installation.md)
+- [👨‍💻 Developer Guide](developer.md)
+- [🐳 Docker Guide](docker.md)
+- [📊 Datasets](datasets.md)
+- [🔐 Secret Files](secretfile.md)
+- [📋 Cheat Sheet](cheat.md)
+- [🙌 Contributors](contributors.md)
+
+## Overview
+
+The London Air Quality data package provides access to comprehensive datasets and tools to facilitate the analysis and visualization of air quality in London. This package is designed to support researchers, developers, and analysts working on air quality projects.
+
+## 📥 Installation
+
+For detailed instructions on how to install the data package, please refer to the [Installation Guide](installation.md).
+
+## 👨‍💻 Developer Guide
+
+To contribute to the development of this data package or to understand the underlying architecture, refer to the [Developer Guide](developer.md).
+
+## 🐳 Docker Guide
+
+This project includes Docker support for creating consistent and reproducible development environments. For more information, see the [Docker Guide](docker.md).
+
+## 📊 Datasets
+
+Comprehensive details about the datasets included in this project are available in the [Datasets Guide](datasets.md).
+
+## 🔐 Secret Files
+
+To learn about managing and accessing secret files required by the project, refer to the [Secret Files Guide](secretfile.md).
+
+## 📋 Cheat Sheet
+
+A quick reference guide for common commands and configurations can be found in the [Cheat Sheet](cheat.md).
+
+## 🙌 Contributors
+
+We acknowledge and appreciate the contributions from various individuals and organizations. For a list of contributors, see the [Contributors Guide](contributors.md).

diff --git a/containers/cleanair/cleanair_data/docs/installation.md b/containers/cleanair/cleanair_data/docs/installation.md
new file mode 100644
index 000000000..d8fc5f977
--- /dev/null
+++ b/containers/cleanair/cleanair_data/docs/installation.md
@@ -0,0 +1,59 @@
+# Installation
+
+First, we recommend creating a conda environment called `urbanairdata`.
+Our packages work with *python 3.8 or later*, but we recommend python 3.10.
+
+> Please take care to install all packages from the *conda-forge* channel!
+
+```bash
+# create the environment and activate
+conda create -n urbanairdata
+conda activate urbanairdata
+# set the conda-forge channel as the top priority
+conda config --env --add channels conda-forge
+conda config --env --set channel_priority strict
+# now install python, ecCodes, geopandas, plus any other libraries
+conda install python=3.10 geopandas=0.10.2 eccodes=2.26.0
+```
+
+We strongly recommend [installing geopandas](https://geopandas.org/en/stable/getting_started/install.html)
+if you intend to handle the geospatial datasets.
+
+If you experience installation problems, why not try our [docker images](#what-about-docker)?
+
+To contribute to the cleanair package, please also see the [developer guide](developer.md).
+
+## Install cleanair
+
+To install the `cleanair_types` package with its requirements, follow these steps. All the `cleanair` packages depend on the ***cleanair_types*** package for shared type definitions.
+
+```
+pip install -e containers/cleanair_types
+```
+
+To install the `cleanair_data` package with its requirements:
+
+```
+pip install -e containers/cleanair_data
+```
+
+> If you are using an Apple M1 (ARM) processor you may experience unexpected problems, for example installing [psycopg-binary](https://www.psycopg.org/psycopg3/docs/basic/install.html#binary-installation). Our [docker images](#what-about-docker) offer a quick alternative!
+
+## Install the developer dependencies
+
+This line will install all the python packages needed for testing and linting:
+
+```
+pip install -r containers/cleanair/cleanair_data/requirements.txt
+```
+
+## What about docker?
+
+You can build our docker files using the normal docker commands.
+For example:
+
+```bash
+docker build -t cleanair:latest -f containers/dockerfiles/cleanair.Dockerfile containers
+```
+
+If you have difficulties with the system setup [ask an active contributor](contributors.md) for help.
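+
+Before asking for help, it is often worth sanity-checking the environment first. The following is an informal check, not an official part of the setup (`codes_info` is a command-line tool that ships with the conda `eccodes` package):
+
+```bash
+# print the installed geopandas version
+python -c "import geopandas; print(geopandas.__version__)"
+# print the ecCodes version and install paths
+codes_info
+```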
diff --git a/containers/cleanair/cleanair_data/requirements.txt b/containers/cleanair/cleanair_data/requirements.txt
new file mode 100644
index 000000000..e76265f92
--- /dev/null
+++ b/containers/cleanair/cleanair_data/requirements.txt
@@ -0,0 +1,12 @@
+black==23.3.0
+flake8==6.0.0
+mkdocs==1.4.3
+mkdocs-material==9.1.5
+mkdocstrings==0.20.0
+pymdown-extensions==10.0
+pylint==2.17.4
+pytest==7.4.0
+pytest-datadir==1.4.0
+types-pytz==2023.3.0
+types-all==1.1.0
+mypy==1.3.0

From c1572cb310062dfdb64ac7eefc79f09f44204321 Mon Sep 17 00:00:00 2001
From: Sueda Ciftci
Date: Wed, 22 May 2024 16:40:37 +0100
Subject: [PATCH 2/4] creates required initial .md files

---
 .../cleanair/cleanair_data/docs/codebase.md   |  1 +
 .../cleanair_data/docs/contributors.md        | 22 +++++++++++++++++++
 .../cleanair_data/docs/database_connection.md |  3 +--
 .../cleanair/cleanair_data/docs/developer.md  |  5 +++++
 .../cleanair/cleanair_data/docs/docker.md     | 15 +++++++++++++
 .../cleanair/gpflow1_models/contributors.md   | 22 +++++++++++++++++++
 6 files changed, 66 insertions(+), 2 deletions(-)
 create mode 100644 containers/cleanair/cleanair_data/docs/codebase.md
 create mode 100644 containers/cleanair/cleanair_data/docs/contributors.md
 create mode 100644 containers/cleanair/cleanair_data/docs/developer.md
 create mode 100644 containers/cleanair/cleanair_data/docs/docker.md
 create mode 100644 containers/cleanair/gpflow1_models/contributors.md

diff --git a/containers/cleanair/cleanair_data/docs/codebase.md b/containers/cleanair/cleanair_data/docs/codebase.md
new file mode 100644
index 000000000..4d435e652
--- /dev/null
+++ b/containers/cleanair/cleanair_data/docs/codebase.md
@@ -0,0 +1 @@
+# Codebase

diff --git a/containers/cleanair/cleanair_data/docs/contributors.md b/containers/cleanair/cleanair_data/docs/contributors.md
new file mode 100644
index 000000000..529bbeb52
--- /dev/null
+++ b/containers/cleanair/cleanair_data/docs/contributors.md
@@ -0,0 +1,22 @@
+## Active contributors
+
+| Name               | GitHub ID                                             | Email                        | Responsibilities |
+| ------------------ | ----------------------------------------------------- | ---------------------------- | ---------------- |
+| Sueda Ciftci       | [@edasu](https://github.com/edasu)                    |                              | Infrastructure, Software Engineering |
+| Oliver Hamelijnck  | [@defaultobject](https://github.com/defaultobject)    |                              | Modelling |
+| Patrick O'Hara     | [@PatrickOHara](https://github.com/PatrickOHara)      |                              | Validation, visualisation |
+
+## Alumni contributors
+
+| Name               | GitHub ID                                             | Email                        | Responsibilities |
+| ------------------ | ----------------------------------------------------- | ---------------------------- | ---------------- |
+| James Brandreth    | [@jamesbrandreth](https://github.com/jamesbrandreth)  | /                            | Infrastructure, Odysseus |
+| Oscar Giles        | [@OscartGiles](https://github.com/OscartGiles)        |                              | Infrastructure, Prod Database, Kubernetes Cluster |
+| Chance Haycock     | [@chancehaycock](https://github.com/chancehaycock)    |                              | |
+| Christy Nakou      | [@ChristyNou](https://github.com/ChristyNou)          |                              | |
+| Harry Moss         | [@harryjmoss](https://github.com/harryjmoss)          |                              | |
+| David Perez-Suarez | [@dpshelio](https://github.com/dpshelio)              |                              | |
+| James Robinson     | [@jemrobinson](https://github.com/jemrobinson)        |                              | Infrastructure, Prod Database, Kubernetes Cluster |
+| Tim Spain          | [@timspainUCL](https://github.com/timspainUCL)        |                              | |
+| Edward Thorpe-Woods | [@TeddyTW](https://github.com/TeddyTW)               |                              | |
+| James Walsh        | [@dead-water](https://github.com/dead-water)          |                              | Infrastructure, Odysseus |

diff --git a/containers/cleanair/cleanair_data/docs/database_connection.md
b/containers/cleanair/cleanair_data/docs/database_connection.md index 41e4fd6ee..665eadf3a 100644 --- a/containers/cleanair/cleanair_data/docs/database_connection.md +++ b/containers/cleanair/cleanair_data/docs/database_connection.md @@ -5,9 +5,8 @@ This guide describes how to connect to the PostgreSQL database (DB) hosted by Az Before starting this guide, please make sure have completed the following tasks: 1. [Installing the cleanair package](installation.md) -2. [Login to Azure CLI](azure.md) -The recommended way of accessing the database is via our cleanair package and the `urbanair` CLI. +The recommended way of accessing the database is via our cleanair package and the `urbanair_db` CLI. ```bash urbanair_db init production diff --git a/containers/cleanair/cleanair_data/docs/developer.md b/containers/cleanair/cleanair_data/docs/developer.md new file mode 100644 index 000000000..958358cb5 --- /dev/null +++ b/containers/cleanair/cleanair_data/docs/developer.md @@ -0,0 +1,5 @@ +# Developer guide + +**Make sure you have [installed the packages](installation.md) before following this guide.** + +This guide describes all the tools you need for developing & contributing to the codebase of the London Air Quality Project. diff --git a/containers/cleanair/cleanair_data/docs/docker.md b/containers/cleanair/cleanair_data/docs/docker.md new file mode 100644 index 000000000..bb37ec928 --- /dev/null +++ b/containers/cleanair/cleanair_data/docs/docker.md @@ -0,0 +1,15 @@ +# Docker + +This guide walks you through how to build, pull, run and push our docker images. +Before starting this guide: + +- [Install docker!](https://docs.docker.com/get-docker/) +- Please make sure you are familiar with the [basic docker principles](https://docs.docker.com/get-started/) before starting this guide. + +## Contents + +- [Our docker images](#our-docker-images) +- [Pulling a docker image](#pulling) from the [Azure container registry](#azure-container-registry). 
+- [Building a docker image](#building)
+- [Running a docker image](#running) including [mounting a secret file](#mounting-a-secrets-file)
+- [(Optional) Networks of docker containers](#multi-container-networks)

diff --git a/containers/cleanair/gpflow1_models/contributors.md b/containers/cleanair/gpflow1_models/contributors.md
new file mode 100644
index 000000000..529bbeb52
--- /dev/null
+++ b/containers/cleanair/gpflow1_models/contributors.md
@@ -0,0 +1,22 @@
+## Active contributors
+
+| Name               | GitHub ID                                             | Email                        | Responsibilities |
+| ------------------ | ----------------------------------------------------- | ---------------------------- | ---------------- |
+| Sueda Ciftci       | [@edasu](https://github.com/edasu)                    |                              | Infrastructure, Software Engineering |
+| Oliver Hamelijnck  | [@defaultobject](https://github.com/defaultobject)    |                              | Modelling |
+| Patrick O'Hara     | [@PatrickOHara](https://github.com/PatrickOHara)      |                              | Validation, visualisation |
+
+## Alumni contributors
+
+| Name               | GitHub ID                                             | Email                        | Responsibilities |
+| ------------------ | ----------------------------------------------------- | ---------------------------- | ---------------- |
+| James Brandreth    | [@jamesbrandreth](https://github.com/jamesbrandreth)  | /                            | Infrastructure, Odysseus |
+| Oscar Giles        | [@OscartGiles](https://github.com/OscartGiles)        |                              | Infrastructure, Prod Database, Kubernetes Cluster |
+| Chance Haycock     | [@chancehaycock](https://github.com/chancehaycock)    |                              | |
+| Christy Nakou      | [@ChristyNou](https://github.com/ChristyNou)          |                              | |
+| Harry Moss         | [@harryjmoss](https://github.com/harryjmoss)          |                              | |
+| David Perez-Suarez | [@dpshelio](https://github.com/dpshelio)              |                              | |
+| James Robinson     | [@jemrobinson](https://github.com/jemrobinson)        |                              | Infrastructure, Prod Database, Kubernetes Cluster |
+| Tim Spain          | [@timspainUCL](https://github.com/timspainUCL)        |                              | |
+| Edward Thorpe-Woods | [@TeddyTW](https://github.com/TeddyTW)               |                              | |
+| James Walsh        | [@dead-water](https://github.com/dead-water)          |                              | Infrastructure, Odysseus |

From 74528fd34ce5899b0733cf40425291c31fc29d83 Mon Sep 17 00:00:00 2001
From: Sueda Ciftci
Date: Wed, 5 Jun 2024 16:40:35 +0100
Subject: [PATCH 3/4] updating dataset and database md files

---
 .../cleanair/cleanair_data/docs/cheat.md      |  20 --
 .../docs/configure_local_database.md          | 130 +++++++++
 .../cleanair_data/docs/database_connection.md |  13 -
 .../cleanair_data/docs/dataset_APIs.md        | 264 ++++++++++++++++++
 .../cleanair/cleanair_data/docs/datasets.md   |  43 ++-
 .../cleanair/cleanair_data/docs/developer.md  |  46 ++-
 .../cleanair/cleanair_data/docs/scoot.md      |  22 +-
 .../cleanair/cleanair_data/docs/secretfile.md | 108 ++++---
 8 files changed, 540 insertions(+), 106 deletions(-)
 delete mode 100644 containers/cleanair/cleanair_data/docs/cheat.md
 create mode 100644 containers/cleanair/cleanair_data/docs/configure_local_database.md
 delete mode 100644 containers/cleanair/cleanair_data/docs/database_connection.md
 create mode 100644 containers/cleanair/cleanair_data/docs/dataset_APIs.md

diff --git a/containers/cleanair/cleanair_data/docs/cheat.md b/containers/cleanair/cleanair_data/docs/cheat.md
deleted file mode 100644
index f1dcdf87b..000000000
--- a/containers/cleanair/cleanair_data/docs/cheat.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Cheat sheet
-
-Useful commands that you will use regularly when developing code.
-We assume you have [installed cleanair](installation.md) and [logged into Azure CLI](azure.md#sign-into-the-azure-cli).
-
-## Database connections
-
-`
-
-```bash
-# connect to the production database
-urbanair_db init production
-
-# print your Azure database username & access token
-urbanair_db echo dbuser
-urbanair_db echo dbtoken
-
-# get the path to the urbanair CLI secrets file
-export DB_SECRET_FILE="$(urbanair_db config path)/.db_secrets.json"
-```

diff --git a/containers/cleanair/cleanair_data/docs/configure_local_database.md b/containers/cleanair/cleanair_data/docs/configure_local_database.md
new file mode 100644
index 000000000..eb2997cc9
--- /dev/null
+++ b/containers/cleanair/cleanair_data/docs/configure_local_database.md
@@ -0,0 +1,130 @@
+## Installing and Starting PostgreSQL
+
+To install and start PostgreSQL on macOS, you can use Homebrew Services. Below are detailed, step-by-step instructions.
+
+### Step-by-Step Instructions
+
+1. **Install Homebrew:**
+   If Homebrew is not already installed on your system, open your terminal and execute the following command:
+
+   ```bash
+   /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+   ```
+
+   **Documentation:**
+   - [Homebrew Installation](https://brew.sh/)
+
+2. **Install PostgreSQL using Homebrew:**
+   With Homebrew installed, install PostgreSQL by running:
+
+   ```bash
+   brew install postgresql
+   ```
+
+   This command installs the latest version of PostgreSQL available via Homebrew.
+
+   **Documentation:**
+   - [PostgreSQL Homebrew Installation](https://formulae.brew.sh/formula/postgresql)
+
+3. **Install Homebrew Services:**
+   Homebrew Services is an extension for Homebrew that simplifies the process of starting and stopping services. Recent Homebrew versions include it by default, so this step may be unnecessary. If it is missing, install it with:
+
+   ```bash
+   brew tap homebrew/services
+   ```
+
+   **Documentation:**
+   - [Homebrew Services](https://github.com/Homebrew/homebrew-services)
+
+4. **Start PostgreSQL Service:**
+   Start PostgreSQL as a service using Homebrew Services:
+
+   ```bash
+   brew services start postgresql
+   ```
+
+   This command runs PostgreSQL as a background service, which will automatically restart after a reboot.
+
+   **Documentation:**
+   - [Starting Services with Homebrew](https://github.com/Homebrew/homebrew-services#readme)
+
+### If You Installed PostgreSQL Using Conda
+
+To set up the PostgreSQL server and users, follow these steps:
+
+1. **Initialize the Database:**
+
+   ```bash
+   initdb -D mylocal_db
+   ```
+
+2. **Start the PostgreSQL Server:**
+
+   ```bash
+   pg_ctl -D mylocal_db -l logfile start
+   ```
+
+3. **Create a Database:**
+
+   ```bash
+   createdb --owner=${USER} myinner_db
+   ```
+
+4. **Start the Server for Future Sessions:**
+   When you want to work in this environment again, start the server with:
+
+   ```bash
+   pg_ctl -D mylocal_db -l logfile start
+   ```
+
+5. **Stop the Server:**
+   You can stop the server with:
+
+   ```bash
+   pg_ctl -D mylocal_db stop
+   ```
+
+## Creating a Local Secrets File
+
+Refer to the [secret file documentation](secretfile.md) for detailed instructions.
+
+In some cases, your default username may be your OS user. Adjust the username in the secrets file if necessary.
+
+## Create a Database on Your Machine
+
+Create a database named `cleanair_test_db` with the following command:
+
+```bash
+createdb cleanair_test_db
+```
+
+## Create Schema and Roles
+
+To set up the database schema and create roles, follow these steps:
+
+1. **Set the Location of Your Secrets File:**
+
+   ```bash
+   export DB_SECRET_FILE=$(pwd)/.secrets/.db_secrets_offline.json
+   ```
+
+2. 
**Configure Database Roles:**
+   Run the following command to configure database roles:
+
+   ```bash
+   python containers/entrypoints/setup/configure_db_roles.py -s $DB_SECRET_FILE -c configuration/database_role_config/local_database_config.yaml
+   ```
+
+### Insert Static Data
+
+**TODO:** Instructions for inserting static data into the server database need to be provided.
+
+### Check the Database Configuration
+
+Verify that everything is configured correctly by running:
+
+```bash
+pytest containers/tests/test_database_init --secretfile $DB_SECRET_FILE
+```
+
+By following these steps, you will have PostgreSQL installed, configured, and running on your macOS system.

diff --git a/containers/cleanair/cleanair_data/docs/database_connection.md b/containers/cleanair/cleanair_data/docs/database_connection.md
deleted file mode 100644
index 665eadf3a..000000000
--- a/containers/cleanair/cleanair_data/docs/database_connection.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Connecting to the Urbanair database
-
-This guide describes how to connect to the PostgreSQL database (DB) hosted by Azure for the London Air Quality project.
-
-Before starting this guide, please make sure have completed the following tasks:
-
-1. [Installing the cleanair package](installation.md)
-
-The recommended way of accessing the database is via our cleanair package and the `urbanair_db` CLI.
-
-```bash
-urbanair_db init production
-```

diff --git a/containers/cleanair/cleanair_data/docs/dataset_APIs.md b/containers/cleanair/cleanair_data/docs/dataset_APIs.md
new file mode 100644
index 000000000..0683719fa
--- /dev/null
+++ b/containers/cleanair/cleanair_data/docs/dataset_APIs.md
@@ -0,0 +1,264 @@
+# Steps to retrieve and store datasets from an API
+
+## LAQN
+
+### Types of Data Collected
+
+1. **Pollutant Measurements:**
+   - **Nitrogen Dioxide (NO2):** A common air pollutant from vehicle emissions and industrial activities.
+   - **Particulate Matter (PM10 and PM2.5):** Tiny particles that can penetrate the lungs and cause health issues.
+   - **Ozone (O3):** A gas formed by reactions between sunlight and pollutants like volatile organic compounds (VOCs) and nitrogen oxides (NOx).
+   - **Sulphur Dioxide (SO2):** A gas produced by burning fossil fuels containing sulphur.
+   - **Carbon Monoxide (CO):** A gas produced by incomplete combustion of carbon-containing fuels.
+   - **Other Pollutants:** Depending on the site, additional pollutants like VOCs and lead may be measured.
+
+2. **Meteorological Data:**
+   - **Temperature:** Ambient temperature readings.
+   - **Wind Speed and Direction:** Important for understanding pollution dispersion.
+   - **Humidity:** Moisture levels in the air.
+   - **Solar Radiation:** Sunlight intensity, which can affect photochemical reactions in the atmosphere.
+
+### Data Accessibility
+
+- **Real-Time Data:** LAQN provides real-time air quality data that can be accessed through its website or API.
+- **Historical Data:** Users can access historical air quality data for analysis and research purposes.
+- **Data Format:** Data is available in various formats, including JSON, CSV, and through web-based dashboards.
+
+### Accessing LAQN Data
+
+#### Using the LAQN Website
+
+1. **Visit the LAQN website:** The primary portal for accessing LAQN data.
+2. **Data Dashboard:** The website typically features a dashboard where users can view real-time and historical air quality data for different monitoring sites.
+3. **Data Downloads:** Users can download data for specific time periods and pollutants (see the sketch after this list).
+
+#### Using the LAQN API
+
+1. **API Endpoint:** Access the LAQN API to programmatically retrieve air quality data.
+2. **Parameters:** Specify parameters such as `SiteCode`, `StartDate`, `EndDate`, and `SpeciesCode` to customize your data request.
+3. **Example API Call:** Retrieve data in JSON format.
+
+   ```python
+   import requests
+
+   url = 'https://api.erg.ic.ac.uk/AirQuality/Data/Site/SiteCode=KC1/StartDate=2022-06-01/EndDate=2022-12-01/Json'
+   response = requests.get(url)
+
+   if response.status_code == 200:
+       data = response.json()
+       print(data)
+   else:
+       print("Error:", response.status_code)
+   ```
+
+## Satellite
+
+Get satellite data from: ()
+
+API INFO: 
+
+IMPORTANT: Satellite forecast data should become available on the API at:
+
+- 06:30 UTC for 0-48 hours.
+- 08:30 UTC for 49-72 hours.
+
+To get a square(ish) grid we note that a degree of longitude is cos(latitude)
+times a degree of latitude. For London this means that a degree of latitude is about 1.5 times larger than one of longitude. We therefore use 1.5 times as many latitude points as longitude points:
+
+```python
+half_grid = 0.05  # size of half the grid in lat/lon
+n_points_lat = 12  # number of discrete latitude points per satellite box
+n_points_lon = 8  # number of discrete longitude points per satellite box
+
+# bounding box to fetch data for
+sat_bounding_box = {
+    "lat_min": 51.2867601564841,
+    "lat_max": 51.6918741102915,
+    "lon_min": -0.51037511051915,
+    "lon_max": 0.334015522513336,
+}
+
+# mapping from species names to Copernicus categories
+species_to_copernicus = {
+    "NO2": "nitrogen_dioxide",
+    "PM25": "particulate_matter_2.5um",
+    "PM10": "particulate_matter_10um",
+    "O3": "ozone",
+}
+```
+
+### Satellite Grid Builder Documentation
+
+This module provides a method, `build_satellite_grid`, for generating a dataframe of satellite grid points based on a given dataframe of satellite boxes. The grid points are created around each satellite box to facilitate data analysis and mapping.
+
+### Usage Example
+
+1. Define the species mapping to Copernicus categories:
+
+   ```python
+   species_to_copernicus = {
+       "NO2": "nitrogen_dioxide",
+       "PM25": "particulate_matter_2.5um",
+       "PM10": "particulate_matter_10um",
+       "O3": "ozone",
+   }
+   ```
+
+2. Set the expected number of one-hourly records per grib file:
+
+   ```python
+   n_grid_squares_expected = 32  # Adjust as needed
+   ```
+
+3. Specify the list of species using the `Species` Enum:
+
+   ```python
+   from enum import Enum
+
+   class Species(Enum):
+       NO2 = "NO2"
+       PM25 = "PM25"
+       PM10 = "PM10"
+       O3 = "O3"
+   ```
+
+4. Instantiate the SatelliteGridBuilder class and use the `build_satellite_grid` method:
+
+   ```python
+   # Example instantiation
+   satellite_grid_builder = SatelliteGridBuilder(half_grid=0.1, n_points_lat=5, n_points_lon=5)
+
+   # Example usage with a dataframe of satellite boxes (satellite_boxes_df)
+   satellite_grid_df = satellite_grid_builder.build_satellite_grid(satellite_boxes_df)
+   ```
+
+### Parameters
+
+- `half_grid`: Half of the grid spacing around each satellite box.
+- `n_points_lat`: Number of grid points along the latitude for each box.
+- `n_points_lon`: Number of grid points along the longitude for each box.
+
+### Returns
+
+- A pandas DataFrame containing latitude, longitude, and box_id columns for each grid point.
+
+**Note:** Adjust the grid parameters according to the desired granularity and coverage.
+
+## Breathe London
+
+### Types of Data Collected
+
+1. **Pollutant Measurements:**
+   - **Nitrogen Dioxide (NO2):** A key indicator of traffic-related air pollution.
+ - **Particulate Matter (PM1, PM2.5, and PM10):** Fine particles from various sources, including vehicle emissions, construction sites, and industrial activities. + - **Ozone (O3):** Formed by chemical reactions between other pollutants in the presence of sunlight. + - **Carbon Monoxide (CO):** Emitted from vehicles and other combustion sources. + - **Other Pollutants:** May include volatile organic compounds (VOCs) and black carbon. + +2. **Meteorological Data:** + - **Temperature:** Air temperature readings. + - **Humidity:** Measurement of moisture in the air. + - **Wind Speed and Direction:** Important for understanding the dispersion of pollutants. + +### Data Collection Methods + +- **Fixed Sensors:** High-quality air quality monitoring stations placed at strategic locations across the city. +- **Mobile Sensors:** Sensors mounted on vehicles or carried by individuals to measure air quality in different areas, including areas not covered by fixed stations. +- **Wearable Sensors:** Portable devices worn by individuals to measure personal exposure to air pollution. + +### Data Accessibility + +- **Real-Time Data:** Breathe London provides real-time air quality data through its website and mobile applications. +- **Historical Data:** Users can access historical air quality data for analysis and research purposes. +- **Data Format:** Data is available in various formats, including JSON, CSV, and through interactive web-based platforms. + +### Accessing Breathe London Data + +#### Using the Breathe London Website + +1. **Visit the Breathe London website:** The primary portal for accessing air quality data. +2. **Data Dashboard:** The website features a dashboard where users can view real-time and historical air quality data from different monitoring sites and mobile sensors. +3. **Data Downloads:** Users can download data for specific time periods and pollutants. + +#### Using the Breathe London API + +1. **API Endpoint:** Access the Breathe London API to programmatically retrieve air quality data. +2. **Parameters:** Specify parameters such as `location`, `pollutant`, `start_date`, and `end_date` to customize your data request. +3. **Example API Call:** Retrieve data in JSON format. + + ```python + import requests + + url = 'https://api.breathelondon.org/v1/measurements?location=some_location&start_date=2022-06-01&end_date=2022-12-01' + response = requests.get(url) + + if response.status_code == 200: + data = response.json() + print(data) + else: + print("Error:", response.status_code) + ``` + +## SCOOT + +### Data Collected by SCOOT + +1. **Traffic Flow Data:** + - **Vehicle Counts:** Number of vehicles passing through an intersection. + - **Traffic Speeds:** Speed of vehicles at different points in the network. + - **Queue Lengths:** Length of vehicle queues at traffic signals. + - **Cycle Time:** Duration of traffic signal cycles. + +2. **Detector Types:** + - **Inductive Loop Detectors:** Embedded in the road surface to detect vehicles. + - **Magnetometers:** Measure changes in the magnetic field caused by passing vehicles. + - **Other Sensors:** May include video and radar sensors for additional traffic monitoring. + +### How SCOOT Works + +1. **Data Collection:** + - **Real-Time Monitoring:** Continuous collection of traffic data from sensors at intersections. + - **Central Processing:** Data is sent to a central computer for processing. + +2. **Signal Optimization:** + - **Cycle Time Adjustment:** Dynamically adjusts the overall cycle time to balance traffic flow. 
+   - **Green Time Allocation:** Distributes green time across different traffic movements to minimize delays.
+   - **Offset Coordination:** Coordinates signals at adjacent intersections to create a "green wave" for smoother traffic flow.
+
+3. **Adaptation:**
+   - **Continuous Updates:** Signal timings are updated every few seconds based on current traffic conditions.
+   - **Response to Changes:** Quickly adapts to changes in traffic patterns, such as during peak hours or incidents.
+
+### Accessing SCOOT Data
+
+While direct public access to raw SCOOT data is limited due to the proprietary nature of the system and data privacy concerns, traffic management centers (TMCs) and city authorities can access and utilize this data for urban planning and traffic management.
+
+### Use Cases and Applications
+
+1. **Traffic Management Centers (TMCs):**
+   - **Real-Time Monitoring:** TMCs use SCOOT data to monitor and manage traffic in real-time.
+   - **Incident Response:** Quickly respond to traffic incidents and adjust signals to mitigate impact.
+
+2. **Urban Planning:**
+   - **Infrastructure Improvements:** Use historical SCOOT data to plan infrastructure upgrades and improvements.
+   - **Policy Making:** Inform traffic policies and congestion management strategies.
+
+3. **Research and Analysis:**
+   - **Traffic Studies:** Conduct detailed traffic flow and congestion studies using SCOOT data.
+   - **Model Validation:** Validate traffic models and simulations with real-world data.
+
+### Example Use in Research
+
+Researchers and urban planners can use SCOOT data to study the impact of traffic signal optimization on air quality, traffic congestion, and road safety. For example, the daily aggregated junction data contains the following fields:
+
+- `SCOOTLinkID` – Refers to the SCOOT Link ID
+- `LinkDescription` – A brief description of the location of the junction, typically by road name
+- `Date` – The date of the data
+- `TwentyFourHourVehicleFlowTotal` – The total flow observed within a 24 hour period across all links approaching the junction
+- `FlowDataCompletenessPercentage` – Refers to the availability of data; 100% is a complete dataset.
+- `AverageCongestionPercentage` – The average congestion within a 24 hour period across all links approaching the junction
+- `CongestionDataCompletenessPercentage` – Refers to the availability of data; 100% is a complete dataset.

diff --git a/containers/cleanair/cleanair_data/docs/datasets.md b/containers/cleanair/cleanair_data/docs/datasets.md
index cc28e2190..363219877 100644
--- a/containers/cleanair/cleanair_data/docs/datasets.md
+++ b/containers/cleanair/cleanair_data/docs/datasets.md
@@ -7,4 +7,45 @@ This guide aims to give an overview of these datasets and describe how they fit
 | Dataset name | Provider | Description |
 |---------------|-------|---------------|
 | [London Air Quality Network](https://www.londonair.org.uk/) (LAQN) | Imperial College London | High accuracy air quality sensors every 15 minutes |
-| [Copernicus satellite air quality forecasts](https://atmosphere.copernicus.eu/)| Copernicus, ECMWF | Hourly air quality forecasts derived from Satellite data |
\ No newline at end of file
+| [Copernicus satellite air quality forecasts](https://atmosphere.copernicus.eu/)| Copernicus, ECMWF | Hourly air quality forecasts derived from Satellite data |
+| [Breathe London](https://www.breathelondon.org/)| Community sensing network | Hourly air quality measurements from low-cost sensors |
+| Scoot | Transport for London (TfL) | Traffic detector network; data accessed via an S3 bucket maintained by TfL |
+
+## LAQN
+
+The London Air Quality Network (LAQN) data provides detailed information about air pollution levels across London and the surrounding areas. It is a crucial resource for monitoring and analyzing air quality to understand pollution trends, identify sources of pollution, and inform public health decisions. Here's an overview of LAQN data:
+
+### Overview of LAQN
+
+- **Purpose:** LAQN aims to provide accurate and comprehensive air quality data to support research, policy-making, and public information efforts.
+- **Managed by:** The network is managed by Imperial College London.
+- **Coverage:** The network covers multiple monitoring sites across London and the South East of England.
+
+## Satellite Data
+
+Copernicus satellite data refers to the Earth observation data collected by the European Union's Copernicus program. This program operates a fleet of satellites that capture various types of data related to the Earth's atmosphere, land surface, oceans, and climate. These satellites provide valuable information about weather patterns, air quality, vegetation, ocean currents, and other environmental parameters.
+
+ECMWF (European Centre for Medium-Range Weather Forecasts) is one of the organizations responsible for processing and analyzing the Copernicus satellite data. They utilize advanced data assimilation and modeling techniques to integrate the satellite data with other observational and model-based information, allowing for more accurate weather forecasting, climate monitoring, and environmental analysis. The ECMWF satellite data products are widely used by meteorologists, climate scientists, and policymakers for a range of applications, including weather prediction, climate research, and disaster management.
+
+## Breathe London
+
+**Breathe London** is an initiative aimed at monitoring and improving air quality in London. It leverages advanced technologies to provide detailed, real-time air quality data to support research, public health, and policy-making efforts.
Here's an overview of Breathe London:
+
+### Overview of Breathe London
+
+- **Purpose:** Breathe London aims to deliver high-resolution, real-time air quality data to inform residents, policymakers, and researchers about air pollution in the city and support efforts to improve air quality.
+- **Managed by:** The project is a collaboration involving various stakeholders, including government agencies, research institutions, and environmental organizations.
+
+## Scoot
+
+### Overview of SCOOT (Split Cycle Offset Optimization Technique)
+
+SCOOT is an advanced urban traffic control system designed to optimize traffic flow and reduce congestion in real-time. Widely used in London, it leverages real-time data from traffic detectors to dynamically adjust traffic signal timings at intersections.
+
+### Key Features of SCOOT
+
+- **Dynamic Signal Control:** Adjusts traffic signal timings continuously based on real-time traffic data.
+- **Traffic Flow Optimization:** Aims to reduce congestion and improve traffic flow efficiency.
+- **Wide Deployment:** Implemented in various cities, with extensive use in London.
+
+[more info](scoot.md)

diff --git a/containers/cleanair/cleanair_data/docs/developer.md b/containers/cleanair/cleanair_data/docs/developer.md
index 958358cb5..c58553c62 100644
--- a/containers/cleanair/cleanair_data/docs/developer.md
+++ b/containers/cleanair/cleanair_data/docs/developer.md
@@ -1,5 +1,47 @@
 # Developer guide
 
-**Make sure you have [installed the packages](installation.md) before following this guide.**
+## Style guide
 
-This guide describes all the tools you need for developing & contributing to the codebase of the London Air Quality Project.
+### Writing Documentation
+
+Before being accepted into master, all code should have well-written documentation.
+
+**Please use [Google Style Python Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)**
+
+We would like to move towards adding [type hints](https://docs.python.org/3.7/library/typing.html), so you may optionally add types to your code. In that case you do not need to repeat the types in your Google-style docstrings.
+
+Adding and updating existing documentation is highly encouraged.
+
+### Gitmoji
+
+We like [gitmoji](https://gitmoji.carloscuesta.me/) for an emoji guide to our commit messages. You might consider (entirely optional) using the [gitmoji-cli](https://github.com/carloscuesta/gitmoji-cli) as a hook when writing commit messages.
+
+### Working on an issue
+
+The general workflow for contributing to the project is to first choose an issue (or create one) to work on and assign yourself to it.
+
+You can find issues that need work by filtering on the `Needs assignment` label. If you decide to move on to something else, or wonder what you've got yourself into, please unassign yourself, leave a comment about why you dropped the issue (e.g. you got bored, or you are blocked by something else) and re-add the `Needs assignment` label.
+
+You are encouraged to open a pull request earlier rather than later (either a `draft pull request` or add `WIP` to the title) so others know what you are working on.
+
+How you label branches is optional, but we encourage using `iss_<number>_<description>`, where `<number>` is the GitHub issue number and `<description>` is a very short description of the issue. For example `iss_928_add_api_docs`.
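+
+As a concrete sketch of that workflow (the issue number, branch name, and file here are made up for illustration):
+
+```bash
+# assign yourself to (hypothetical) issue 928 on GitHub, then:
+git checkout -b iss_928_add_api_docs
+# ...make and stage your changes...
+git add containers/cleanair/cleanair_data/docs/dataset_APIs.md
+git commit -m ":memo: add API docs"  # gitmoji-style commit message
+git push -u origin iss_928_add_api_docs
+# finally, open a draft pull request on GitHub so others can see the work in progress
+```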
+
+## Writing tests
+
+The following shows an example test:
+
+```python
+# NOTE: the import paths for the cleanair package are assumed here
+from cleanair.databases import DBWriter
+from cleanair.databases.tables import ScootReading
+
+
+def test_scoot_reading_empty(secretfile, connection):
+    conn = DBWriter(
+        secretfile=secretfile, initialise_tables=True, connection=connection
+    )
+
+    with conn.dbcnxn.open_session() as session:
+        assert session.query(ScootReading).count() == 0
+```
+
+It uses the `DBWriter` class to connect to the database. In general, when interacting with a database we write a class which inherits from either `DBWriter` or `DBReader`. Both classes take a `secretfile` as an argument which provides database connection secrets.
+
+**Critically, we also pass a special `connection` fixture when initialising any class that interacts with the database.**
+
+This fixture ensures that all interactions with the database take place within a `transaction`. At the end of the test the transaction is rolled back, leaving the database in the same state it was in before the test was run, even if `commit` is called during the test.

diff --git a/containers/cleanair/cleanair_data/docs/scoot.md b/containers/cleanair/cleanair_data/docs/scoot.md
index 27e9691af..d13fd7af8 100644
--- a/containers/cleanair/cleanair_data/docs/scoot.md
+++ b/containers/cleanair/cleanair_data/docs/scoot.md
@@ -3,20 +3,21 @@
 The urbanair project used a traffic dataset called SCOOT.
 This guide shows you how to run the SCOOT forecasting model and then create traffic features which can be used by the air quality model.
 Before starting this guide, make sure you have:
+
 1. [Installed the cleanair package](installation.md#install-cleanair)
-2. [Logged into the Azure CLI](azure.md#sign-into-the-azure-cli)
-3. [Connect to the database](database_connection.md)
+2. [Connected to the database](configure_local_database.md)
 
 > Note that the SCOOT forecasting model and feature extraction are automatically scheduled to run daily. This guide is purely for testing & development purposes.
 
 ## Contents
+
 - [How it works](#how-it-works)
 - [How to check the SCOOT features](#how-to-check-the-features)
 - [How to extract the SCOOT features](#how-to-extract-the-features)
 
 ***
 
-## How it works 
+## How it works
 
 We extract scoot features in multiple steps using the urbanair cli:
 
 4. Join the scoot readings to the table above and then calculate road readings using precalculated weightings and store as a [CTE](https://www.postgresql.org/docs/9.1/queries-with.html). This gives us a reading for every road segment in a buffer.
 5. For every feature we want to extract, aggregate the appropriate column from the CTE created in step 4. Then `UNION` all of these together so we can process all features in the same query. Otherwise we'd have to repeat all the above steps for every feature.
 
-### Benefits of this approach:
+### Benefits of this approach
+
 - We map scoot sensors to road readings when we need them. If you just need scoot features at LAQN or AQE sensors you don't want to have to map scoot readings to the entire road network before you can start feature processing
 - Don't need to store road readings in the database, which will get very large fast.
-- Avoid doing a massive inner join of OSHighway X timestamps with buffers based on a spatial intersection. Instead just join OSHighway with buffers based on spatial intersection and then join scoot readings based on point_id. 
-
-Calculating features for more days is not particularly expensive. Adding more interest points is more expensive. 
+- Avoid doing a massive inner join of OSHighway X timestamps with buffers based on a spatial intersection. Instead just join OSHighway with buffers based on spatial intersection and then join scoot readings based on point_id.
+
+Calculating features for more days is not particularly expensive. Adding more interest points is more expensive.
 
 ***
 
 ## How to check the features
 
 SCOOT features are stored in the PostgreSQL database under the `dynamic_features` schema.
 You can check the extracted features for a given data range and for a list of interest point "sources" (e.g. `laqn`, `aqe`, `satellite`):
 
 ```bash
-urbanair features scoot check --ndays 1 --upto 2022-01-05 --source laqn --source aqe
+urbanair_db features scoot check --ndays 1 --upto 2022-01-05 --source laqn --source aqe
 ```
 
 To see if there are any *missing* features, append the `--only-missing` flag to the above command.
 
 ***
 
 ## How to extract the features
 
 If you find there are missing features from the above section, you can run the SCOOT feature extraction.
 
 First, check that the mapping from roads to SCOOT sensors is up to date:
 
 ```bash
-urbanair features scoot update-road-maps
+urbanair_db features scoot update-road-maps
 ```
 
 Now process all the SCOOT features for any interest points that have any missing data for the given date range:
 
 ```bash
-urbanair features scoot fill --ndays 1 --upto 2020-01-05 --source laqn --source aqe --insert-method missing
+urbanair_db features scoot fill --ndays 1 --upto 2020-01-05 --source laqn --source aqe --insert-method missing
 ```
-

diff --git a/containers/cleanair/cleanair_data/docs/secretfile.md b/containers/cleanair/cleanair_data/docs/secretfile.md
index 889747f56..b5655073f 100644
--- a/containers/cleanair/cleanair_data/docs/secretfile.md
+++ b/containers/cleanair/cleanair_data/docs/secretfile.md
@@ -1,124 +1,114 @@
-# Secret file for database connection
+# Secret File for Database Connection
 
-The connection information for a PostgreSQL database is stored in a JSON "secret" file.
-It contains entries for the username, password, host, port, database name and SSL mode.
+The connection information for a PostgreSQL database is stored in a JSON "secret" file. This file contains entries for the username, password, host, port, database name, and SSL mode.
 
-**Before starting this guide**, please make sure you have [installed the packages](installation.md), [logged into the Azure CLI](azure.md) and read the [developer guide](developer.md).
-You may also find it useful to look at our [guide for docker](docker.md) and [mounting a secret file](docker.md#mounting-a-secrets-file).
+**Before starting this guide**, ensure you have [installed the necessary packages](installation.md) and read the [developer guide](developer.md). Additionally, refer to our guides on [Docker](docker.md) and [mounting a secret file](docker.md#mounting-a-secrets-file).
 
-**The contents of this guide** include:
+**Contents of this Guide:**
 
-- Connecting to the [Azure database](#Azure-database)
-- Secret file for a local [docker database](#docker-database)
+- Secret file for a [local database](#local-database)
+- Secret file for a local [Docker database](#docker-database)
+- Secret file for a server [Aquifer database](#server-database)
 
 ***
 
-## Azure database
+## Local Database
 
-The connection to the Azure database in Azure is managed through the urbanair CLI.
-The username and password are generated using the [Azure CLI](azure.md).
+We store database credentials in JSON files. **For production databases, never store database passwords in these files.
For more information, see the production database section.**
 
-***With Turing account***
-
-You can store the connection credentials for the Azure database by running:
-
-```bash
-urbanair init production
-```
-
-You *should* now be able to connect to the Azure database using the urbanair CLI.
-If you would like to get the location of the JSON secret file for the Azure database,
-you can run:
-
-```bash
-urbanair config path
-```
-
-If you would like to get the username and password stored in the Azure JSON secret file, use the `urbanair echo` CLI:
-
-```bash
-urbanair echo dbuser
-urbanair echo dbtoken
-```
-
-***Without Turing account***
-
-We are going to store the settings for the azure database in a JSON file.First, create environment variables to store the location of filepaths and create the hidden `.secrets` directory. We recommend doing this inside the repo.
-
-```bash
-cd clean-air-infrastructure
-export SECRETS_DIR="$(pwd)/.secrets"
-export DB_SECRET_FILE="${SECRETS_DIR}/.db_secrets_azure.json"
-mkdir "${SECRETS_DIR}"
-```
-
-Next create `.db_secrets_azurer.json`:
-
-```bash
-echo '{ 
-  "host": "cleanair-inputs-2021-server.postgres.database.azure.com",
-  "port": 5432,
-  "db_name": "cleanair_inputs_db",
-  "ssl_mode": "require",
-  "username": USERNAME,
-  "password": PASSWORD,
-}' >> $DB_SECRET_FILE
-```
-
-Fill the `USERNAME` and `PASSWORD` apporipiated.fThen run the comand to ???.
-
-```
-urbanair init local --secretfile $DB_SECRET_FILE
-```
+Create the secrets directory and the local database secrets file:
+
+```bash
+mkdir -p .secrets
+export DB_SECRET_FILE="$(pwd)/.secrets/.db_secrets_offline.json"
+echo '{
+  "username": "postgres",
+  "password": "",
+  "host": "localhost",
+  "port": 5432,
+  "db_name": "cleanair_test_db",
+  "ssl_mode": "prefer"
+}' >> "$DB_SECRET_FILE"
+```
+
+Initialize the local environment:
+
+```bash
+urbanair_db init local --secretfile $DB_SECRET_FILE
+```
 
 ***
 
-## Docker database
+## Docker Database
 
-> Create a [test docker PostgreSQL database](developer.md#setting-up-a-test-database-with-docker) before starting this section.
+> **Note:** Create a [test Docker PostgreSQL database](developer.md#setting-up-a-test-database-with-docker) before starting this section.
 
-We are going to store the settings for the test docker database in a JSON file.
-First, create environment variables to store the location of filepaths and create the hidden `.secrets` directory. We recommend doing this inside the repo.
+Store the settings for the test Docker database in a JSON file. First, create environment variables to store the location of file paths and create the hidden `.secrets` directory. It is recommended to do this inside the repository.
 
 ```bash
 cd clean-air-infrastructure
 export SECRETS_DIR="$(pwd)/.secrets"
 export DB_SECRET_FILE="${SECRETS_DIR}/.db_secrets_docker.json"
 mkdir "${SECRETS_DIR}"
 ```
 
-> If using conda, you might like to [save these environment variables](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#saving-environment-variables) so you never have to set them again
+> **Tip:** If using Conda, you might want to [save these environment variables](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#saving-environment-variables) to avoid setting them again.
 
-Next create `.db_secrets_docker.json`:
+Next, create the `.db_secrets_docker.json` file:
 
 ```bash
 echo '{
   "username": "postgres",
   "password": "",
   "host": "localhost",
   "port": 5432,
   "db_name": "cleanair_test_db",
   "ssl_mode": "prefer"
 }' >> $DB_SECRET_FILE
 ```
 
+***
+
+## Server Database
+
+Store the settings for the Aquifer PostgreSQL database in a JSON file. First, ensure you have access to the Aquifer server located in the Department of Computer Science at Warwick. Create environment variables to store the location of file paths and create the hidden `.secrets` directory. It is recommended to do this inside the repository.
```bash
+cd clean-air-infrastructure
+export SECRETS_DIR="$(pwd)/.secrets"
+export DB_SECRET_FILE="${SECRETS_DIR}/.db_secrets_aquifer.json"
+mkdir "${SECRETS_DIR}"
+```
+
+> **Tip:** If using Conda, you might want to [save these environment variables](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#saving-environment-variables) to avoid setting them again.
+
+Next, create the `.db_secrets_aquifer.json` file:
+
+```bash
+echo '{
+  "username": "postgres",
+  "password": "",
+  "host": "localhost",
+  "port": 5432,
+  "db_name": "cleanair_inputs_db",
+  "ssl_mode": "prefer"
+}' >> $DB_SECRET_FILE
+```
+
+If you do not have admin access to the Aquifer server or encounter any issues connecting to the database, please contact [Sueda Ciftci](mailto:sueda.ciftci@warwick.ac.uk).
 
-<<<<<<< HEAD
 ***
 
-## Mounting a secrets file
+## Mounting a Secrets File
 
-If you are running a docker container and you need the container to connect to the database,
-you will need to [mount the directory](https://docs.docker.com/storage/bind-mounts/) `SECRETS_DIR` containing the JSON secret file on the host machine onto a target directory `/secrets` on the container file system.
+When running a Docker container that needs to connect to the database, you must [mount the directory](https://docs.docker.com/storage/bind-mounts/) containing the JSON secret file from the host machine onto a target directory `/secrets` on the container file system.
 
-Add the `-v "${SECRET_DIR}":/secrets` option to any `docker run` commands that need a database connection.
+Add the `-v "${SECRETS_DIR}":/secrets` option to any `docker run` commands that require a database connection.
 
-You will also need to *append* the location of the JSON secret file by using the `--secretfile` option for most urbanair CLI commands.
+Additionally, append the location of the JSON secret file using the `--secretfile` option for most Urbanair CLI commands.
 
-In summary, a docker run command might look something like:
+A typical Docker run command might look like this:
 
 ```bash
-docker run -v "${SECRET_DIR}":/secrets ... --secretfile /secrets/.db_secrets_docker.json
+docker run -v "${SECRETS_DIR}":/secrets ... --secretfile /secrets/.db_secrets_docker.json
 ```
 
-If you don't use the `--secretfile` option for running the command, you *might* also need to add an environment variable that tells the *docker container* about the name of the secrets file by adding `-e DB_SECRET_FILE=".db_secrets_docker.json"`.
+If you don't use the `--secretfile` option, you may need to add an environment variable that informs the Docker container about the secrets file by adding `-e DB_SECRET_FILE=".db_secrets_docker.json"`.
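+
+If a container fails to connect, it is worth checking that the secrets file parses as valid JSON and contains every expected key. A minimal sketch (this is not part of the cleanair package; the key list simply mirrors the examples above):
+
+```python
+import json
+from pathlib import Path
+
+# path to one of the secrets files created above
+secretfile = Path(".secrets/.db_secrets_docker.json")
+secrets = json.loads(secretfile.read_text())
+
+# the keys used throughout this guide
+required = {"username", "password", "host", "port", "db_name", "ssl_mode"}
+missing = required - secrets.keys()
+if missing:
+    raise SystemExit(f"secrets file is missing keys: {missing}")
+print(f"OK: {secrets['host']}:{secrets['port']}/{secrets['db_name']}")
+```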
From 650367ad9a638fe04999f2b2c31597e0e40dba43 Mon Sep 17 00:00:00 2001
From: Sueda Ciftci
Date: Thu, 6 Jun 2024 10:22:13 +0100
Subject: [PATCH 4/4] adds base dockerfile

---
 .../dockerfiles/cleanair_base.Dockerfile      | 33 +++++++++++++++++++
 .../cleanair/cleanair_data/docs/docker.md     |  6 +----
 2 files changed, 34 insertions(+), 5 deletions(-)
 create mode 100644 containers/cleanair/cleanair_data/dockerfiles/cleanair_base.Dockerfile

diff --git a/containers/cleanair/cleanair_data/dockerfiles/cleanair_base.Dockerfile b/containers/cleanair/cleanair_data/dockerfiles/cleanair_base.Dockerfile
new file mode 100644
index 000000000..95853eefb
--- /dev/null
+++ b/containers/cleanair/cleanair_data/dockerfiles/cleanair_base.Dockerfile
@@ -0,0 +1,33 @@
+# Use an official Python runtime as a parent image
+FROM python:3.10
+
+# Install build dependencies (compilers, CMake, CA certificates and GEOS)
+RUN apt update \
+    && apt install -y \
+    build-essential \
+    ca-certificates \
+    cmake \
+    gfortran \
+    libgeos-dev
+
+# download the ecCodes source (needed to decode ECMWF satellite GRIB data)
+WORKDIR /app
+ENV ECCODES_MAJOR_VERSION=2
+ENV ECCODES_MINOR_VERSION=26
+ENV ECCODES_PATCH_VERSION=0
+ENV ECCODES_VERSION=${ECCODES_MAJOR_VERSION}.${ECCODES_MINOR_VERSION}.${ECCODES_PATCH_VERSION}
+ENV ECCODES_SRC_DIR=eccodes-${ECCODES_VERSION}-Source
+ENV ECCODES_TAR_NAME=eccodes-${ECCODES_VERSION}-Source.tar.gz
+RUN wget -c https://confluence.ecmwf.int/download/attachments/45757960/${ECCODES_TAR_NAME}
+RUN tar -xzf ${ECCODES_TAR_NAME}
+
+# build and install ecCodes with CMake
+RUN mkdir ${ECCODES_SRC_DIR}/build
+WORKDIR ${ECCODES_SRC_DIR}/build
+RUN cmake -DENABLE_AEC=OFF ..
+RUN cmake --build .
+RUN cmake --install .
+
+# remove the ecCodes build directory
+WORKDIR /app
+RUN rm -r ${ECCODES_SRC_DIR}/build
\ No newline at end of file

diff --git a/containers/cleanair/cleanair_data/docs/docker.md b/containers/cleanair/cleanair_data/docs/docker.md
index bb37ec928..a4fcda259 100644
--- a/containers/cleanair/cleanair_data/docs/docker.md
+++ b/containers/cleanair/cleanair_data/docs/docker.md
@@ -8,8 +8,4 @@
 
 ## Contents
 
-- [Our docker images](#our-docker-images)
-- [Pulling a docker image](#pulling) from the [Azure container registry](#azure-container-registry).
-- [Building a docker image](#building)
-- [Running a docker image](#running) including [mounting a secret file](#mounting-a-secrets-file)
-- [(Optional) Networks of docker containers](#multi-container-networks)
+**TODO**